Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-03 11:29:10 -05:00)

Compare commits
381 Commits
Abbreviated SHA-1 hashes of the compared commits, in page order:

```
3cc4cb0a0b e6cba61740 cd7ce73f59 fab4e33c58 b79b13a75b c294d5f087 10ead2e61f 960b40fa89
afad329e99 4025284fba a595e814dd 963d8121d9 03019988b1 97115afa32 c9f5bae177 2bd11ca4e3
a5de1acb46 5581751e9d 055ae92273 dea7c77055 765dda6ad7 28702a1c9d 40d1226612 effe8ce8a9
4c3ba24826 b2425b2a25 51c932164f 19e82e93b1 3f785eaecf e59c0f38d9 64004c6bc0 422332de7e
5a15ba7451 d3686bb1e2 3a6eeef580 1dc5c6b8a8 3532a560d8 d2d894d808 2aa38bfc4b 9c3cee9ae4
fc521b5f9d 5253368acc 51cfc3d4be 80bffd93e7 df4f22e899 8cc70843a5 70c841f23a 7b22e09805
c5838c143c eaf71db7c9 b322b527b3 cc4b231875 05642a3e17 7dcc6bb579 f15c416e59 6fa97eeec7
03bbf273b3 1e376cd3a6 49cf939c04 338394f8c3 575b62d77b 57fc0eb5b1 3a19fe3663 0c049179b4
e22c873ec4 d644ebab09 2fdc578a88 aeb3a3f7b5 044b7ce070 815e538f10 758233f001 f4f4fda520
1d77aeb69c 29dbfc647d 55d9514e83 46bd7956a3 e1ee394c26 19884ade99 f0a88061db 6057138466
aaaa6556f3 4745431cda 0455a948a9 bf3e249237 fb649e9525 9d1e2d9f46 9876d93b60 b3dd05580b
32847f33fd bff9723fe3 d114648c16 44d0da02d0 c25107eff3 af5c36d2a8 0828a67145 617fb84983
6f8ac2b61c 6f2b4b96cf 3cc288a169 0bbbf3eb3b 4b1b56fee8 154fc59e93 218c4c128c 53f1af0cab
f9577a38dc fadc7d9ba5 1e4b2133f6 bfefa6d016 8b66472949 3b3aa94c4e dc05275670 7921082ece
efd6a29909 88c44b303d e7dbb8ccdc fe2a743c8d 1f5c124ac4 64a5bc038a 5d9396334c ec160f1f0a
bbaeca96eb 3ab779895f 203c7360e7 a831f174ef 153091f52f 35d3af5039 57e8cd6eab fc123a71af
f14836cf02 4178feb65f 2edaf22590 acd3dab957 6bbd74adcd 9bb928bb38 c87a6c5969 c482c13dcb
b87ed97402 ee000dabfd a73a011ee0 2a8e5e2c14 5d9a41f712 ebcf4b60f6 f976b78917 078790bd0f
b88c5a89a8 57028e3acc c586a17926 9d078bac54 38eaefcabd ba8cadc2f1 380d5dfa6d 32af626630
8358fedaf4 ec82b0c648 81a87f873f d91b8ac444 952e51ac75 11267cd44f 0e59e0aebd ae1d3b3dd3
1a91dbee5f fd507e3e41 9c1a67cf47 69e3824840 fcb1a98129 6d942635af cda2c5d459 969bb5a742
4bccc611c3 d18c4ece0c 25c664b13a 7c680c955f f037d1b6ca 2c8b627008 221e3eddd5 74c39c677b
e6558832bf 9c50625c55 d372435e92 a53facf709 ffc39dfbcb cba38b15a9 5ac7564bfe 53cd289b90
8dc13bcf1a 261825a89b f47a5a309d a40f2b9fa0 cfcd3892f7 703987f61c e50a8917ec 74d7c8e625
a5d1383fe8 bf2bcf515c 4c5e94c64b b4043216b6 4371014667 fbb3222d29 4ca3889bed eef1aebe8c
48382c4b59 9a45f0b31c 25c26e2f81 e4837f14b1 ce86131d12 e6c9baf6ef 8d6db7be31 a2548b1fd0
e4658bb99d bf2e4a561a 1816320124 f09bfe293d 7b4e8fda4b c95812353f b622ec7a28 d8fbe7b77f
dbcac37d91 d4d391b34f 571cf7d490 e18b19ca5a 48651bf482 5034a41c08 6795173e77 219ef996f5
00af1db275 5935ea896f 459983c05e ebf4f029ac 0eec945df1 8824b9d68f d2862814c5 25fece2d50
beb4239d1b 2b78e37d92 a2070d9ce4 5827a686b8 dec479532e 5d173168cc 2aac1cde04 3676f0268f
a7b75a54bb 961a87b743 e03d59e381 d46ce5003c 4c4143d9be 8bc7d259f4 2d047fa428 735d420d40
bc9fc1aece b88e3c99c1 ce3e6e084c 2a58ca7697 af96f7a0cd 7d39d1a925 1b6c700e18 6304bd60ee
4ad4417740 6a4c259a73 12eabb220d d68ce2d68c 8e02c040eb a7a317c284 9d6ef24660 14014408fb
b933e9666a 7aff59bcce 8e2760cb3d 7a9fc6dbd3 75d0dc251e 9a50c4d93f 010d5a0192 cf1594829a
854d720ce0 2f43c74ece b9817ac6b4 1e8da0d494 c47be7b415 d3f6cb860f 83d25f09a3 ed747a2d3d
3a8ee4ce2e 5ac01a3af4 46343f2f9e 56ccb5b2ab 9a946eed80 9c6cb0f630 1b066d6965 54c3caad53
9b5e8aaf83 5143c09bcf 2496185629 34deb82aea 8f72ae9da2 b753f01ac1 fd0a147ae6 e94bd90782
ce4b897d0e a7694029e2 1e9110b763 6f3fbbbe49 d346ec7bfe 26a3613397 e6318bddf3 514bb0beda
41b1bd2f05 bf40dadf04 cb1678ebec 0c1ac568b5 0f9550c747 b13ae17a47 f762a12d18 20d30a80be
229b218203 4b668aaca8 8c7f1421c6 d90b2c1d52 22f39be197 2fa45436c2 cadbb6bbce 2c89f04be7
597011e3a9 0d433b58ba cde8ef56e5 110816c7aa fbb1e168f7 23085eb5ae 7344a6205f 4b76ec40c0
90101d0269 7ac84c0660 2090530bbb b6cb7ddbaf 3422d9335c e91f9a944e e7ddc7cf0f 40dfa48756
579f92cf5f 4565125da9 ce13a01e65 618a8682b7 963077f918 3704d2d86b fc6a029311 7c7b1e6c2d
892920039d 51cdd38c3e 80977bd4c0 d8022f94ef 1c43587d7d b2ed32b118 0cc815d816 d452b7593f
5346bdc683 dc5c1e2002 2e48e298a2 7a1aaaf5c4 bde92d5cfe 691f0f4845 fdd458d2fe d2c0b8374a
c96c78892d 957643f523 749bbec566 25e363c5fb febeed3277 9d07aa006d 12d69e25dd 0c9f1efc75
665b4506e7 cb5548ceb8 c9492e54f7 12490eafff 6e83d11d5f 9c6aedc91b a9339d0627 4d9aa10532
b00264b594 e329c7015e 1392cfc72d c6688d8f89 87abea0ba3
```
.gitignore (vendored), 5 changed lines

```
@@ -1,4 +1,4 @@
syncthing
./syncthing
syncthing.exe
*.tar.gz
*.zip
@@ -13,3 +13,6 @@ perfstats*.csv
coverage.xml
!gui/scripts/syncthing
.DS_Store
syncthing.md5
syncthing.exe.md5
RELEASE
```
AUTHORS, 18 changed lines

```
@@ -5,15 +5,20 @@ Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Schulz <ueomkail@gmail.com>
Ben Curthoys <ben@bencurthoys.com>
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Brendan Long <self@brendanlong.com>
Caleb Callaway <enlightened.despot@gmail.com>
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Chris Joel <chris@scriptolo.gy>
Colin Kennedy <moshen.colin@gmail.com>
Daniel Martí <mvdan@mvdan.cc>
Dennis Wilson <dw@risu.io>
Dominik Heidler <dominik@heidler.eu>
Emil Hessman <emil@hessman.se>
Federico Castagnini <federico.castagnini@gmail.com>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Gilli Sigurdsson <gilli@vx.is>
@@ -21,13 +26,24 @@ Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Johan Vromans <jvromans@squirrel.nl>
Karol Różycki <rozycki.karol@gmail.com>
Kamada Ken'ichi <kamada@nanohz.org>
Lode Hoste <zillode@zillode.be>
Marcin Dziadus <dziadus.marcin@gmail.com>
Marc Laporte <marc@marclaporte.com>
Marc Pujol <kilburn@la3.org>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg <peter@speartail.com>
Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Stefan Tatschner <stefan@sevenbyte.org>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>
Tomas Cerveny <kozec@kozec.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
```
CONTRIBUTING.md (filename inferred from the content; the file header line was not preserved)

```
@@ -33,8 +33,8 @@ latest info on Transifex.

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://discourse.syncthing.net/t/733); it's
all in the name of quality. :) Following the points below will make this
[certain amount of review](https://github.com/syncthing/syncthing/wiki/FAQ#why-are-you-being-so-hard-on-my-pull-request);
it's all in the name of quality. :) Following the points below will make this
a smoother process.

Individuals making significant and valuable contributions are given
@@ -46,6 +46,20 @@ All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.

### Authorship

All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit;

    $ git config --global user.name "Jane Doe"
    $ git config --global user.email janedoe@example.com

You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.

### Core Team

The Syncthing core team currently consists of the following members;
@@ -79,8 +93,8 @@ The Syncthing core team currently consists of the following members;

## Licensing

All contributions are made under the same GPL license as the rest of the
project, except documentation, user interface text and translation
All contributions are made under the same MPLv2 license as the rest of
the project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
@@ -91,8 +105,8 @@ add yourself as a separate commit in your first pull request.

## Building

[See the documentation](http://discourse.syncthing.net/t/44) on how to
get started with a build environment.
[See the documentation](https://github.com/syncthing/syncthing/wiki/Building)
on how to get started with a build environment.

## Branches

@@ -119,8 +133,8 @@ Yes please!

## Documentation

[Over here!](http://discourse.syncthing.net/category/documentation)
[Over here!](https://github.com/syncthing/syncthing/wiki)

## License

GPLv3
MPLv2
```
Godeps/Godeps.json (generated), 38 changed lines

```
@@ -1,14 +1,10 @@
{
  "ImportPath": "github.com/syncthing/syncthing",
  "GoVersion": "go1.4rc1",
  "GoVersion": "go1.4",
  "Packages": [
    "./cmd/..."
  ],
  "Deps": [
    {
      "ImportPath": "github.com/AudriusButkevicius/lrufdcache",
      "Rev": "9bddff8f67224ab3e7d80525a6ae9bcf1ce10769"
    },
    {
      "ImportPath": "github.com/bkaradzic/go-lz4",
      "Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
@@ -18,24 +14,28 @@
      "Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
    },
    {
      "ImportPath": "github.com/calmh/osext",
      "Rev": "9bf61584e5f1f172e8766ddc9022d9c401faaa5e"
      "ImportPath": "github.com/calmh/luhn",
      "Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
    },
    {
      "ImportPath": "github.com/calmh/xdr",
      "Rev": "45c46b7db7ff83b8b9ee09bbd95f36ab50043ece"
    },
    {
      "ImportPath": "github.com/golang/groupcache/lru",
      "Rev": "f391194b967ae0d21deadc861ea87120d9687447"
      "Rev": "ff948d7666c5e0fd18d398f6278881724d36a90b"
    },
    {
      "ImportPath": "github.com/juju/ratelimit",
      "Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
    },
    {
      "ImportPath": "github.com/kardianos/osext",
      "Rev": "91292666f7e40f03185cdd1da7d85633c973eca7"
    },
    {
      "ImportPath": "github.com/syncthing/protocol",
      "Rev": "1a4398cc55c8fe82a964097eaf59f2475b020a49"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb",
      "Rev": "97e257099d2ab9578151ba85e2641e2cd14d3ca8"
      "Rev": "e3f32eb300aa1e514fe8ba58d008da90a062273d"
    },
    {
      "ImportPath": "github.com/syndtr/gosnappy/snappy",
@@ -55,23 +55,19 @@
    },
    {
      "ImportPath": "golang.org/x/crypto/bcrypt",
      "Comment": "null-236",
      "Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
      "Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
    },
    {
      "ImportPath": "golang.org/x/crypto/blowfish",
      "Comment": "null-236",
      "Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
      "Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
    },
    {
      "ImportPath": "golang.org/x/text/transform",
      "Comment": "null-112",
      "Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
      "Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
    },
    {
      "ImportPath": "golang.org/x/text/unicode/norm",
      "Comment": "null-112",
      "Rev": "2f707e0ad64637ca1318279be7201f5ed19c4050"
      "Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
    }
  ]
}
```
Godeps/_workspace/src/github.com/AudriusButkevicius/lrufdcache/.gitignore (generated, vendored), 24 changed lines, file deleted

```
@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
```
Godeps/_workspace/src/github.com/AudriusButkevicius/lrufdcache/LICENSE (generated, vendored), 25 changed lines, file deleted

```
@@ -1,25 +0,0 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org>
```
Godeps/_workspace/src/github.com/AudriusButkevicius/lrufdcache/README.md (generated, vendored), 4 changed lines, file deleted

```
@@ -1,4 +0,0 @@
lrufdcache
==========

A LRU file descriptor cache
```
Godeps/_workspace/src/github.com/AudriusButkevicius/lrufdcache/lrufdcache.go (generated, vendored), 88 changed lines, file deleted

```go
@@ -1,88 +0,0 @@
// Package logger implements a LRU file descriptor cache for concurrent ReadAt
// calls.
package lrufdcache

import (
    "os"
    "sync"

    "github.com/golang/groupcache/lru"
)

// A wrapper around *os.File which counts references
type CachedFile struct {
    file *os.File
    wg   sync.WaitGroup
    // Locking between file.Close and file.ReadAt
    // (just to please the race detector...)
    flock sync.RWMutex
}

// Tells the cache that we are done using the file, but it's up to the cache
// to decide when this file will really be closed. The error, if any, will be
// lost.
func (f *CachedFile) Close() error {
    f.wg.Done()
    return nil
}

// Read the file at the given offset.
func (f *CachedFile) ReadAt(buf []byte, at int64) (int, error) {
    f.flock.RLock()
    defer f.flock.RUnlock()
    return f.file.ReadAt(buf, at)
}

type FileCache struct {
    cache *lru.Cache
    mut   sync.Mutex
}

// Create a new cache with the number of entries to hold.
func NewCache(entries int) *FileCache {
    c := FileCache{
        cache: lru.New(entries),
    }

    c.cache.OnEvicted = func(key lru.Key, fdi interface{}) {
        // The file might not have been closed by all openers yet, therefore
        // spawn a routine which waits for that to happen and then closes the
        // file.
        go func(item *CachedFile) {
            item.wg.Wait()
            item.flock.Lock()
            item.file.Close()
            item.flock.Unlock()
        }(fdi.(*CachedFile))
    }
    return &c
}

// Open and cache a file descriptor or use an existing cached descriptor for
// the given path.
func (c *FileCache) Open(path string) (*CachedFile, error) {
    // Evictions can only happen during c.cache.Add, and there is a potential
    // race between c.cache.Get and cfd.wg.Add where if not guarded by a mutex
    // could result in cfd getting closed before the counter is incremented if
    // a concurrent routine does a c.cache.Add
    c.mut.Lock()
    defer c.mut.Unlock()
    fdi, ok := c.cache.Get(path)
    if ok {
        cfd := fdi.(*CachedFile)
        cfd.wg.Add(1)
        return cfd, nil
    }

    fd, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    cfd := &CachedFile{
        file: fd,
        wg:   sync.WaitGroup{},
    }
    cfd.wg.Add(1)
    c.cache.Add(path, cfd)
    return cfd, nil
}
```
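The deleted lrufdcache.go above documents a small API (NewCache, Open, ReadAt, Close). For orientation, here is a minimal usage sketch; it is my illustration, not part of the compare, and the file path and cache size are arbitrary placeholders:

```go
package main

import (
    "fmt"
    "log"

    "github.com/AudriusButkevicius/lrufdcache"
)

func main() {
    // Cache at most 64 open file descriptors; evicted ones are closed once
    // every opener has released its reference.
    cache := lrufdcache.NewCache(64)

    // Open returns a *CachedFile; Close only drops our reference, the cache
    // decides when the underlying descriptor is really closed.
    cfd, err := cache.Open("/tmp/example.dat") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    defer cfd.Close()

    buf := make([]byte, 16)
    n, err := cfd.ReadAt(buf, 0)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("read %d bytes\n", n)
}
```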
Godeps/_workspace/src/github.com/AudriusButkevicius/lrufdcache/lrufdcache_test.go (generated, vendored), 195 changed lines, file deleted

```go
@@ -1,195 +0,0 @@
package lrufdcache

import (
    "io/ioutil"
    "os"
    "sync"
    "time"

    "testing"
)

func TestNoopReadFailsOnClosed(t *testing.T) {
    fd, err := ioutil.TempFile("", "fdcache")
    if err != nil {
        t.Fatal(err)
        return
    }
    fd.WriteString("test")
    fd.Close()
    buf := make([]byte, 4)
    defer os.Remove(fd.Name())

    _, err = fd.ReadAt(buf, 0)
    if err == nil {
        t.Fatal("Expected error")
    }
}

func TestSingleFileEviction(t *testing.T) {
    c := NewCache(1)

    wg := sync.WaitGroup{}

    fd, err := ioutil.TempFile("", "fdcache")
    if err != nil {
        t.Fatal(err)
        return
    }
    fd.WriteString("test")
    fd.Close()
    buf := make([]byte, 4)
    defer os.Remove(fd.Name())

    for k := 0; k < 100; k++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            cfd, err := c.Open(fd.Name())
            if err != nil {
                t.Fatal(err)
                return
            }
            defer cfd.Close()

            _, err = cfd.ReadAt(buf, 0)
            if err != nil {
                t.Fatal(err)
            }
        }()
    }

    wg.Wait()
}

func TestMultifileEviction(t *testing.T) {
    c := NewCache(1)

    wg := sync.WaitGroup{}

    for k := 0; k < 100; k++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            fd, err := ioutil.TempFile("", "fdcache")
            if err != nil {
                t.Fatal(err)
                return
            }
            fd.WriteString("test")
            fd.Close()
            buf := make([]byte, 4)
            defer os.Remove(fd.Name())

            cfd, err := c.Open(fd.Name())
            if err != nil {
                t.Fatal(err)
                return
            }
            defer cfd.Close()

            _, err = cfd.ReadAt(buf, 0)
            if err != nil {
                t.Fatal(err)
            }
        }()
    }

    wg.Wait()
}

func TestMixedEviction(t *testing.T) {
    c := NewCache(1)

    wg := sync.WaitGroup{}
    wg2 := sync.WaitGroup{}
    for i := 0; i < 100; i++ {
        wg2.Add(1)
        go func() {
            defer wg2.Done()
            fd, err := ioutil.TempFile("", "fdcache")
            if err != nil {
                t.Fatal(err)
                return
            }
            fd.WriteString("test")
            fd.Close()
            buf := make([]byte, 4)

            for k := 0; k < 100; k++ {
                wg.Add(1)
                go func() {
                    defer wg.Done()

                    cfd, err := c.Open(fd.Name())
                    if err != nil {
                        t.Fatal(err)
                        return
                    }
                    defer cfd.Close()

                    _, err = cfd.ReadAt(buf, 0)
                    if err != nil {
                        t.Fatal(err)
                    }
                }()
            }
        }()
    }

    wg2.Wait()
    wg.Wait()
}

func TestLimit(t *testing.T) {
    testcase := 50
    fd, err := ioutil.TempFile("", "fdcache")
    if err != nil {
        t.Fatal(err)
        return
    }
    fd.Close()
    defer os.Remove(fd.Name())

    c := NewCache(testcase)
    fds := make([]*CachedFile, testcase*2)
    for i := 0; i < testcase*2; i++ {
        fd, err := ioutil.TempFile("", "fdcache")
        if err != nil {
            t.Fatal(err)
            return
        }
        fd.WriteString("test")
        fd.Close()
        defer os.Remove(fd.Name())

        nfd, err := c.Open(fd.Name())
        if err != nil {
            t.Fatal(err)
            return
        }
        fds = append(fds, nfd)
        nfd.Close()
    }

    // Allow closes to happen
    time.Sleep(time.Millisecond * 100)

    buf := make([]byte, 4)
    ok := 0
    for _, fd := range fds {
        if fd == nil {
            continue
        }
        _, err := fd.ReadAt(buf, 0)
        if err == nil {
            ok++
        }
    }

    if ok > testcase {
        t.Fatal("More than", testcase, "fds open")
    }
}
```
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go (generated, vendored), 388 changed lines. The single hunk replaces the whole file, and the old and new sides read identically in this extraction, so the listing is shown once.

```go
@@ -1,194 +1,194 @@
/*
 * Copyright 2011-2012 Branimir Karadzic. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

package lz4

import (
    "encoding/binary"
    "errors"
    "io"
)

var (
    // ErrCorrupt indicates the input was corrupt
    ErrCorrupt = errors.New("corrupt input")
)

const (
    mlBits  = 4
    mlMask  = (1 << mlBits) - 1
    runBits = 8 - mlBits
    runMask = (1 << runBits) - 1
)

type decoder struct {
    src  []byte
    dst  []byte
    spos uint32
    dpos uint32
    ref  uint32
}

func (d *decoder) readByte() (uint8, error) {
    if int(d.spos) == len(d.src) {
        return 0, io.EOF
    }
    b := d.src[d.spos]
    d.spos++
    return b, nil
}

func (d *decoder) getLen() (uint32, error) {

    length := uint32(0)
    ln, err := d.readByte()
    if err != nil {
        return 0, ErrCorrupt
    }
    for ln == 255 {
        length += 255
        ln, err = d.readByte()
        if err != nil {
            return 0, ErrCorrupt
        }
    }
    length += uint32(ln)

    return length, nil
}

func (d *decoder) cp(length, decr uint32) {

    if int(d.ref+length) < int(d.dpos) {
        copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
    } else {
        for ii := uint32(0); ii < length; ii++ {
            d.dst[d.dpos+ii] = d.dst[d.ref+ii]
        }
    }
    d.dpos += length
    d.ref += length - decr
}

func (d *decoder) finish(err error) error {
    if err == io.EOF {
        return nil
    }

    return err
}

// Decode returns the decoded form of src. The returned slice may be a
// subslice of dst if it was large enough to hold the entire decoded block.
func Decode(dst, src []byte) ([]byte, error) {

    if len(src) < 4 {
        return nil, ErrCorrupt
    }

    uncompressedLen := binary.LittleEndian.Uint32(src)

    if uncompressedLen == 0 {
        return nil, nil
    }

    if uncompressedLen > MaxInputSize {
        return nil, ErrTooLarge
    }

    if dst == nil || len(dst) < int(uncompressedLen) {
        dst = make([]byte, uncompressedLen)
    }

    d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}

    decr := []uint32{0, 3, 2, 3}

    for {
        code, err := d.readByte()
        if err != nil {
            return d.dst, d.finish(err)
        }

        length := uint32(code >> mlBits)
        if length == runMask {
            ln, err := d.getLen()
            if err != nil {
                return nil, ErrCorrupt
            }
            length += ln
        }

        if int(d.spos+length) > len(d.src) {
            return nil, ErrCorrupt
        }

        for ii := uint32(0); ii < length; ii++ {
            d.dst[d.dpos+ii] = d.src[d.spos+ii]
        }

        d.spos += length
        d.dpos += length

        if int(d.spos) == len(d.src) {
            return d.dst, nil
        }

        if int(d.spos+2) >= len(d.src) {
            return nil, ErrCorrupt
        }

        back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8

        if back > d.dpos {
            return nil, ErrCorrupt
        }

        d.spos += 2
        d.ref = d.dpos - back

        length = uint32(code & mlMask)
        if length == mlMask {
            ln, err := d.getLen()
            if err != nil {
                return nil, ErrCorrupt
            }
            length += ln
        }

        literal := d.dpos - d.ref
        if literal < 4 {
            d.cp(4, decr[literal])
        } else {
            length += 4
        }

        if d.dpos+length > uncompressedLen {
            return nil, ErrCorrupt
        }

        d.cp(length, 0)
    }
}
```
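The Decode doc comment above notes that the result may be a subslice of dst when dst is large enough. A minimal sketch of that buffer-reuse pattern; this is my illustration, not part of the compare, and the package name lz4util is hypothetical:

```go
package lz4util // hypothetical helper package

import lz4 "github.com/bkaradzic/go-lz4"

// DecodeInto decompresses block into buf when buf is at least as large as the
// uncompressed length, so one buffer can be reused across many blocks;
// otherwise Decode allocates a fresh slice of the right size.
func DecodeInto(buf, block []byte) ([]byte, error) {
    return lz4.Decode(buf, block)
}
```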
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go (generated, vendored), 376 changed lines. As with reader.go, the single hunk replaces the whole file and the old and new sides read identically in this extraction, so the listing is shown once.

```go
@@ -1,188 +1,188 @@
/*
 * Copyright 2011-2012 Branimir Karadzic. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

package lz4

import "encoding/binary"
import "errors"

const (
    minMatch           = 4
    hashLog            = 17
    hashTableSize      = 1 << hashLog
    hashShift          = (minMatch * 8) - hashLog
    incompressible uint32 = 128
    uninitHash         = 0x88888888

    // MaxInputSize is the largest buffer than can be compressed in a single block
    MaxInputSize = 0x7E000000
)

var (
    // ErrTooLarge indicates the input buffer was too large
    ErrTooLarge = errors.New("input too large")
)

type encoder struct {
    src       []byte
    dst       []byte
    hashTable []uint32
    pos       uint32
    anchor    uint32
    dpos      uint32
}

// CompressBound returns the maximum length of a lz4 block, given it's uncompressed length
func CompressBound(isize int) int {
    if isize > MaxInputSize {
        return 0
    }
    return isize + ((isize) / 255) + 16 + 4
}

func (e *encoder) writeLiterals(length, mlLen, pos uint32) {

    ln := length

    var code byte
    if ln > runMask-1 {
        code = runMask
    } else {
        code = byte(ln)
    }

    if mlLen > mlMask-1 {
        e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
    } else {
        e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
    }
    e.dpos++

    if code == runMask {
        ln -= runMask
        for ; ln > 254; ln -= 255 {
            e.dst[e.dpos] = 255
            e.dpos++
        }

        e.dst[e.dpos] = byte(ln)
        e.dpos++
    }

    for ii := uint32(0); ii < length; ii++ {
        e.dst[e.dpos+ii] = e.src[pos+ii]
    }

    e.dpos += length
}

// Encode returns the encoded form of src. The returned array may be a
// sub-slice of dst if it was large enough to hold the entire output.
func Encode(dst, src []byte) ([]byte, error) {

    if len(src) >= MaxInputSize {
        return nil, ErrTooLarge
    }

    if n := CompressBound(len(src)); len(dst) < n {
        dst = make([]byte, n)
    }

    e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}

    binary.LittleEndian.PutUint32(dst, uint32(len(src)))
    e.dpos = 4

    var (
        step  uint32 = 1
        limit        = incompressible
    )

    for {
        if int(e.pos)+12 >= len(e.src) {
            e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
            return e.dst[:e.dpos], nil
        }

        sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])

        hash := (sequence * 2654435761) >> hashShift
        ref := e.hashTable[hash] + uninitHash
        e.hashTable[hash] = e.pos - uninitHash

        if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
            if e.pos-e.anchor > limit {
                limit <<= 1
                step += 1 + (step >> 2)
            }
            e.pos += step
            continue
        }

        if step > 1 {
            e.hashTable[hash] = ref - uninitHash
            e.pos -= step - 1
            step = 1
            continue
        }
        limit = incompressible

        ln := e.pos - e.anchor
        back := e.pos - ref

        anchor := e.anchor

        e.pos += minMatch
        ref += minMatch
        e.anchor = e.pos

        for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
            e.pos++
            ref++
        }

        mlLen := e.pos - e.anchor

        e.writeLiterals(ln, mlLen, anchor)
        e.dst[e.dpos] = uint8(back)
        e.dst[e.dpos+1] = uint8(back >> 8)
        e.dpos += 2

        if mlLen > mlMask-1 {
            mlLen -= mlMask
            for mlLen > 254 {
                mlLen -= 255

                e.dst[e.dpos] = 255
                e.dpos++
            }

            e.dst[e.dpos] = byte(mlLen)
            e.dpos++
        }

        e.anchor = e.pos
    }
}
```
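To tie the Encode and Decode functions shown above together, here is a minimal round-trip sketch; it is my illustration, not part of the compare, and the sample data is arbitrary:

```go
package main

import (
    "bytes"
    "fmt"
    "log"

    lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
    data := bytes.Repeat([]byte("syncthing "), 100)

    // Encode grows dst to CompressBound(len(data)) if it is too small and
    // prefixes the block with the little-endian uncompressed length.
    block, err := lz4.Encode(nil, data)
    if err != nil {
        log.Fatal(err)
    }

    // Decode reads that length prefix and, given a nil dst, allocates a
    // buffer of exactly the uncompressed size.
    out, err := lz4.Decode(nil, block)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(bytes.Equal(data, out)) // true
}
```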
Godeps/_workspace/src/github.com/calmh/luhn/LICENSE (generated, vendored), 19 changed lines, new file

```
@@ -0,0 +1,19 @@
Copyright (C) 2014 Jakob Borg

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
  all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
internal/luhn/luhn.go → Godeps/_workspace/src/github.com/calmh/luhn/luhn.go (generated, vendored), 15 changed lines

```go
@@ -1,17 +1,4 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// Copyright (C) 2014 Jakob Borg

// Package luhn generates and validates Luhn mod N check digits.
package luhn
```
internal/luhn/luhn_test.go → Godeps/_workspace/src/github.com/calmh/luhn/luhn_test.go (generated, vendored), 17 changed lines

```go
@@ -1,24 +1,11 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// Copyright (C) 2014 Jakob Borg

package luhn_test

import (
    "testing"

    "github.com/syncthing/syncthing/internal/luhn"
    "github.com/calmh/luhn"
)

func TestGenerate(t *testing.T) {
```
Godeps/_workspace/src/github.com/calmh/osext/LICENSE (generated, vendored), 20 changed lines, file deleted

```
@@ -1,20 +0,0 @@
Copyright (c) 2012 Daniel Theophanes

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.

3. This notice may not be removed or altered from any source
   distribution.
```
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go (generated, vendored), 6 changed lines

```go
@@ -321,10 +321,10 @@ func generateDiagram(output io.Writer, s structInfo) {
    case "bool":
        fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
        fmt.Fprintln(output, line)
    case "uint16":
    case "int16", "uint16":
        fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
        fmt.Fprintln(output, line)
    case "uint32":
    case "int32", "uint32":
        fmt.Fprintf(output, "| %s |\n", center(name, 61))
        fmt.Fprintln(output, line)
    case "int64", "uint64":
@@ -374,6 +374,8 @@ func generateXdr(output io.Writer, s structInfo) {
    }

    switch tn {
    case "int16", "int32":
        fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
    case "uint16", "uint32":
        fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
    case "int64":
```
Godeps/_workspace/src/github.com/calmh/xdr/reader.go (generated, vendored), 4 changed lines

```go
@@ -154,6 +154,10 @@ func (e XDRError) Error() string {
    return "xdr " + e.op + ": " + e.err.Error()
}

func (e XDRError) IsEOF() bool {
    return e.err == io.EOF
}

func (r *Reader) Error() error {
    if r.err == nil {
        return nil
```
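The IsEOF helper added above lets a caller distinguish a clean end-of-stream from a real decoding error without comparing error strings. A rough sketch of that pattern; this is my illustration, not from the diff, and it assumes the reader surfaces errors as xdr.XDRError values:

```go
package xdrutil // hypothetical helper package

import "github.com/calmh/xdr"

// isCleanEOF reports whether err is an XDR error that only wraps io.EOF.
// The assertion to a value type is an assumption about how the library
// returns its errors, not something the diff itself shows.
func isCleanEOF(err error) bool {
    if e, ok := err.(xdr.XDRError); ok {
        return e.IsEOF()
    }
    return false
}
```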
121
Godeps/_workspace/src/github.com/golang/groupcache/lru/lru.go
generated
vendored
121
Godeps/_workspace/src/github.com/golang/groupcache/lru/lru.go
generated
vendored
@@ -1,121 +0,0 @@
|
||||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package lru implements an LRU cache.
|
||||
package lru
|
||||
|
||||
import "container/list"
|
||||
|
||||
// Cache is an LRU cache. It is not safe for concurrent access.
|
||||
type Cache struct {
|
||||
// MaxEntries is the maximum number of cache entries before
|
||||
// an item is evicted. Zero means no limit.
|
||||
MaxEntries int
|
||||
|
||||
// OnEvicted optionally specificies a callback function to be
|
||||
// executed when an entry is purged from the cache.
|
||||
OnEvicted func(key Key, value interface{})
|
||||
|
||||
ll *list.List
|
||||
cache map[interface{}]*list.Element
|
||||
}
|
||||
|
||||
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
|
||||
type Key interface{}
|
||||
|
||||
type entry struct {
|
||||
key Key
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// New creates a new Cache.
|
||||
// If maxEntries is zero, the cache has no limit and it's assumed
|
||||
// that eviction is done by the caller.
|
||||
func New(maxEntries int) *Cache {
|
||||
return &Cache{
|
||||
MaxEntries: maxEntries,
|
||||
ll: list.New(),
|
||||
cache: make(map[interface{}]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *Cache) Add(key Key, value interface{}) {
|
||||
if c.cache == nil {
|
||||
c.cache = make(map[interface{}]*list.Element)
|
||||
c.ll = list.New()
|
||||
}
|
||||
if ee, ok := c.cache[key]; ok {
|
||||
c.ll.MoveToFront(ee)
|
||||
ee.Value.(*entry).value = value
|
||||
return
|
||||
}
|
||||
ele := c.ll.PushFront(&entry{key, value})
|
||||
c.cache[key] = ele
|
||||
if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
|
||||
c.RemoveOldest()
|
||||
}
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.ll.MoveToFront(ele)
|
||||
return ele.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key Key) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
ele := c.ll.Back()
|
||||
if ele != nil {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) removeElement(e *list.Element) {
|
||||
c.ll.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.cache, kv.key)
|
||||
if c.OnEvicted != nil {
|
||||
c.OnEvicted(kv.key, kv.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache) Len() int {
|
||||
if c.cache == nil {
|
||||
return 0
|
||||
}
|
||||
return c.ll.Len()
|
||||
}
|
||||
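The Cache above is a plain map-plus-list LRU with an optional OnEvicted callback; Add lazily initialises the internals, Get promotes the entry it returns, and exceeding MaxEntries evicts from the back of the list. A minimal usage sketch, assuming the upstream import path github.com/golang/groupcache/lru that this changeset drops from the vendor tree:

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// Keep at most two entries; the callback reports whatever gets evicted.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", the least recently used entry

	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	fmt.Println("len =", c.Len()) // len = 2
}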
73  Godeps/_workspace/src/github.com/golang/groupcache/lru/lru_test.go  generated  vendored
@@ -1,73 +0,0 @@
|
||||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type simpleStruct struct {
|
||||
int
|
||||
string
|
||||
}
|
||||
|
||||
type complexStruct struct {
|
||||
int
|
||||
simpleStruct
|
||||
}
|
||||
|
||||
var getTests = []struct {
|
||||
name string
|
||||
keyToAdd interface{}
|
||||
keyToGet interface{}
|
||||
expectedOk bool
|
||||
}{
|
||||
{"string_hit", "myKey", "myKey", true},
|
||||
{"string_miss", "myKey", "nonsense", false},
|
||||
{"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
|
||||
{"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
|
||||
{"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
|
||||
complexStruct{1, simpleStruct{2, "three"}}, true},
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
for _, tt := range getTests {
|
||||
lru := New(0)
|
||||
lru.Add(tt.keyToAdd, 1234)
|
||||
val, ok := lru.Get(tt.keyToGet)
|
||||
if ok != tt.expectedOk {
|
||||
t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
|
||||
} else if ok && val != 1234 {
|
||||
t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
lru := New(0)
|
||||
lru.Add("myKey", 1234)
|
||||
if val, ok := lru.Get("myKey"); !ok {
|
||||
t.Fatal("TestRemove returned no match")
|
||||
} else if val != 1234 {
|
||||
t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
|
||||
}
|
||||
|
||||
lru.Remove("myKey")
|
||||
if _, ok := lru.Get("myKey"); ok {
|
||||
t.Fatal("TestRemove returned a removed entry")
|
||||
}
|
||||
}
|
||||
27  Godeps/_workspace/src/github.com/kardianos/osext/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
14  Godeps/_workspace/src/github.com/kardianos/osext/README.md  generated  vendored  Normal file
@@ -0,0 +1,14 @@
|
||||
### Extensions to the "os" package.
|
||||
|
||||
## Find the current Executable and ExecutableFolder.
|
||||
|
||||
There is sometimes utility in finding the current executable file
|
||||
that is running. This can be used for upgrading the current executable
|
||||
or finding resources located relative to the executable file.
|
||||
|
||||
Multi-platform and supports:
|
||||
* Linux
|
||||
* OS X
|
||||
* Windows
|
||||
* Plan 9
|
||||
* BSDs.
|
||||
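In practice the package boils down to two calls, Executable and ExecutableFolder. A small sketch of typical use, assuming the upstream import path github.com/kardianos/osext:

package main

import (
	"fmt"
	"log"

	"github.com/kardianos/osext"
)

func main() {
	exe, err := osext.Executable()
	if err != nil {
		log.Fatal(err)
	}
	folder, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running binary:", exe)
	fmt.Println("resources live next to it in:", folder)
}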
@@ -25,8 +25,3 @@ func ExecutableFolder() (string, error) {
|
||||
folder, _ := filepath.Split(p)
|
||||
return folder, nil
|
||||
}
|
||||
|
||||
// Deprecated. Same as Executable().
|
||||
func GetExePath() (exePath string, err error) {
|
||||
return Executable()
|
||||
}
|
||||
@@ -5,16 +5,16 @@
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"os"
|
||||
"strconv"
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd solaris
|
||||
// +build linux netbsd openbsd solaris dragonfly
|
||||
|
||||
package osext
|
||||
|
||||
@@ -19,7 +19,7 @@ func executable() (string, error) {
|
||||
return os.Readlink("/proc/self/exe")
|
||||
case "netbsd":
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd":
|
||||
case "openbsd", "dragonfly":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
case "solaris":
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
|
||||
4  Godeps/_workspace/src/github.com/syncthing/protocol/AUTHORS  generated  vendored  Normal file
@@ -0,0 +1,4 @@
|
||||
# This is the official list of Protocol Authors for copyright purposes.
|
||||
|
||||
Audrius Butkevicius <audrius.butkevicius@gmail.com>
|
||||
Jakob Borg <jakob@nym.se>
|
||||
76  Godeps/_workspace/src/github.com/syncthing/protocol/CONTRIBUTING.md  generated  vendored  Normal file
@@ -0,0 +1,76 @@
|
||||
## Reporting Bugs
|
||||
|
||||
Please file bugs in the [Github Issue
|
||||
Tracker](https://github.com/syncthing/protocol/issues).
|
||||
|
||||
## Contributing Code
|
||||
|
||||
Every contribution is welcome. Following the points below will make this
|
||||
a smoother process.
|
||||
|
||||
Individuals making significant and valuable contributions are given
|
||||
commit-access to the project. If you make a significant contribution and
|
||||
are not considered for commit-access, please contact any of the
|
||||
Syncthing core team members.
|
||||
|
||||
All nontrivial contributions should go through the pull request
|
||||
mechanism for internal review. Determining what is "nontrivial" is left
|
||||
at the discretion of the contributor.
|
||||
|
||||
### Authorship
|
||||
|
||||
All code authors are listed in the AUTHORS file. Commits must be made
|
||||
with the same name and email as listed in the AUTHORS file. To
|
||||
accomplish this, ensure that your git configuration is set correctly
|
||||
prior to making your first commit;
|
||||
|
||||
$ git config --global user.name "Jane Doe"
|
||||
$ git config --global user.email janedoe@example.com
|
||||
|
||||
You must be reachable on the given email address. If you do not wish to
|
||||
use your real name for whatever reason, using a nickname or pseudonym is
|
||||
perfectly acceptable.
|
||||
|
||||
## Coding Style
|
||||
|
||||
- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
|
||||
as much as makes sense.
|
||||
|
||||
- All text files use Unix line endings.
|
||||
|
||||
- Each commit should be `go fmt` clean.
|
||||
|
||||
- The commit message subject should be a single short sentence
|
||||
describing the change, starting with a capital letter.
|
||||
|
||||
- Commits that resolve an existing issue must include the issue number
|
||||
as `(fixes #123)` at the end of the commit message subject.
|
||||
|
||||
- Imports are grouped per `goimports` standard; that is, standard
|
||||
library first, then third party libraries after a blank line.
|
||||
|
||||
- A contribution solving a single issue or introducing a single new
|
||||
feature should probably be a single commit based on the current
|
||||
`master` branch. You may be asked to "rebase" or "squash" your pull
|
||||
request to make sure this is the case, especially if there have been
|
||||
amendments during review.
|
||||
|
||||
## Licensing
|
||||
|
||||
All contributions are made under the same MIT license as the rest of the
|
||||
project, except documentation, user interface text and translation
|
||||
strings which are licensed under the Creative Commons Attribution 4.0
|
||||
International License. You retain the copyright to code you have
|
||||
written.
|
||||
|
||||
When accepting your first contribution, the maintainer of the project
|
||||
will ensure that you are added to the AUTHORS file. You are welcome to
|
||||
add yourself as a separate commit in your first pull request.
|
||||
|
||||
## Tests
|
||||
|
||||
Yes please!
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
19  Godeps/_workspace/src/github.com/syncthing/protocol/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014-2015 The Protocol Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
13  Godeps/_workspace/src/github.com/syncthing/protocol/README.md  generated  vendored  Normal file
@@ -0,0 +1,13 @@
|
||||
The BEPv1 Protocol
|
||||
==================
|
||||
|
||||
[](http://build.syncthing.net/job/protocol/lastBuild/)
|
||||
[](http://godoc.org/github.com/syncthing/protocol)
|
||||
[](http://opensource.org/licenses/MIT)
|
||||
|
||||
This is the protocol implementation used by Syncthing.
|
||||
|
||||
License
|
||||
=======
|
||||
|
||||
MIT
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
53  Godeps/_workspace/src/github.com/syncthing/protocol/compression.go  generated  vendored  Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright (C) 2015 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import "fmt"
|
||||
|
||||
type Compression int
|
||||
|
||||
const (
|
||||
CompressMetadata Compression = iota // zero value is the default, default should be "metadata"
|
||||
CompressNever
|
||||
CompressAlways
|
||||
|
||||
compressionThreshold = 128 // don't bother compressing messages smaller than this many bytes
|
||||
)
|
||||
|
||||
var compressionMarshal = map[Compression]string{
|
||||
CompressNever: "never",
|
||||
CompressMetadata: "metadata",
|
||||
CompressAlways: "always",
|
||||
}
|
||||
|
||||
var compressionUnmarshal = map[string]Compression{
|
||||
// Legacy
|
||||
"false": CompressNever,
|
||||
"true": CompressMetadata,
|
||||
|
||||
// Current
|
||||
"never": CompressNever,
|
||||
"metadata": CompressMetadata,
|
||||
"always": CompressAlways,
|
||||
}
|
||||
|
||||
func (c Compression) String() string {
|
||||
s, ok := compressionMarshal[c]
|
||||
if !ok {
|
||||
return fmt.Sprintf("unknown:%d", c)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c Compression) GoString() string {
|
||||
return fmt.Sprintf("%q", c.String())
|
||||
}
|
||||
|
||||
func (c Compression) MarshalText() ([]byte, error) {
|
||||
return []byte(compressionMarshal[c]), nil
|
||||
}
|
||||
|
||||
func (c *Compression) UnmarshalText(bs []byte) error {
|
||||
*c = compressionUnmarshal[string(bs)]
|
||||
return nil
|
||||
}
|
||||
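Because Compression implements MarshalText/UnmarshalText, the setting round-trips through text configuration: the legacy "true"/"false" values map onto the new enum, and anything unrecognised falls back to the zero value, CompressMetadata. A minimal sketch of that behaviour (hypothetical main package importing the vendored github.com/syncthing/protocol):

package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	var c protocol.Compression

	_ = c.UnmarshalText([]byte("true")) // legacy boolean value
	fmt.Println(c)                      // metadata

	_ = c.UnmarshalText([]byte("always"))
	fmt.Println(c) // always

	_ = c.UnmarshalText([]byte("bogus")) // unknown strings fall back to the zero value
	fmt.Println(c)                       // metadata
}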
49  Godeps/_workspace/src/github.com/syncthing/protocol/compression_test.go  generated  vendored  Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (C) 2015 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestCompressionMarshal(t *testing.T) {
|
||||
uTestcases := []struct {
|
||||
s string
|
||||
c Compression
|
||||
}{
|
||||
{"true", CompressMetadata},
|
||||
{"false", CompressNever},
|
||||
{"never", CompressNever},
|
||||
{"metadata", CompressMetadata},
|
||||
{"always", CompressAlways},
|
||||
{"whatever", CompressMetadata},
|
||||
}
|
||||
|
||||
mTestcases := []struct {
|
||||
s string
|
||||
c Compression
|
||||
}{
|
||||
{"never", CompressNever},
|
||||
{"metadata", CompressMetadata},
|
||||
{"always", CompressAlways},
|
||||
}
|
||||
|
||||
var c Compression
|
||||
for _, tc := range uTestcases {
|
||||
err := c.UnmarshalText([]byte(tc.s))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if c != tc.c {
|
||||
t.Errorf("%s unmarshalled to %d, not %d", tc.s, c, tc.c)
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range mTestcases {
|
||||
bs, err := tc.c.MarshalText()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if s := string(bs); s != tc.s {
|
||||
t.Errorf("%d marshalled to %q, not %q", tc.c, s, tc.s)
|
||||
}
|
||||
}
|
||||
}
|
||||
62  Godeps/_workspace/src/github.com/syncthing/protocol/counting.go  generated  vendored  Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type countingReader struct {
|
||||
io.Reader
|
||||
tot int64 // bytes
|
||||
last int64 // unix nanos
|
||||
}
|
||||
|
||||
var (
|
||||
totalIncoming int64
|
||||
totalOutgoing int64
|
||||
)
|
||||
|
||||
func (c *countingReader) Read(bs []byte) (int, error) {
|
||||
n, err := c.Reader.Read(bs)
|
||||
atomic.AddInt64(&c.tot, int64(n))
|
||||
atomic.AddInt64(&totalIncoming, int64(n))
|
||||
atomic.StoreInt64(&c.last, time.Now().UnixNano())
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *countingReader) Tot() int64 {
|
||||
return atomic.LoadInt64(&c.tot)
|
||||
}
|
||||
|
||||
func (c *countingReader) Last() time.Time {
|
||||
return time.Unix(0, atomic.LoadInt64(&c.last))
|
||||
}
|
||||
|
||||
type countingWriter struct {
|
||||
io.Writer
|
||||
tot int64 // bytes
|
||||
last int64 // unix nanos
|
||||
}
|
||||
|
||||
func (c *countingWriter) Write(bs []byte) (int, error) {
|
||||
n, err := c.Writer.Write(bs)
|
||||
atomic.AddInt64(&c.tot, int64(n))
|
||||
atomic.AddInt64(&totalOutgoing, int64(n))
|
||||
atomic.StoreInt64(&c.last, time.Now().UnixNano())
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *countingWriter) Tot() int64 {
|
||||
return atomic.LoadInt64(&c.tot)
|
||||
}
|
||||
|
||||
func (c *countingWriter) Last() time.Time {
|
||||
return time.Unix(0, atomic.LoadInt64(&c.last))
|
||||
}
|
||||
|
||||
func TotalInOut() (int64, int64) {
|
||||
return atomic.LoadInt64(&totalIncoming), atomic.LoadInt64(&totalOutgoing)
|
||||
}
|
||||
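countingReader and countingWriter are unexported; only TotalInOut is visible outside the package. The pattern itself is an ordinary io.Reader/io.Writer decorator that tallies bytes with sync/atomic. A standalone sketch of the same idea, using hypothetical names rather than the package's own types:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// byteCountingReader wraps any io.Reader and tallies the bytes read through it.
type byteCountingReader struct {
	io.Reader
	total int64
}

func (c *byteCountingReader) Read(p []byte) (int, error) {
	n, err := c.Reader.Read(p)
	atomic.AddInt64(&c.total, int64(n))
	return n, err
}

func main() {
	cr := &byteCountingReader{Reader: strings.NewReader("hello, syncthing")}
	if _, err := io.Copy(io.Discard, cr); err != nil { // io.Discard needs Go 1.16+
		panic(err)
	}
	fmt.Println(atomic.LoadInt64(&cr.total), "bytes read") // 16 bytes read
}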
15  Godeps/_workspace/src/github.com/syncthing/protocol/debug.go  generated  vendored  Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/calmh/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
|
||||
l = logger.DefaultLogger
|
||||
)
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
@@ -24,7 +11,7 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/luhn"
|
||||
"github.com/calmh/luhn"
|
||||
)
|
||||
|
||||
type DeviceID [32]byte
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
4  Godeps/_workspace/src/github.com/syncthing/protocol/doc.go  generated  vendored  Normal file
@@ -0,0 +1,4 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
// Package protocol implements the Block Exchange Protocol.
|
||||
package protocol
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
122  Godeps/_workspace/src/github.com/syncthing/protocol/message.go  generated  vendored  Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
//go:generate genxdr -o message_xdr.go message.go
|
||||
|
||||
package protocol
|
||||
|
||||
import "fmt"
|
||||
|
||||
type IndexMessage struct {
|
||||
Folder string
|
||||
Files []FileInfo
|
||||
Flags uint32
|
||||
Options []Option // max:64
|
||||
}
|
||||
|
||||
type FileInfo struct {
|
||||
Name string // max:8192
|
||||
Flags uint32
|
||||
Modified int64
|
||||
Version int64
|
||||
LocalVersion int64
|
||||
Blocks []BlockInfo
|
||||
}
|
||||
|
||||
func (f FileInfo) String() string {
|
||||
return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, Blocks:%v}",
|
||||
f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
|
||||
}
|
||||
|
||||
func (f FileInfo) Size() (bytes int64) {
|
||||
if f.IsDeleted() || f.IsDirectory() {
|
||||
return 128
|
||||
}
|
||||
for _, b := range f.Blocks {
|
||||
bytes += int64(b.Size)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f FileInfo) IsDeleted() bool {
|
||||
return f.Flags&FlagDeleted != 0
|
||||
}
|
||||
|
||||
func (f FileInfo) IsInvalid() bool {
|
||||
return f.Flags&FlagInvalid != 0
|
||||
}
|
||||
|
||||
func (f FileInfo) IsDirectory() bool {
|
||||
return f.Flags&FlagDirectory != 0
|
||||
}
|
||||
|
||||
func (f FileInfo) IsSymlink() bool {
|
||||
return f.Flags&FlagSymlink != 0
|
||||
}
|
||||
|
||||
func (f FileInfo) HasPermissionBits() bool {
|
||||
return f.Flags&FlagNoPermBits == 0
|
||||
}
|
||||
|
||||
type BlockInfo struct {
|
||||
Offset int64 // noencode (cache only)
|
||||
Size int32
|
||||
Hash []byte // max:64
|
||||
}
|
||||
|
||||
func (b BlockInfo) String() string {
|
||||
return fmt.Sprintf("Block{%d/%d/%x}", b.Offset, b.Size, b.Hash)
|
||||
}
|
||||
|
||||
type RequestMessage struct {
|
||||
Folder string // max:64
|
||||
Name string // max:8192
|
||||
Offset int64
|
||||
Size int32
|
||||
Hash []byte // max:64
|
||||
Flags uint32
|
||||
Options []Option // max:64
|
||||
}
|
||||
|
||||
type ResponseMessage struct {
|
||||
Data []byte
|
||||
Error int32
|
||||
}
|
||||
|
||||
type ClusterConfigMessage struct {
|
||||
ClientName string // max:64
|
||||
ClientVersion string // max:64
|
||||
Folders []Folder
|
||||
Options []Option // max:64
|
||||
}
|
||||
|
||||
func (o *ClusterConfigMessage) GetOption(key string) string {
|
||||
for _, option := range o.Options {
|
||||
if option.Key == key {
|
||||
return option.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Folder struct {
|
||||
ID string // max:64
|
||||
Devices []Device
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
ID []byte // max:32
|
||||
Flags uint32
|
||||
MaxLocalVersion int64
|
||||
}
|
||||
|
||||
type Option struct {
|
||||
Key string // max:64
|
||||
Value string // max:1024
|
||||
}
|
||||
|
||||
type CloseMessage struct {
|
||||
Reason string // max:1024
|
||||
Code int32
|
||||
}
|
||||
|
||||
type EmptyMessage struct{}
|
||||
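The helper methods on FileInfo are plain bit tests against the Flag* constants defined later in the package, and Size sums the block sizes (with a fixed 128-byte charge for deleted files and directories). A small illustrative sketch, with made-up field values:

package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	f := protocol.FileInfo{
		Name:     "docs/readme.txt",
		Flags:    0644, // plain permission bits; none of the Flag* bits set
		Modified: 1420070400,
		Blocks: []protocol.BlockInfo{
			{Size: 131072, Hash: make([]byte, 32)},
			{Size: 4096, Hash: make([]byte, 32)},
		},
	}

	fmt.Println(f.IsDeleted(), f.IsDirectory(), f.IsSymlink()) // false false false
	fmt.Println(f.Size())                                      // 135168, the sum of the block sizes
	fmt.Println(f)                                             // formatted via the String method above
}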
@@ -30,11 +30,21 @@ IndexMessage Structure:
|
||||
\ Zero or more FileInfo Structures \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Flags |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of Options |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Zero or more Option Structures \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct IndexMessage {
|
||||
string Folder<64>;
|
||||
string Folder<>;
|
||||
FileInfo Files<>;
|
||||
unsigned int Flags;
|
||||
Option Options<64>;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -64,9 +74,6 @@ func (o IndexMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Folder); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
|
||||
}
|
||||
xw.WriteString(o.Folder)
|
||||
xw.WriteUint32(uint32(len(o.Files)))
|
||||
for i := range o.Files {
|
||||
@@ -75,6 +82,17 @@ func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
}
|
||||
xw.WriteUint32(o.Flags)
|
||||
if l := len(o.Options); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Options", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -90,12 +108,21 @@ func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadStringMax(64)
|
||||
o.Folder = xr.ReadString()
|
||||
_FilesSize := int(xr.ReadUint32())
|
||||
o.Files = make([]FileInfo, _FilesSize)
|
||||
for i := range o.Files {
|
||||
(&o.Files[i]).decodeXDR(xr)
|
||||
}
|
||||
o.Flags = xr.ReadUint32()
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
if _OptionsSize > 64 {
|
||||
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
@@ -138,8 +165,8 @@ struct FileInfo {
|
||||
string Name<8192>;
|
||||
unsigned int Flags;
|
||||
hyper Modified;
|
||||
unsigned hyper Version;
|
||||
unsigned hyper LocalVersion;
|
||||
hyper Version;
|
||||
hyper LocalVersion;
|
||||
BlockInfo Blocks<>;
|
||||
}
|
||||
|
||||
@@ -176,8 +203,8 @@ func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint32(o.Flags)
|
||||
xw.WriteUint64(uint64(o.Modified))
|
||||
xw.WriteUint64(o.Version)
|
||||
xw.WriteUint64(o.LocalVersion)
|
||||
xw.WriteUint64(uint64(o.Version))
|
||||
xw.WriteUint64(uint64(o.LocalVersion))
|
||||
xw.WriteUint32(uint32(len(o.Blocks)))
|
||||
for i := range o.Blocks {
|
||||
_, err := o.Blocks[i].encodeXDR(xw)
|
||||
@@ -203,8 +230,8 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Flags = xr.ReadUint32()
|
||||
o.Modified = int64(xr.ReadUint64())
|
||||
o.Version = xr.ReadUint64()
|
||||
o.LocalVersion = xr.ReadUint64()
|
||||
o.Version = int64(xr.ReadUint64())
|
||||
o.LocalVersion = int64(xr.ReadUint64())
|
||||
_BlocksSize := int(xr.ReadUint32())
|
||||
o.Blocks = make([]BlockInfo, _BlocksSize)
|
||||
for i := range o.Blocks {
|
||||
@@ -215,106 +242,6 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
|
||||
|
||||
/*
|
||||
|
||||
FileInfoTruncated Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Name |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Name (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Flags |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Modified (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Version (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Local Version (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Num Blocks |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct FileInfoTruncated {
|
||||
string Name<8192>;
|
||||
unsigned int Flags;
|
||||
hyper Modified;
|
||||
unsigned hyper Version;
|
||||
unsigned hyper LocalVersion;
|
||||
unsigned int NumBlocks;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o FileInfoTruncated) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o FileInfoTruncated) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Name); l > 8192 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint32(o.Flags)
|
||||
xw.WriteUint64(uint64(o.Modified))
|
||||
xw.WriteUint64(o.Version)
|
||||
xw.WriteUint64(o.LocalVersion)
|
||||
xw.WriteUint32(o.NumBlocks)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *FileInfoTruncated) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *FileInfoTruncated) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *FileInfoTruncated) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Flags = xr.ReadUint32()
|
||||
o.Modified = int64(xr.ReadUint64())
|
||||
o.Version = xr.ReadUint64()
|
||||
o.LocalVersion = xr.ReadUint64()
|
||||
o.NumBlocks = xr.ReadUint32()
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
BlockInfo Structure:
|
||||
|
||||
0 1 2 3
|
||||
@@ -331,7 +258,7 @@ BlockInfo Structure:
|
||||
|
||||
|
||||
struct BlockInfo {
|
||||
unsigned int Size;
|
||||
int Size;
|
||||
opaque Hash<64>;
|
||||
}
|
||||
|
||||
@@ -362,7 +289,7 @@ func (o BlockInfo) AppendXDR(bs []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(o.Size)
|
||||
xw.WriteUint32(uint32(o.Size))
|
||||
if l := len(o.Hash); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Hash", l, 64)
|
||||
}
|
||||
@@ -382,7 +309,7 @@ func (o *BlockInfo) UnmarshalXDR(bs []byte) error {
|
||||
}
|
||||
|
||||
func (o *BlockInfo) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Size = xr.ReadUint32()
|
||||
o.Size = int32(xr.ReadUint32())
|
||||
o.Hash = xr.ReadBytesMax(64)
|
||||
return xr.Error()
|
||||
}
|
||||
@@ -412,13 +339,30 @@ RequestMessage Structure:
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Size |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Hash |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Hash (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Flags |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of Options |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Zero or more Option Structures \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct RequestMessage {
|
||||
string Folder<64>;
|
||||
string Name<8192>;
|
||||
unsigned hyper Offset;
|
||||
unsigned int Size;
|
||||
hyper Offset;
|
||||
int Size;
|
||||
opaque Hash<64>;
|
||||
unsigned int Flags;
|
||||
Option Options<64>;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -456,8 +400,23 @@ func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint64(o.Offset)
|
||||
xw.WriteUint32(o.Size)
|
||||
xw.WriteUint64(uint64(o.Offset))
|
||||
xw.WriteUint32(uint32(o.Size))
|
||||
if l := len(o.Hash); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Hash", l, 64)
|
||||
}
|
||||
xw.WriteBytes(o.Hash)
|
||||
xw.WriteUint32(o.Flags)
|
||||
if l := len(o.Options); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Options", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -475,8 +434,18 @@ func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
|
||||
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadStringMax(64)
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Offset = xr.ReadUint64()
|
||||
o.Size = xr.ReadUint32()
|
||||
o.Offset = int64(xr.ReadUint64())
|
||||
o.Size = int32(xr.ReadUint32())
|
||||
o.Hash = xr.ReadBytesMax(64)
|
||||
o.Flags = xr.ReadUint32()
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
if _OptionsSize > 64 {
|
||||
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
@@ -493,10 +462,13 @@ ResponseMessage Structure:
|
||||
\ Data (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Error |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct ResponseMessage {
|
||||
opaque Data<>;
|
||||
int Error;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -527,6 +499,7 @@ func (o ResponseMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
|
||||
func (o ResponseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.Data)
|
||||
xw.WriteUint32(uint32(o.Error))
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -543,6 +516,7 @@ func (o *ResponseMessage) UnmarshalXDR(bs []byte) error {
|
||||
|
||||
func (o *ResponseMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Data = xr.ReadBytes()
|
||||
o.Error = int32(xr.ReadUint32())
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
@@ -582,7 +556,7 @@ ClusterConfigMessage Structure:
|
||||
struct ClusterConfigMessage {
|
||||
string ClientName<64>;
|
||||
string ClientVersion<64>;
|
||||
Folder Folders<64>;
|
||||
Folder Folders<>;
|
||||
Option Options<64>;
|
||||
}
|
||||
|
||||
@@ -621,9 +595,6 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ClientVersion", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ClientVersion)
|
||||
if l := len(o.Folders); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folders", l, 64)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Folders)))
|
||||
for i := range o.Folders {
|
||||
_, err := o.Folders[i].encodeXDR(xw)
|
||||
@@ -659,9 +630,6 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.ClientName = xr.ReadStringMax(64)
|
||||
o.ClientVersion = xr.ReadStringMax(64)
|
||||
_FoldersSize := int(xr.ReadUint32())
|
||||
if _FoldersSize > 64 {
|
||||
return xdr.ElementSizeExceeded("Folders", _FoldersSize, 64)
|
||||
}
|
||||
o.Folders = make([]Folder, _FoldersSize)
|
||||
for i := range o.Folders {
|
||||
(&o.Folders[i]).decodeXDR(xr)
|
||||
@@ -789,7 +757,7 @@ Device Structure:
|
||||
struct Device {
|
||||
opaque ID<32>;
|
||||
unsigned int Flags;
|
||||
unsigned hyper MaxLocalVersion;
|
||||
hyper MaxLocalVersion;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -824,7 +792,7 @@ func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteBytes(o.ID)
|
||||
xw.WriteUint32(o.Flags)
|
||||
xw.WriteUint64(o.MaxLocalVersion)
|
||||
xw.WriteUint64(uint64(o.MaxLocalVersion))
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -842,7 +810,7 @@ func (o *Device) UnmarshalXDR(bs []byte) error {
|
||||
func (o *Device) decodeXDR(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadBytesMax(32)
|
||||
o.Flags = xr.ReadUint32()
|
||||
o.MaxLocalVersion = xr.ReadUint64()
|
||||
o.MaxLocalVersion = int64(xr.ReadUint64())
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
@@ -940,10 +908,13 @@ CloseMessage Structure:
|
||||
\ Reason (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Code |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct CloseMessage {
|
||||
string Reason<1024>;
|
||||
int Code;
|
||||
}
|
||||
|
||||
*/
|
||||
@@ -977,6 +948,7 @@ func (o CloseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Reason", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.Reason)
|
||||
xw.WriteUint32(uint32(o.Code))
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
@@ -993,6 +965,7 @@ func (o *CloseMessage) UnmarshalXDR(bs []byte) error {
|
||||
|
||||
func (o *CloseMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
o.Reason = xr.ReadStringMax(1024)
|
||||
o.Code = int32(xr.ReadUint32())
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
// +build darwin
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
// +build !windows,!darwin
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
// +build windows
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
@@ -56,6 +43,8 @@ const (
|
||||
FlagSymlink = 1 << 16
|
||||
FlagSymlinkMissingTarget = 1 << 17
|
||||
|
||||
FlagsAll = (1 << 18) - 1
|
||||
|
||||
SymlinkTypeMask = FlagDirectory | FlagSymlinkMissingTarget
|
||||
)
|
||||
|
||||
@@ -71,6 +60,10 @@ var (
|
||||
ErrClosed = errors.New("connection closed")
|
||||
)
|
||||
|
||||
// Specific variants of empty messages...
|
||||
type pingMessage struct{ EmptyMessage }
|
||||
type pongMessage struct{ EmptyMessage }
|
||||
|
||||
type Model interface {
|
||||
// An index was received from the peer device
|
||||
Index(deviceID DeviceID, folder string, files []FileInfo)
|
||||
@@ -113,7 +106,7 @@ type rawConnection struct {
|
||||
closed chan struct{}
|
||||
once sync.Once
|
||||
|
||||
compressionThreshold int // compress messages larger than this many bytes
|
||||
compression Compression
|
||||
|
||||
rdbuf0 []byte // used & reused by readMessage
|
||||
rdbuf1 []byte // used & reused by readMessage
|
||||
@@ -133,30 +126,30 @@ type encodable interface {
|
||||
AppendXDR([]byte) ([]byte, error)
|
||||
}
|
||||
|
||||
type isEofer interface {
|
||||
IsEOF() bool
|
||||
}
|
||||
|
||||
const (
|
||||
pingTimeout = 30 * time.Second
|
||||
pingIdleTime = 60 * time.Second
|
||||
)
|
||||
|
||||
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress bool) Connection {
|
||||
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
|
||||
cr := &countingReader{Reader: reader}
|
||||
cw := &countingWriter{Writer: writer}
|
||||
|
||||
compThres := 1<<31 - 1 // compression disabled
|
||||
if compress {
|
||||
compThres = 128 // compress messages that are 128 bytes long or larger
|
||||
}
|
||||
c := rawConnection{
|
||||
id: deviceID,
|
||||
name: name,
|
||||
receiver: nativeModel{receiver},
|
||||
state: stateInitial,
|
||||
cr: cr,
|
||||
cw: cw,
|
||||
outbox: make(chan hdrMsg),
|
||||
nextID: make(chan int),
|
||||
closed: make(chan struct{}),
|
||||
compressionThreshold: compThres,
|
||||
id: deviceID,
|
||||
name: name,
|
||||
receiver: nativeModel{receiver},
|
||||
state: stateInitial,
|
||||
cr: cr,
|
||||
cw: cw,
|
||||
outbox: make(chan hdrMsg),
|
||||
nextID: make(chan int),
|
||||
closed: make(chan struct{}),
|
||||
compression: compress,
|
||||
}
|
||||
|
||||
go c.readerLoop()
|
||||
@@ -183,7 +176,10 @@ func (c *rawConnection) Index(folder string, idx []FileInfo) error {
|
||||
default:
|
||||
}
|
||||
c.idxMut.Lock()
|
||||
c.send(-1, messageTypeIndex, IndexMessage{folder, idx})
|
||||
c.send(-1, messageTypeIndex, IndexMessage{
|
||||
Folder: folder,
|
||||
Files: idx,
|
||||
})
|
||||
c.idxMut.Unlock()
|
||||
return nil
|
||||
}
|
||||
@@ -196,7 +192,10 @@ func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
|
||||
default:
|
||||
}
|
||||
c.idxMut.Lock()
|
||||
c.send(-1, messageTypeIndexUpdate, IndexMessage{folder, idx})
|
||||
c.send(-1, messageTypeIndexUpdate, IndexMessage{
|
||||
Folder: folder,
|
||||
Files: idx,
|
||||
})
|
||||
c.idxMut.Unlock()
|
||||
return nil
|
||||
}
|
||||
@@ -218,7 +217,12 @@ func (c *rawConnection) Request(folder string, name string, offset int64, size i
|
||||
c.awaiting[id] = rc
|
||||
c.awaitingMut.Unlock()
|
||||
|
||||
ok := c.send(id, messageTypeRequest, RequestMessage{folder, name, uint64(offset), uint32(size)})
|
||||
ok := c.send(id, messageTypeRequest, RequestMessage{
|
||||
Folder: folder,
|
||||
Name: name,
|
||||
Offset: offset,
|
||||
Size: int32(size),
|
||||
})
|
||||
if !ok {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
@@ -274,48 +278,60 @@ func (c *rawConnection) readerLoop() (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
switch hdr.msgType {
|
||||
case messageTypeIndex:
|
||||
if c.state < stateCCRcvd {
|
||||
return fmt.Errorf("protocol error: index message in state %d", c.state)
|
||||
switch msg := msg.(type) {
|
||||
case IndexMessage:
|
||||
if msg.Flags != 0 {
|
||||
// We don't currently support or expect any flags.
|
||||
return fmt.Errorf("protocol error: unknown flags 0x%x in Index(Update) message", msg.Flags)
|
||||
}
|
||||
c.handleIndex(msg.(IndexMessage))
|
||||
c.state = stateIdxRcvd
|
||||
|
||||
case messageTypeIndexUpdate:
|
||||
if c.state < stateIdxRcvd {
|
||||
return fmt.Errorf("protocol error: index update message in state %d", c.state)
|
||||
switch hdr.msgType {
|
||||
case messageTypeIndex:
|
||||
if c.state < stateCCRcvd {
|
||||
return fmt.Errorf("protocol error: index message in state %d", c.state)
|
||||
}
|
||||
c.handleIndex(msg)
|
||||
c.state = stateIdxRcvd
|
||||
|
||||
case messageTypeIndexUpdate:
|
||||
if c.state < stateIdxRcvd {
|
||||
return fmt.Errorf("protocol error: index update message in state %d", c.state)
|
||||
}
|
||||
c.handleIndexUpdate(msg)
|
||||
}
|
||||
c.handleIndexUpdate(msg.(IndexMessage))
|
||||
|
||||
case messageTypeRequest:
|
||||
case RequestMessage:
|
||||
if msg.Flags != 0 {
|
||||
// We don't currently support or expect any flags.
|
||||
return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", msg.Flags)
|
||||
}
|
||||
if c.state < stateIdxRcvd {
|
||||
return fmt.Errorf("protocol error: request message in state %d", c.state)
|
||||
}
|
||||
// Requests are handled asynchronously
|
||||
go c.handleRequest(hdr.msgID, msg.(RequestMessage))
|
||||
go c.handleRequest(hdr.msgID, msg)
|
||||
|
||||
case messageTypeResponse:
|
||||
case ResponseMessage:
|
||||
if c.state < stateIdxRcvd {
|
||||
return fmt.Errorf("protocol error: response message in state %d", c.state)
|
||||
}
|
||||
c.handleResponse(hdr.msgID, msg.(ResponseMessage))
|
||||
c.handleResponse(hdr.msgID, msg)
|
||||
|
||||
case messageTypePing:
|
||||
c.send(hdr.msgID, messageTypePong, EmptyMessage{})
|
||||
case pingMessage:
|
||||
c.send(hdr.msgID, messageTypePong, pongMessage{})
|
||||
|
||||
case messageTypePong:
|
||||
case pongMessage:
|
||||
c.handlePong(hdr.msgID)
|
||||
|
||||
case messageTypeClusterConfig:
|
||||
case ClusterConfigMessage:
|
||||
if c.state != stateInitial {
|
||||
return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
|
||||
}
|
||||
go c.receiver.ClusterConfig(c.id, msg.(ClusterConfigMessage))
|
||||
go c.receiver.ClusterConfig(c.id, msg)
|
||||
c.state = stateCCRcvd
|
||||
|
||||
case messageTypeClose:
|
||||
return errors.New(msg.(CloseMessage).Reason)
|
||||
case CloseMessage:
|
||||
return errors.New(msg.Reason)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
|
||||
@@ -341,6 +357,11 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
l.Debugf("read header %v (msglen=%d)", hdr, msglen)
|
||||
}
|
||||
|
||||
if hdr.version != 0 {
|
||||
err = fmt.Errorf("unknown protocol version 0x%x", hdr.version)
|
||||
return
|
||||
}
|
||||
|
||||
if cap(c.rdbuf0) < msglen {
|
||||
c.rdbuf0 = make([]byte, msglen)
|
||||
} else {
|
||||
@@ -376,33 +397,58 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// We check each returned error for the XDRError.IsEOF() method.
|
||||
// IsEOF()==true here means that the message contained fewer fields than
|
||||
// expected. It does not signify an EOF on the socket, because we've
|
||||
// successfully read a size value and that many bytes already. New fields
|
||||
// we expected but the other peer didn't send should be interpreted as
|
||||
// zero/nil, and if that's not valid we'll verify it somewhere else.
|
||||
|
||||
switch hdr.msgType {
|
||||
case messageTypeIndex, messageTypeIndexUpdate:
|
||||
var idx IndexMessage
|
||||
err = idx.UnmarshalXDR(msgBuf)
|
||||
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
|
||||
err = nil
|
||||
}
|
||||
msg = idx
|
||||
|
||||
case messageTypeRequest:
|
||||
var req RequestMessage
|
||||
err = req.UnmarshalXDR(msgBuf)
|
||||
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
|
||||
err = nil
|
||||
}
|
||||
msg = req
|
||||
|
||||
case messageTypeResponse:
|
||||
var resp ResponseMessage
|
||||
err = resp.UnmarshalXDR(msgBuf)
|
||||
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
|
||||
err = nil
|
||||
}
|
||||
msg = resp
|
||||
|
||||
case messageTypePing, messageTypePong:
|
||||
msg = EmptyMessage{}
|
||||
case messageTypePing:
|
||||
msg = pingMessage{}
|
||||
|
||||
case messageTypePong:
|
||||
msg = pongMessage{}
|
||||
|
||||
case messageTypeClusterConfig:
|
||||
var cc ClusterConfigMessage
|
||||
err = cc.UnmarshalXDR(msgBuf)
|
||||
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
|
||||
err = nil
|
||||
}
|
||||
msg = cc
|
||||
|
||||
case messageTypeClose:
|
||||
var cm CloseMessage
|
||||
err = cm.UnmarshalXDR(msgBuf)
|
||||
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
|
||||
err = nil
|
||||
}
|
||||
msg = cm
|
||||
|
||||
default:
|
||||
@@ -416,20 +462,48 @@ func (c *rawConnection) handleIndex(im IndexMessage) {
|
||||
if debug {
|
||||
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
|
||||
}
|
||||
c.receiver.Index(c.id, im.Folder, im.Files)
|
||||
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files))
|
||||
}
|
||||
|
||||
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
|
||||
if debug {
|
||||
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
|
||||
}
|
||||
c.receiver.IndexUpdate(c.id, im.Folder, im.Files)
|
||||
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files))
|
||||
}
|
||||
|
||||
func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
|
||||
var out []FileInfo
|
||||
for i, f := range fs {
|
||||
switch f.Name {
|
||||
case "", ".", "..", "/": // A few obviously invalid filenames
|
||||
l.Infof("Dropping invalid filename %q from incoming index", f.Name)
|
||||
if out == nil {
|
||||
// Most incoming updates won't contain anything invalid, so we
|
||||
// delay the allocation and copy to output slice until we
|
||||
// really need to do it, then copy all the so far valid files
|
||||
// to it.
|
||||
out = make([]FileInfo, i, len(fs)-1)
|
||||
copy(out, fs)
|
||||
}
|
||||
default:
|
||||
if out != nil {
|
||||
out = append(out, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if out != nil {
|
||||
return out
|
||||
}
|
||||
return fs
|
||||
}
|
||||
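A hypothetical in-package test (not part of this diff) showing what the filter does: invalid names are dropped, the remaining files keep their order, and a batch with nothing invalid is returned as the original slice.

package protocol

import "testing"

func TestFilterIndexMessageFiles(t *testing.T) {
	in := []FileInfo{
		{Name: "valid1"},
		{Name: ".."}, // one of the obviously invalid names
		{Name: "valid2"},
	}
	out := filterIndexMessageFiles(in)
	if len(out) != 2 || out[0].Name != "valid1" || out[1].Name != "valid2" {
		t.Errorf("unexpected filtered result: %v", out)
	}

	clean := []FileInfo{{Name: "a"}, {Name: "b"}}
	if got := filterIndexMessageFiles(clean); len(got) != 2 {
		t.Errorf("clean input should pass through untouched, got %v", got)
	}
}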
|
||||
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
|
||||
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
|
||||
|
||||
c.send(msgID, messageTypeResponse, ResponseMessage{data})
|
||||
c.send(msgID, messageTypeResponse, ResponseMessage{
|
||||
Data: data,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *rawConnection) handleResponse(msgID int, resp ResponseMessage) {
|
||||
@@ -493,7 +567,15 @@ func (c *rawConnection) writerLoop() {
|
||||
return
|
||||
}
|
||||
|
||||
if len(uncBuf) >= c.compressionThreshold {
|
||||
compress := false
|
||||
switch c.compression {
|
||||
case CompressAlways:
|
||||
compress = true
|
||||
case CompressMetadata:
|
||||
compress = hm.hdr.msgType != messageTypeResponse
|
||||
}
|
||||
|
||||
if compress && len(uncBuf) >= compressionThreshold {
|
||||
// Use compression for large messages
|
||||
hm.hdr.compression = true
|
||||
|
||||
@@ -630,8 +712,8 @@ func (c *rawConnection) pingerLoop() {
|
||||
|
||||
type Statistics struct {
|
||||
At time.Time
|
||||
InBytesTotal uint64
|
||||
OutBytesTotal uint64
|
||||
InBytesTotal int64
|
||||
OutBytesTotal int64
|
||||
}
|
||||
|
||||
func (c *rawConnection) Statistics() Statistics {
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
@@ -80,8 +67,8 @@ func TestPing(t *testing.T) {
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
c0 := NewConnection(c0ID, ar, bw, nil, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
c1 := NewConnection(c1ID, br, aw, nil, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
|
||||
if ok := c0.ping(); !ok {
|
||||
t.Error("c0 ping failed")
|
||||
@@ -104,8 +91,8 @@ func TestPingErr(t *testing.T) {
|
||||
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
|
||||
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
|
||||
|
||||
c0 := NewConnection(c0ID, ar, ebw, m0, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, eaw, m1, "name", true)
|
||||
c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
|
||||
|
||||
res := c0.ping()
|
||||
if (i < 8 || j < 8) && res {
|
||||
@@ -180,8 +167,8 @@ func TestVersionErr(t *testing.T) {
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", true)
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
||||
|
||||
w := xdr.NewWriter(c0.cw)
|
||||
w.WriteUint32(encodeHeader(header{
|
||||
@@ -189,7 +176,7 @@ func TestVersionErr(t *testing.T) {
|
||||
msgID: 0,
|
||||
msgType: 0,
|
||||
}))
|
||||
w.WriteUint32(0)
|
||||
w.WriteUint32(0) // Avoids reader closing due to EOF
|
||||
|
||||
if !m1.isClosed() {
|
||||
t.Error("Connection should close due to unknown version")
|
||||
@@ -203,8 +190,8 @@ func TestTypeErr(t *testing.T) {
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", true)
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
||||
|
||||
w := xdr.NewWriter(c0.cw)
|
||||
w.WriteUint32(encodeHeader(header{
|
||||
@@ -212,7 +199,7 @@ func TestTypeErr(t *testing.T) {
|
||||
msgID: 0,
|
||||
msgType: 42,
|
||||
}))
|
||||
w.WriteUint32(0)
|
||||
w.WriteUint32(0) // Avoids reader closing due to EOF
|
||||
|
||||
if !m1.isClosed() {
|
||||
t.Error("Connection should close due to unknown message type")
|
||||
@@ -226,8 +213,8 @@ func TestClose(t *testing.T) {
|
||||
ar, aw := io.Pipe()
|
||||
br, bw := io.Pipe()
|
||||
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", true).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", true)
|
||||
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
|
||||
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
|
||||
|
||||
c0.close(nil)
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// Copyright (C) 2014 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build go1.3
|
||||
// +build !go1.2
|
||||
|
||||
package leveldb
|
||||
|
||||
30 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go (generated, vendored, Normal file)
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !go1.2
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkLRUCache(b *testing.B) {
|
||||
c := NewCache(NewLRU(10000))
|
||||
|
||||
b.SetParallelism(10)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for pb.Next() {
|
||||
key := uint64(r.Intn(1000000))
|
||||
c.Get(0, key, func() (int, Value) {
|
||||
return 1, key
|
||||
}).Release()
|
||||
}
|
||||
})
|
||||
}
|
||||
753 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go (generated, vendored)
@@ -8,152 +8,669 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// SetFunc is the function that will be called by Namespace.Get to create
|
||||
// a cache object, if charge is less than one than the cache object will
|
||||
// not be registered to cache tree, if value is nil then the cache object
|
||||
// will not be created.
|
||||
type SetFunc func() (charge int, value interface{})
|
||||
|
||||
// DelFin is the function that will be called as the result of a delete operation.
|
||||
// Exist == true is indication that the object is exist, and pending == true is
|
||||
// indication of deletion already happen but haven't done yet (wait for all handles
|
||||
// to be released). And exist == false means the object doesn't exist.
|
||||
type DelFin func(exist, pending bool)
|
||||
|
||||
// PurgeFin is the function that will be called as the result of a purge operation.
|
||||
type PurgeFin func(ns, key uint64)
|
||||
|
||||
// Cache is a cache tree. A cache instance must be goroutine-safe.
|
||||
type Cache interface {
|
||||
// SetCapacity sets cache tree capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Capacity returns cache tree capacity.
|
||||
// Cacher provides interface to implements a caching functionality.
|
||||
// An implementation must be goroutine-safe.
|
||||
type Cacher interface {
|
||||
// Capacity returns cache capacity.
|
||||
Capacity() int
|
||||
|
||||
// Used returns used cache tree capacity.
|
||||
Used() int
|
||||
// SetCapacity sets cache capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Size returns entire alive cache objects size.
|
||||
Size() int
|
||||
// Promote promotes the 'cache node'.
|
||||
Promote(n *Node)
|
||||
|
||||
// NumObjects returns number of alive objects.
|
||||
NumObjects() int
|
||||
// Ban evicts the 'cache node' and prevent subsequent 'promote'.
|
||||
Ban(n *Node)
|
||||
|
||||
// GetNamespace gets cache namespace with the given id.
|
||||
// GetNamespace is never return nil.
|
||||
GetNamespace(id uint64) Namespace
|
||||
// Evict evicts the 'cache node'.
|
||||
Evict(n *Node)
|
||||
|
||||
// PurgeNamespace purges cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Purge.
|
||||
PurgeNamespace(id uint64, fin PurgeFin)
|
||||
// EvictNS evicts 'cache node' with the given namespace.
|
||||
EvictNS(ns uint64)
|
||||
|
||||
// ZapNamespace detaches cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Zap.
|
||||
ZapNamespace(id uint64)
|
||||
// EvictAll evicts all 'cache node'.
|
||||
EvictAll()
|
||||
|
||||
// Purge purges all cache namespace from this cache tree.
|
||||
// This is behave the same as calling Namespace.Purge method on all cache namespace.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches all cache namespace from this cache tree.
|
||||
// This is behave the same as calling Namespace.Zap method on all cache namespace.
|
||||
Zap()
|
||||
// Close closes the 'cache tree'
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
|
||||
type Namespace interface {
|
||||
// Get gets cache object with the given key.
|
||||
// If cache object is not found and setf is not nil, Get will atomically creates
|
||||
// the cache object by calling setf. Otherwise Get will returns nil.
|
||||
//
|
||||
// The returned cache handle should be released after use by calling Release
|
||||
// method.
|
||||
Get(key uint64, setf SetFunc) Handle
|
||||
// Value is a 'cacheable object'. It may implements util.Releaser, if
|
||||
// so the the Release method will be called once object is released.
|
||||
type Value interface{}
|
||||
|
||||
// Delete removes cache object with the given key from cache tree.
|
||||
// A deleted cache object will be released as soon as all of its handles have
|
||||
// been released.
|
||||
// Delete only happen once, subsequent delete will consider cache object doesn't
|
||||
// exist, even if the cache object ins't released yet.
|
||||
//
|
||||
// If not nil, fin will be called if the cache object doesn't exist or when
|
||||
// finally be released.
|
||||
//
|
||||
// Delete returns true if such cache object exist and never been deleted.
|
||||
Delete(key uint64, fin DelFin) bool
|
||||
|
||||
// Purge removes all cache objects within this namespace from cache tree.
|
||||
// This is the same as doing delete on all cache objects.
|
||||
//
|
||||
// If not nil, fin will be called on all cache objects when its finally be
|
||||
// released.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches namespace from cache tree and release all its cache objects.
|
||||
// A zapped namespace can never be filled again.
|
||||
// Calling Get on zapped namespace will always return nil.
|
||||
Zap()
|
||||
type CacheGetter struct {
|
||||
Cache *Cache
|
||||
NS uint64
|
||||
}
|
||||
|
||||
// Handle is a cache handle.
|
||||
type Handle interface {
|
||||
// Release releases this cache handle. This method can be safely called mutiple
|
||||
// times.
|
||||
Release()
|
||||
|
||||
// Value returns value of this cache handle.
|
||||
// Value will returns nil after this cache handle have be released.
|
||||
Value() interface{}
|
||||
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
|
||||
return g.Cache.Get(g.NS, key, setFunc)
|
||||
}
|
||||
|
||||
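A short usage sketch for the CacheGetter helper above, which simply fixes the namespace for all lookups (the capacity and values are made up):

    c := NewCache(NewLRU(128))
    getter := &CacheGetter{Cache: c, NS: 1} // every Get goes to namespace 1
    h := getter.Get(42, func() (size int, value Value) {
        return 1, "answer" // only called on a miss
    })
    if h != nil {
        _ = h.Value()
        h.Release()
    }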
// The hash tables implementation is based on:
|
||||
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
|
||||
|
||||
const (
|
||||
DelNotExist = iota
|
||||
DelExist
|
||||
DelPendig
|
||||
mInitialSize = 1 << 4
|
||||
mOverflowThreshold = 1 << 5
|
||||
mOverflowGrowThreshold = 1 << 7
|
||||
)
|
||||
|
||||
// Namespace state.
|
||||
type nsState int
|
||||
|
||||
const (
|
||||
nsEffective nsState = iota
|
||||
nsZapped
|
||||
)
|
||||
|
||||
// Node state.
|
||||
type nodeState int
|
||||
|
||||
const (
|
||||
nodeZero nodeState = iota
|
||||
nodeEffective
|
||||
nodeEvicted
|
||||
nodeDeleted
|
||||
)
|
||||
|
||||
// Fake handle.
|
||||
type fakeHandle struct {
|
||||
value interface{}
|
||||
fin func()
|
||||
once uint32
|
||||
type mBucket struct {
|
||||
mu sync.Mutex
|
||||
node []*Node
|
||||
frozen bool
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.value
|
||||
func (b *mBucket) freeze() []*Node {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.frozen {
|
||||
b.frozen = true
|
||||
}
|
||||
return b.node
|
||||
}
|
||||
|
||||
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
|
||||
b.mu.Lock()
|
||||
|
||||
if b.frozen {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Scan the node.
|
||||
for _, n := range b.node {
|
||||
if n.hash == hash && n.ns == ns && n.key == key {
|
||||
atomic.AddInt32(&n.ref, 1)
|
||||
b.mu.Unlock()
|
||||
return true, false, n
|
||||
}
|
||||
}
|
||||
|
||||
// Get only.
|
||||
if noset {
|
||||
b.mu.Unlock()
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
// Create node.
|
||||
n = &Node{
|
||||
r: r,
|
||||
hash: hash,
|
||||
ns: ns,
|
||||
key: key,
|
||||
ref: 1,
|
||||
}
|
||||
// Add node to bucket.
|
||||
b.node = append(b.node, n)
|
||||
bLen := len(b.node)
|
||||
b.mu.Unlock()
|
||||
|
||||
// Update counter.
|
||||
grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
|
||||
if bLen > mOverflowThreshold {
|
||||
grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
|
||||
}
|
||||
|
||||
// Grow.
|
||||
if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
|
||||
nhLen := len(h.buckets) << 1
|
||||
nh := &mNode{
|
||||
buckets: make([]unsafe.Pointer, nhLen),
|
||||
mask: uint32(nhLen) - 1,
|
||||
pred: unsafe.Pointer(h),
|
||||
growThreshold: int32(nhLen * mOverflowThreshold),
|
||||
shrinkThreshold: int32(nhLen >> 1),
|
||||
}
|
||||
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
|
||||
if !ok {
|
||||
panic("BUG: failed swapping head")
|
||||
}
|
||||
go nh.initBuckets()
|
||||
}
|
||||
|
||||
return true, true, n
|
||||
}
|
||||
|
||||
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
|
||||
b.mu.Lock()
|
||||
|
||||
if b.frozen {
|
||||
b.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Scan the node.
|
||||
var (
|
||||
n *Node
|
||||
bLen int
|
||||
)
|
||||
for i := range b.node {
|
||||
n = b.node[i]
|
||||
if n.ns == ns && n.key == key {
|
||||
if atomic.LoadInt32(&n.ref) == 0 {
|
||||
deleted = true
|
||||
|
||||
// Call releaser.
|
||||
if n.value != nil {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
n.value = nil
|
||||
}
|
||||
|
||||
// Remove node from bucket.
|
||||
b.node = append(b.node[:i], b.node[i+1:]...)
|
||||
bLen = len(b.node)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
if deleted {
|
||||
// Call OnDel.
|
||||
for _, f := range n.onDel {
|
||||
f()
|
||||
}
|
||||
|
||||
// Update counter.
|
||||
atomic.AddInt32(&r.size, int32(n.size)*-1)
|
||||
shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
|
||||
if bLen >= mOverflowThreshold {
|
||||
atomic.AddInt32(&h.overflow, -1)
|
||||
}
|
||||
|
||||
// Shrink.
|
||||
if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
|
||||
nhLen := len(h.buckets) >> 1
|
||||
nh := &mNode{
|
||||
buckets: make([]unsafe.Pointer, nhLen),
|
||||
mask: uint32(nhLen) - 1,
|
||||
pred: unsafe.Pointer(h),
|
||||
growThreshold: int32(nhLen * mOverflowThreshold),
|
||||
shrinkThreshold: int32(nhLen >> 1),
|
||||
}
|
||||
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
|
||||
if !ok {
|
||||
panic("BUG: failed swapping head")
|
||||
}
|
||||
go nh.initBuckets()
|
||||
}
|
||||
}
|
||||
|
||||
return true, deleted
|
||||
}
|
||||
|
||||
type mNode struct {
|
||||
buckets []unsafe.Pointer // []*mBucket
|
||||
mask uint32
|
||||
pred unsafe.Pointer // *mNode
|
||||
resizeInProgess int32
|
||||
|
||||
overflow int32
|
||||
growThreshold int32
|
||||
shrinkThreshold int32
|
||||
}
|
||||
|
||||
func (n *mNode) initBucket(i uint32) *mBucket {
|
||||
if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
p := (*mNode)(atomic.LoadPointer(&n.pred))
|
||||
if p != nil {
|
||||
var node []*Node
|
||||
if n.mask > p.mask {
|
||||
// Grow.
|
||||
pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
|
||||
if pb == nil {
|
||||
pb = p.initBucket(i & p.mask)
|
||||
}
|
||||
m := pb.freeze()
|
||||
// Split nodes.
|
||||
for _, x := range m {
|
||||
if x.hash&n.mask == i {
|
||||
node = append(node, x)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Shrink.
|
||||
pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
|
||||
if pb0 == nil {
|
||||
pb0 = p.initBucket(i)
|
||||
}
|
||||
pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
|
||||
if pb1 == nil {
|
||||
pb1 = p.initBucket(i + uint32(len(n.buckets)))
|
||||
}
|
||||
m0 := pb0.freeze()
|
||||
m1 := pb1.freeze()
|
||||
// Merge nodes.
|
||||
node = make([]*Node, 0, len(m0)+len(m1))
|
||||
node = append(node, m0...)
|
||||
node = append(node, m1...)
|
||||
}
|
||||
b := &mBucket{node: node}
|
||||
if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
|
||||
if len(node) > mOverflowThreshold {
|
||||
atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
|
||||
}
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
|
||||
}
|
||||
|
||||
func (n *mNode) initBuckets() {
|
||||
for i := range n.buckets {
|
||||
n.initBucket(uint32(i))
|
||||
}
|
||||
atomic.StorePointer(&n.pred, nil)
|
||||
}
|
||||
|
||||
// Cache is a 'cache map'.
|
||||
type Cache struct {
|
||||
mu sync.RWMutex
|
||||
mHead unsafe.Pointer // *mNode
|
||||
nodes int32
|
||||
size int32
|
||||
cacher Cacher
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCache creates a new 'cache map'. The cacher is optional and
|
||||
// may be nil.
|
||||
func NewCache(cacher Cacher) *Cache {
|
||||
h := &mNode{
|
||||
buckets: make([]unsafe.Pointer, mInitialSize),
|
||||
mask: mInitialSize - 1,
|
||||
growThreshold: int32(mInitialSize * mOverflowThreshold),
|
||||
shrinkThreshold: 0,
|
||||
}
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] = unsafe.Pointer(&mBucket{})
|
||||
}
|
||||
r := &Cache{
|
||||
mHead: unsafe.Pointer(h),
|
||||
cacher: cacher,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
|
||||
h := (*mNode)(atomic.LoadPointer(&r.mHead))
|
||||
i := hash & h.mask
|
||||
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
|
||||
if b == nil {
|
||||
b = h.initBucket(i)
|
||||
}
|
||||
return h, b
|
||||
}
|
||||
|
||||
func (r *Cache) delete(n *Node) bool {
|
||||
for {
|
||||
h, b := r.getBucket(n.hash)
|
||||
done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
|
||||
if done {
|
||||
return deleted
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Nodes returns number of 'cache node' in the map.
|
||||
func (r *Cache) Nodes() int {
|
||||
return int(atomic.LoadInt32(&r.nodes))
|
||||
}
|
||||
|
||||
// Size returns sums of 'cache node' size in the map.
|
||||
func (r *Cache) Size() int {
|
||||
return int(atomic.LoadInt32(&r.size))
|
||||
}
|
||||
|
||||
// Capacity returns cache capacity.
|
||||
func (r *Cache) Capacity() int {
|
||||
if r.cacher == nil {
|
||||
return 0
|
||||
}
|
||||
return r.cacher.Capacity()
|
||||
}
|
||||
|
||||
// SetCapacity sets cache capacity.
|
||||
func (r *Cache) SetCapacity(capacity int) {
|
||||
if r.cacher != nil {
|
||||
r.cacher.SetCapacity(capacity)
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets 'cache node' with the given namespace and key.
|
||||
// If cache node is not found and setFunc is not nil, Get will atomically creates
|
||||
// the 'cache node' by calling setFunc. Otherwise Get will returns nil.
|
||||
//
|
||||
// The returned 'cache handle' should be released after use by calling Release
|
||||
// method.
|
||||
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
|
||||
if done {
|
||||
if n != nil {
|
||||
n.mu.Lock()
|
||||
if n.value == nil {
|
||||
if setFunc == nil {
|
||||
n.mu.Unlock()
|
||||
n.unref()
|
||||
return nil
|
||||
}
|
||||
|
||||
n.size, n.value = setFunc()
|
||||
if n.value == nil {
|
||||
n.size = 0
|
||||
n.mu.Unlock()
|
||||
n.unref()
|
||||
return nil
|
||||
}
|
||||
atomic.AddInt32(&r.size, int32(n.size))
|
||||
}
|
||||
n.mu.Unlock()
|
||||
if r.cacher != nil {
|
||||
r.cacher.Promote(n)
|
||||
}
|
||||
return &Handle{unsafe.Pointer(n)}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
// Delete removes and ban 'cache node' with the given namespace and key.
|
||||
// A banned 'cache node' will never inserted into the 'cache tree'. Ban
|
||||
// only attributed to the particular 'cache node', so when a 'cache node'
|
||||
// is recreated it will not be banned.
|
||||
//
|
||||
// If onDel is not nil, then it will be executed if such 'cache node'
|
||||
// doesn't exist or once the 'cache node' is released.
|
||||
//
|
||||
// Delete return true is such 'cache node' exist.
|
||||
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return false
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, true)
|
||||
if done {
|
||||
if n != nil {
|
||||
if onDel != nil {
|
||||
n.mu.Lock()
|
||||
n.onDel = append(n.onDel, onDel)
|
||||
n.mu.Unlock()
|
||||
}
|
||||
if r.cacher != nil {
|
||||
r.cacher.Ban(n)
|
||||
}
|
||||
n.unref()
|
||||
return true
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if onDel != nil {
|
||||
onDel()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Evict evicts 'cache node' with the given namespace and key. This will
|
||||
// simply call Cacher.Evict.
|
||||
//
|
||||
// Evict return true is such 'cache node' exist.
|
||||
func (r *Cache) Evict(ns, key uint64) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return false
|
||||
}
|
||||
|
||||
hash := murmur32(ns, key, 0xf00)
|
||||
for {
|
||||
h, b := r.getBucket(hash)
|
||||
done, _, n := b.get(r, h, hash, ns, key, true)
|
||||
if done {
|
||||
if n != nil {
|
||||
if r.cacher != nil {
|
||||
r.cacher.Evict(n)
|
||||
}
|
||||
n.unref()
|
||||
return true
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// EvictNS evicts 'cache node' with the given namespace. This will
|
||||
// simply call Cacher.EvictNS.
|
||||
func (r *Cache) EvictNS(ns uint64) {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return
|
||||
}
|
||||
if h.fin != nil {
|
||||
h.fin()
|
||||
h.fin = nil
|
||||
|
||||
if r.cacher != nil {
|
||||
r.cacher.EvictNS(ns)
|
||||
}
|
||||
}
|
||||
|
||||
// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
|
||||
func (r *Cache) EvictAll() {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
if r.closed {
|
||||
return
|
||||
}
|
||||
|
||||
if r.cacher != nil {
|
||||
r.cacher.EvictAll()
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the 'cache map' and releases all 'cache node'.
|
||||
func (r *Cache) Close() error {
|
||||
r.mu.Lock()
|
||||
if !r.closed {
|
||||
r.closed = true
|
||||
|
||||
if r.cacher != nil {
|
||||
if err := r.cacher.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
h := (*mNode)(r.mHead)
|
||||
h.initBuckets()
|
||||
|
||||
for i := range h.buckets {
|
||||
b := (*mBucket)(h.buckets[i])
|
||||
for _, n := range b.node {
|
||||
// Call releaser.
|
||||
if n.value != nil {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
n.value = nil
|
||||
}
|
||||
|
||||
// Call OnDel.
|
||||
for _, f := range n.onDel {
|
||||
f()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
r.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Node is a 'cache node'.
|
||||
type Node struct {
|
||||
r *Cache
|
||||
|
||||
hash uint32
|
||||
ns, key uint64
|
||||
|
||||
mu sync.Mutex
|
||||
size int
|
||||
value Value
|
||||
|
||||
ref int32
|
||||
onDel []func()
|
||||
|
||||
CacheData unsafe.Pointer
|
||||
}
|
||||
|
||||
// NS returns this 'cache node' namespace.
|
||||
func (n *Node) NS() uint64 {
|
||||
return n.ns
|
||||
}
|
||||
|
||||
// Key returns this 'cache node' key.
|
||||
func (n *Node) Key() uint64 {
|
||||
return n.key
|
||||
}
|
||||
|
||||
// Size returns this 'cache node' size.
|
||||
func (n *Node) Size() int {
|
||||
return n.size
|
||||
}
|
||||
|
||||
// Value returns this 'cache node' value.
|
||||
func (n *Node) Value() Value {
|
||||
return n.value
|
||||
}
|
||||
|
||||
// Ref returns this 'cache node' ref counter.
|
||||
func (n *Node) Ref() int32 {
|
||||
return atomic.LoadInt32(&n.ref)
|
||||
}
|
||||
|
||||
// GetHandle returns an handle for this 'cache node'.
|
||||
func (n *Node) GetHandle() *Handle {
|
||||
if atomic.AddInt32(&n.ref, 1) <= 1 {
|
||||
panic("BUG: Node.GetHandle on zero ref")
|
||||
}
|
||||
return &Handle{unsafe.Pointer(n)}
|
||||
}
|
||||
|
||||
func (n *Node) unref() {
|
||||
if atomic.AddInt32(&n.ref, -1) == 0 {
|
||||
n.r.delete(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) unrefLocked() {
|
||||
if atomic.AddInt32(&n.ref, -1) == 0 {
|
||||
n.r.mu.RLock()
|
||||
if !n.r.closed {
|
||||
n.r.delete(n)
|
||||
}
|
||||
n.r.mu.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
type Handle struct {
|
||||
n unsafe.Pointer // *Node
|
||||
}
|
||||
|
||||
func (h *Handle) Value() Value {
|
||||
n := (*Node)(atomic.LoadPointer(&h.n))
|
||||
if n != nil {
|
||||
return n.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Handle) Release() {
|
||||
nPtr := atomic.LoadPointer(&h.n)
|
||||
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
|
||||
n := (*Node)(nPtr)
|
||||
n.unrefLocked()
|
||||
}
|
||||
}
|
||||
|
||||
func murmur32(ns, key uint64, seed uint32) uint32 {
|
||||
const (
|
||||
m = uint32(0x5bd1e995)
|
||||
r = 24
|
||||
)
|
||||
|
||||
k1 := uint32(ns >> 32)
|
||||
k2 := uint32(ns)
|
||||
k3 := uint32(key >> 32)
|
||||
k4 := uint32(key)
|
||||
|
||||
k1 *= m
|
||||
k1 ^= k1 >> r
|
||||
k1 *= m
|
||||
|
||||
k2 *= m
|
||||
k2 ^= k2 >> r
|
||||
k2 *= m
|
||||
|
||||
k3 *= m
|
||||
k3 ^= k3 >> r
|
||||
k3 *= m
|
||||
|
||||
k4 *= m
|
||||
k4 ^= k4 >> r
|
||||
k4 *= m
|
||||
|
||||
h := seed
|
||||
|
||||
h *= m
|
||||
h ^= k1
|
||||
h *= m
|
||||
h ^= k2
|
||||
h *= m
|
||||
h ^= k3
|
||||
h *= m
|
||||
h ^= k4
|
||||
|
||||
h ^= h >> 13
|
||||
h *= m
|
||||
h ^= h >> 15
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
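Putting the Get, Delete and Close methods above together; a sketch only, condensed from the tests later in this diff and assumed to live in package cache:

    func exampleCacheUsage() {
        c := NewCache(NewLRU(16))

        h := c.Get(0, 7, func() (size int, value Value) { return 1, "v" })
        _ = h.Value().(string) // "v"
        h.Release()            // the node stays in the LRU until evicted or deleted

        // Delete bans the node; the callback fires once all handles are released.
        c.Delete(0, 7, func() { /* finalizer */ })
        c.Close()
    }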
883 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go (generated, vendored)
@@ -13,11 +13,26 @@ import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type int32o int32
|
||||
|
||||
func (o *int32o) acquire() {
|
||||
if atomic.AddInt32((*int32)(o), 1) != 1 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *int32o) Release() {
|
||||
if atomic.AddInt32((*int32)(o), -1) != 0 {
|
||||
panic("BUG: invalid ref")
|
||||
}
|
||||
}
|
||||
|
||||
type releaserFunc struct {
|
||||
fn func()
|
||||
value interface{}
|
||||
value Value
|
||||
}
|
||||
|
||||
func (r releaserFunc) Release() {
|
||||
@@ -26,8 +41,8 @@ func (r releaserFunc) Release() {
|
||||
}
|
||||
}
|
||||
|
||||
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
|
||||
return ns.Get(key, func() (int, interface{}) {
|
||||
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
|
||||
return c.Get(ns, key, func() (int, Value) {
|
||||
if relf != nil {
|
||||
return charge, releaserFunc{relf, value}
|
||||
} else {
|
||||
@@ -36,7 +51,246 @@ func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) H
|
||||
})
|
||||
}
|
||||
|
||||
func TestCache_HitMiss(t *testing.T) {
|
||||
func TestCacheMap(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
nsx := []struct {
|
||||
nobjects, nhandles, concurrent, repeat int
|
||||
}{
|
||||
{10000, 400, 50, 3},
|
||||
{100000, 1000, 100, 10},
|
||||
}
|
||||
|
||||
var (
|
||||
objects [][]int32o
|
||||
handles [][]unsafe.Pointer
|
||||
)
|
||||
|
||||
for _, x := range nsx {
|
||||
objects = append(objects, make([]int32o, x.nobjects))
|
||||
handles = append(handles, make([]unsafe.Pointer, x.nhandles))
|
||||
}
|
||||
|
||||
c := NewCache(nil)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
var done int32
|
||||
|
||||
for ns, x := range nsx {
|
||||
for i := 0; i < x.concurrent; i++ {
|
||||
wg.Add(1)
|
||||
go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for j := len(objects) * repeat; j >= 0; j-- {
|
||||
key := uint64(r.Intn(len(objects)))
|
||||
h := c.Get(uint64(ns), key, func() (int, Value) {
|
||||
o := &objects[key]
|
||||
o.acquire()
|
||||
return 1, o
|
||||
})
|
||||
if v := h.Value().(*int32o); v != &objects[key] {
|
||||
t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
|
||||
}
|
||||
if objects[key] != 1 {
|
||||
t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
|
||||
}
|
||||
if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}(ns, i, x.repeat, objects[ns], handles[ns])
|
||||
}
|
||||
|
||||
go func(handles []unsafe.Pointer) {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
i := r.Intn(len(handles))
|
||||
h := (*Handle)(atomic.LoadPointer(&handles[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}(handles[ns])
|
||||
}
|
||||
|
||||
go func() {
|
||||
handles := make([]*Handle, 100000)
|
||||
for atomic.LoadInt32(&done) == 0 {
|
||||
for i := range handles {
|
||||
handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
|
||||
return 1, 1
|
||||
})
|
||||
}
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
atomic.StoreInt32(&done, 1)
|
||||
|
||||
for _, handles0 := range handles {
|
||||
for i := range handles0 {
|
||||
h := (*Handle)(atomic.LoadPointer(&handles0[i]))
|
||||
if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ns, objects0 := range objects {
|
||||
for i, o := range objects0 {
|
||||
if o != 0 {
|
||||
t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NodesAndSize(t *testing.T) {
|
||||
c := NewCache(nil)
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 2, nil)
|
||||
set(c, 1, 1, 3, 3, nil)
|
||||
set(c, 2, 1, 4, 1, nil)
|
||||
if c.Nodes() != 4 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
|
||||
}
|
||||
if c.Size() != 7 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Capacity(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
if c.Capacity() != 10 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
|
||||
}
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 2, nil).Release()
|
||||
set(c, 1, 1, 3, 3, nil).Release()
|
||||
set(c, 2, 1, 4, 1, nil).Release()
|
||||
set(c, 2, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 3, 6, 1, nil).Release()
|
||||
set(c, 2, 4, 7, 1, nil).Release()
|
||||
set(c, 2, 5, 8, 1, nil).Release()
|
||||
if c.Nodes() != 7 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
|
||||
}
|
||||
if c.Size() != 10 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
|
||||
}
|
||||
c.SetCapacity(9)
|
||||
if c.Capacity() != 9 {
|
||||
t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
|
||||
}
|
||||
if c.Nodes() != 6 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
|
||||
}
|
||||
if c.Size() != 8 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheMap_NilValue(t *testing.T) {
|
||||
c := NewCache(NewLRU(10))
|
||||
h := c.Get(0, 0, func() (size int, value Value) {
|
||||
return 1, nil
|
||||
})
|
||||
if h != nil {
|
||||
t.Error("cache handle is non-nil")
|
||||
}
|
||||
if c.Nodes() != 0 {
|
||||
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
|
||||
}
|
||||
if c.Size() != 0 {
|
||||
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_GetLatency(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
const (
|
||||
concurrentSet = 30
|
||||
concurrentGet = 3
|
||||
duration = 3 * time.Second
|
||||
delay = 3 * time.Millisecond
|
||||
maxkey = 100000
|
||||
)
|
||||
|
||||
var (
|
||||
set, getHit, getAll int32
|
||||
getMaxLatency, getDuration int64
|
||||
)
|
||||
|
||||
c := NewCache(NewLRU(5000))
|
||||
wg := &sync.WaitGroup{}
|
||||
until := time.Now().Add(duration)
|
||||
for i := 0; i < concurrentSet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for time.Now().Before(until) {
|
||||
c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
|
||||
time.Sleep(delay)
|
||||
atomic.AddInt32(&set, 1)
|
||||
return 1, 1
|
||||
}).Release()
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
for i := 0; i < concurrentGet; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for {
|
||||
mark := time.Now()
|
||||
if mark.Before(until) {
|
||||
h := c.Get(0, uint64(r.Intn(maxkey)), nil)
|
||||
latency := int64(time.Now().Sub(mark))
|
||||
m := atomic.LoadInt64(&getMaxLatency)
|
||||
if latency > m {
|
||||
atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
|
||||
}
|
||||
atomic.AddInt64(&getDuration, latency)
|
||||
if h != nil {
|
||||
atomic.AddInt32(&getHit, 1)
|
||||
h.Release()
|
||||
}
|
||||
atomic.AddInt32(&getAll, 1)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
|
||||
t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
|
||||
set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
|
||||
|
||||
if getAvglatency > delay/3 {
|
||||
t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_HitMiss(t *testing.T) {
|
||||
cases := []struct {
|
||||
key uint64
|
||||
value string
|
||||
@@ -54,14 +308,13 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
setfin := 0
|
||||
c := NewLRUCache(1000)
|
||||
ns := c.GetNamespace(0)
|
||||
c := NewCache(NewLRU(1000))
|
||||
for i, x := range cases {
|
||||
set(ns, x.key, x.value, len(x.value), func() {
|
||||
set(c, 0, x.key, x.value, len(x.value), func() {
|
||||
setfin++
|
||||
}).Release()
|
||||
for j, y := range cases {
|
||||
h := ns.Get(y.key, nil)
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j <= i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
@@ -85,7 +338,7 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
|
||||
for i, x := range cases {
|
||||
finalizerOk := false
|
||||
ns.Delete(x.key, func(exist, pending bool) {
|
||||
c.Delete(0, x.key, func() {
|
||||
finalizerOk = true
|
||||
})
|
||||
|
||||
@@ -94,7 +347,7 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
for j, y := range cases {
|
||||
h := ns.Get(y.key, nil)
|
||||
h := c.Get(0, y.key, nil)
|
||||
if j > i {
|
||||
// should hit
|
||||
if h == nil {
|
||||
@@ -122,20 +375,19 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLRUCache_Eviction(t *testing.T) {
|
||||
c := NewLRUCache(12)
|
||||
ns := c.GetNamespace(0)
|
||||
o1 := set(ns, 1, 1, 1, nil)
|
||||
set(ns, 2, 2, 1, nil).Release()
|
||||
set(ns, 3, 3, 1, nil).Release()
|
||||
set(ns, 4, 4, 1, nil).Release()
|
||||
set(ns, 5, 5, 1, nil).Release()
|
||||
if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
|
||||
c := NewCache(NewLRU(12))
|
||||
o1 := set(c, 0, 1, 1, 1, nil)
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
set(c, 0, 5, 5, 1, nil).Release()
|
||||
if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
|
||||
h.Release()
|
||||
}
|
||||
set(ns, 9, 9, 10, nil).Release() // 5,2,9
|
||||
set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
|
||||
|
||||
for _, key := range []uint64{9, 2, 5, 1} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
@@ -147,7 +399,7 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
o1.Release()
|
||||
for _, key := range []uint64{1, 2, 5} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
@@ -158,7 +410,7 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
for _, key := range []uint64{3, 4, 9} {
|
||||
h := ns.Get(key, nil)
|
||||
h := c.Get(0, key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
@@ -169,487 +421,134 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_SetGet(t *testing.T) {
|
||||
c := NewLRUCache(13)
|
||||
ns := c.GetNamespace(0)
|
||||
for i := 0; i < 200; i++ {
|
||||
n := uint64(rand.Intn(99999) % 20)
|
||||
set(ns, n, n, 1, nil).Release()
|
||||
if h := ns.Get(n, nil); h != nil {
|
||||
if h.Value() == nil {
|
||||
t.Errorf("key '%d' contains nil value", n)
|
||||
func TestLRUCache_Evict(t *testing.T) {
|
||||
c := NewCache(NewLRU(6))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
set(c, 1, 1, 4, 1, nil).Release()
|
||||
set(c, 1, 2, 5, 1, nil).Release()
|
||||
set(c, 2, 1, 6, 1, nil).Release()
|
||||
set(c, 2, 2, 7, 1, nil).Release()
|
||||
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
if x := h.Value().(uint64); x != n {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
|
||||
}
|
||||
t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ok := c.Evict(0, 1); !ok {
|
||||
t.Error("first Cache.Evict on #0.1 return false")
|
||||
}
|
||||
if ok := c.Evict(0, 1); ok {
|
||||
t.Error("second Cache.Evict on #0.1 return true")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictNS(1)
|
||||
if h := c.Get(1, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if h := c.Get(1, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
c.EvictAll()
|
||||
for ns := 0; ns < 3; ns++ {
|
||||
for key := 1; key < 3; key++ {
|
||||
if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
|
||||
t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Delete(t *testing.T) {
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, nil).Release()
|
||||
set(c, 0, 2, 2, 1, nil).Release()
|
||||
|
||||
if ok := c.Delete(0, 1, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #1 return false")
|
||||
}
|
||||
if h := c.Get(0, 1, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
|
||||
}
|
||||
if ok := c.Delete(0, 1, delFunc); ok {
|
||||
t.Error("Cache.Delete on #1 return true")
|
||||
}
|
||||
|
||||
h2 := c.Get(0, 2, nil)
|
||||
if h2 == nil {
|
||||
t.Error("Cache.Get on #2 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(1) Cache.Delete on #2 return false")
|
||||
}
|
||||
if ok := c.Delete(0, 2, delFunc); !ok {
|
||||
t.Error("(2) Cache.Delete on #2 return false")
|
||||
}
|
||||
|
||||
set(c, 0, 3, 3, 1, nil).Release()
|
||||
set(c, 0, 4, 4, 1, nil).Release()
|
||||
c.Get(0, 2, nil).Release()
|
||||
|
||||
for key := 2; key <= 4; key++ {
|
||||
if h := c.Get(0, uint64(key), nil); h != nil {
|
||||
h.Release()
|
||||
} else {
|
||||
t.Errorf("key '%d' doesn't exist", n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_Purge(t *testing.T) {
|
||||
c := NewLRUCache(3)
|
||||
ns1 := c.GetNamespace(0)
|
||||
o1 := set(ns1, 1, 1, 1, nil)
|
||||
o2 := set(ns1, 2, 2, 1, nil)
|
||||
ns1.Purge(nil)
|
||||
set(ns1, 3, 3, 1, nil).Release()
|
||||
for _, key := range []uint64{1, 2, 3} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
o1.Release()
|
||||
o2.Release()
|
||||
for _, key := range []uint64{1, 2} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testingCacheObjectCounter struct {
|
||||
created uint
|
||||
released uint
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) createOne() {
|
||||
c.created++
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) releaseOne() {
|
||||
c.released++
|
||||
}
|
||||
|
||||
type testingCacheObject struct {
|
||||
t *testing.T
|
||||
cnt *testingCacheObjectCounter
|
||||
|
||||
ns, key uint64
|
||||
|
||||
releaseCalled bool
|
||||
}
|
||||
|
||||
func (x *testingCacheObject) Release() {
|
||||
if !x.releaseCalled {
|
||||
x.releaseCalled = true
|
||||
x.cnt.releaseOne()
|
||||
} else {
|
||||
x.t.Errorf("duplicate setfin NS#%d KEY#%d", x.ns, x.key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_ConcurrentSetGet(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
const (
|
||||
N = 2000000
|
||||
M = 4000
|
||||
C = 3
|
||||
)
|
||||
|
||||
var set, get uint32
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
c := NewLRUCache(M / 4)
|
||||
for ni := uint64(0); ni < C; ni++ {
|
||||
r0 := rand.New(rand.NewSource(seed + int64(ni)))
|
||||
r1 := rand.New(rand.NewSource(seed + int64(ni) + 1))
|
||||
ns := c.GetNamespace(ni)
|
||||
|
||||
wg.Add(2)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, func() (int, interface{}) {
|
||||
atomic.AddUint32(&set, 1)
|
||||
return 1, x
|
||||
})
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r0)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, nil)
|
||||
if o != nil {
|
||||
atomic.AddUint32(&get, 1)
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r1)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
t.Logf("set=%d get=%d", set, get)
|
||||
}
|
||||
|
||||
func TestLRUCache_Finalizer(t *testing.T) {
|
||||
const (
|
||||
capacity = 100
|
||||
goroutines = 100
|
||||
iterations = 10000
|
||||
keymax = 8000
|
||||
)
|
||||
|
||||
cnt := &testingCacheObjectCounter{}
|
||||
|
||||
c := NewLRUCache(capacity)
|
||||
|
||||
type instance struct {
|
||||
seed int64
|
||||
rnd *rand.Rand
|
||||
nsid uint64
|
||||
ns Namespace
|
||||
effective int
|
||||
handles []Handle
|
||||
handlesMap map[uint64]int
|
||||
|
||||
delete bool
|
||||
purge bool
|
||||
zap bool
|
||||
wantDel int
|
||||
delfinCalled int
|
||||
delfinCalledAll int
|
||||
delfinCalledEff int
|
||||
purgefinCalled int
|
||||
}
|
||||
|
||||
instanceGet := func(p *instance, key uint64) {
|
||||
h := p.ns.Get(key, func() (charge int, value interface{}) {
|
||||
to := &testingCacheObject{
|
||||
t: t, cnt: cnt,
|
||||
ns: p.nsid,
|
||||
key: key,
|
||||
}
|
||||
p.effective++
|
||||
cnt.createOne()
|
||||
return 1, releaserFunc{func() {
|
||||
to.Release()
|
||||
p.effective--
|
||||
}, to}
|
||||
})
|
||||
p.handles = append(p.handles, h)
|
||||
p.handlesMap[key] = p.handlesMap[key] + 1
|
||||
}
|
||||
instanceRelease := func(p *instance, i int) {
|
||||
h := p.handles[i]
|
||||
key := h.Value().(releaserFunc).value.(*testingCacheObject).key
|
||||
if n := p.handlesMap[key]; n == 0 {
|
||||
t.Fatal("key ref == 0")
|
||||
} else if n > 1 {
|
||||
p.handlesMap[key] = n - 1
|
||||
} else {
|
||||
delete(p.handlesMap, key)
|
||||
}
|
||||
h.Release()
|
||||
p.handles = append(p.handles[:i], p.handles[i+1:]...)
|
||||
p.handles[len(p.handles) : len(p.handles)+1][0] = nil
|
||||
}
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
instances := make([]*instance, goroutines)
|
||||
for i := range instances {
|
||||
p := &instance{}
|
||||
p.handlesMap = make(map[uint64]int)
|
||||
p.seed = seed + int64(i)
|
||||
p.rnd = rand.New(rand.NewSource(p.seed))
|
||||
p.nsid = uint64(i)
|
||||
p.ns = c.GetNamespace(p.nsid)
|
||||
p.delete = i%6 == 0
|
||||
p.purge = i%8 == 0
|
||||
p.zap = i%12 == 0 || i%3 == 0
|
||||
instances[i] = p
|
||||
}
|
||||
|
||||
runr := rand.New(rand.NewSource(seed - 1))
|
||||
run := func(rnd *rand.Rand, x []*instance, init func(p *instance) bool, fn func(p *instance, i int) bool) {
|
||||
var (
|
||||
rx []*instance
|
||||
rn []int
|
||||
)
|
||||
if init == nil {
|
||||
rx = append([]*instance{}, x...)
|
||||
rn = make([]int, len(x))
|
||||
} else {
|
||||
for _, p := range x {
|
||||
if init(p) {
|
||||
rx = append(rx, p)
|
||||
rn = append(rn, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
for len(rx) > 0 {
|
||||
i := rand.Intn(len(rx))
|
||||
if fn(rx[i], rn[i]) {
|
||||
rn[i]++
|
||||
} else {
|
||||
rx = append(rx[:i], rx[i+1:]...)
|
||||
rn = append(rn[:i], rn[i+1:]...)
|
||||
}
|
||||
t.Errorf("Cache.Get on #%d return nil", key)
|
||||
}
|
||||
}
|
||||
|
||||
// Get and release.
|
||||
run(runr, instances, nil, func(p *instance, i int) bool {
|
||||
if i < iterations {
|
||||
if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
|
||||
instanceGet(p, uint64(p.rnd.Intn(keymax)))
|
||||
} else {
|
||||
instanceRelease(p, p.rnd.Intn(len(p.handles)))
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if used, cap := c.Used(), c.Capacity(); used > cap {
|
||||
t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
|
||||
h2.Release()
|
||||
if h := c.Get(0, 2, nil); h != nil {
|
||||
t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
|
||||
}
|
||||
|
||||
// Check effective objects.
|
||||
for i, p := range instances {
|
||||
if int(p.effective) < len(p.handlesMap) {
|
||||
t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
|
||||
}
|
||||
}
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// First delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.wantDel = p.effective
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
_, wantExist := p.handlesMap[key]
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
p.delfinCalledAll++
|
||||
if exist {
|
||||
p.delfinCalledEff++
|
||||
}
|
||||
})
|
||||
if !gotExist && wantExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d not found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Second delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.delfinCalled = 0
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
if exist && !pending {
|
||||
t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.nsid, key)
|
||||
}
|
||||
p.delfinCalled++
|
||||
})
|
||||
if gotExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
if p.delfinCalled != keymax {
|
||||
t.Errorf("(2) NS#%d not all delete fin called, diff=%d", p.nsid, keymax-p.delfinCalled)
|
||||
}
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Purge.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.purge
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Purge(func(ns, key uint64) {
|
||||
p.purgefinCalled++
|
||||
})
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Release.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return !p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
if len(p.handles) > 0 {
|
||||
instanceRelease(p, len(p.handles)-1)
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Zap.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Zap()
|
||||
p.handles = nil
|
||||
p.handlesMap = nil
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
|
||||
t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
|
||||
}
|
||||
|
||||
c.Purge(nil)
|
||||
|
||||
for _, p := range instances {
|
||||
if p.delete {
|
||||
if p.delfinCalledAll != keymax {
|
||||
t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.nsid, p.purge, p.zap, keymax-p.delfinCalledAll)
|
||||
}
|
||||
if p.delfinCalledEff != p.wantDel {
|
||||
t.Errorf("#%d not all effective delete fin called, diff=%d", p.nsid, p.wantDel-p.delfinCalledEff)
|
||||
}
|
||||
if p.purge && p.purgefinCalled > 0 {
|
||||
t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.nsid, p.delete, p.zap, p.purgefinCalled)
|
||||
}
|
||||
} else {
|
||||
if p.purge {
|
||||
if p.purgefinCalled != p.wantDel {
|
||||
t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.nsid, p.delete, p.zap, p.wantDel-p.purgefinCalled)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cnt.created != cnt.released {
|
||||
t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released)
|
||||
if delFuncCalled != 4 {
|
||||
t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Set(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get2(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, func() (charge int, value interface{}) {
|
||||
return 0, nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Release(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
handles := make([]Handle, b.N)
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
handles[i] = set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_SetRelease(b *testing.B) {
|
||||
capacity := b.N / 100
|
||||
if capacity <= 0 {
|
||||
capacity = 10
|
||||
}
|
||||
c := NewLRUCache(capacity)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
|
||||
capacity := b.N / 100
|
||||
if capacity <= 0 {
|
||||
capacity = 10
|
||||
}
|
||||
c := NewLRUCache(capacity)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
|
||||
na := b.N / 2
|
||||
nb := b.N - na
|
||||
|
||||
for i := uint64(0); i < uint64(na); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
|
||||
for i := uint64(0); i < uint64(nb); i++ {
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
func TestLRUCache_Close(t *testing.T) {
|
||||
relFuncCalled := 0
|
||||
relFunc := func() {
|
||||
relFuncCalled++
|
||||
}
|
||||
delFuncCalled := 0
|
||||
delFunc := func() {
|
||||
delFuncCalled++
|
||||
}
|
||||
|
||||
c := NewCache(NewLRU(2))
|
||||
set(c, 0, 1, 1, 1, relFunc).Release()
|
||||
set(c, 0, 2, 2, 1, relFunc).Release()
|
||||
|
||||
h3 := set(c, 0, 3, 3, 1, relFunc)
|
||||
if h3 == nil {
|
||||
t.Error("Cache.Get on #3 return nil")
|
||||
}
|
||||
if ok := c.Delete(0, 3, delFunc); !ok {
|
||||
t.Error("Cache.Delete on #3 return false")
|
||||
}
|
||||
|
||||
c.Close()
|
||||
|
||||
if relFuncCalled != 3 {
|
||||
t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
|
||||
}
|
||||
if delFuncCalled != 1 {
|
||||
t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
|
||||
}
|
||||
}
|
||||
|
||||
195 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go (generated, vendored, Normal file)
@@ -0,0 +1,195 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"sync"
	"unsafe"
)

type lruNode struct {
	n   *Node
	h   *Handle
	ban bool

	next, prev *lruNode
}

func (n *lruNode) insert(at *lruNode) {
	x := at.next
	at.next = n
	n.prev = at
	n.next = x
	x.prev = n
}

func (n *lruNode) remove() {
	if n.prev != nil {
		n.prev.next = n.next
		n.next.prev = n.prev
		n.prev = nil
		n.next = nil
	} else {
		panic("BUG: removing removed node")
	}
}

type lru struct {
	mu       sync.Mutex
	capacity int
	used     int
	recent   lruNode
}

func (r *lru) reset() {
	r.recent.next = &r.recent
	r.recent.prev = &r.recent
	r.used = 0
}

func (r *lru) Capacity() int {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.capacity
}

func (r *lru) SetCapacity(capacity int) {
	var evicted []*lruNode

	r.mu.Lock()
	r.capacity = capacity
	for r.used > r.capacity {
		rn := r.recent.prev
		if rn == nil {
			panic("BUG: invalid LRU used or capacity counter")
		}
		rn.remove()
		rn.n.CacheData = nil
		r.used -= rn.n.Size()
		evicted = append(evicted, rn)
	}
	r.mu.Unlock()

	for _, rn := range evicted {
		rn.h.Release()
	}
}

func (r *lru) Promote(n *Node) {
	var evicted []*lruNode

	r.mu.Lock()
	if n.CacheData == nil {
		if n.Size() <= r.capacity {
			rn := &lruNode{n: n, h: n.GetHandle()}
			rn.insert(&r.recent)
			n.CacheData = unsafe.Pointer(rn)
			r.used += n.Size()

			for r.used > r.capacity {
				rn := r.recent.prev
				if rn == nil {
					panic("BUG: invalid LRU used or capacity counter")
				}
				rn.remove()
				rn.n.CacheData = nil
				r.used -= rn.n.Size()
				evicted = append(evicted, rn)
			}
		}
	} else {
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.insert(&r.recent)
		}
	}
	r.mu.Unlock()

	for _, rn := range evicted {
		rn.h.Release()
	}
}

func (r *lru) Ban(n *Node) {
	r.mu.Lock()
	if n.CacheData == nil {
		n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
	} else {
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.ban = true
			r.used -= rn.n.Size()
			r.mu.Unlock()

			rn.h.Release()
			rn.h = nil
			return
		}
	}
	r.mu.Unlock()
}

func (r *lru) Evict(n *Node) {
	r.mu.Lock()
	rn := (*lruNode)(n.CacheData)
	if rn == nil || rn.ban {
		r.mu.Unlock()
		return
	}
	n.CacheData = nil
	r.mu.Unlock()

	rn.h.Release()
}

func (r *lru) EvictNS(ns uint64) {
	var evicted []*lruNode

	r.mu.Lock()
	for e := r.recent.prev; e != &r.recent; {
		rn := e
		e = e.prev
		if rn.n.NS() == ns {
			rn.remove()
			rn.n.CacheData = nil
			r.used -= rn.n.Size()
			evicted = append(evicted, rn)
		}
	}
	r.mu.Unlock()

	for _, rn := range evicted {
		rn.h.Release()
	}
}

func (r *lru) EvictAll() {
	r.mu.Lock()
	back := r.recent.prev
	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.n.CacheData = nil
	}
	r.reset()
	r.mu.Unlock()

	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.h.Release()
	}
}

func (r *lru) Close() error {
	return nil
}

// NewLRU create a new LRU-cache.
func NewLRU(capacity int) Cacher {
	r := &lru{capacity: capacity}
	r.reset()
	return r
}
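The cache rework above replaces the old namespace-based LRU cache with a generic Cache wrapping a pluggable Cacher. What follows is a minimal usage sketch, assuming only the signatures visible elsewhere in this diff (NewCache, NewLRU, Get, Release, Delete, EvictNS, Close); the namespace 0, the key 42 and the stored string are made-up placeholders, not values from the commit.

    // Illustrative only: exercising the reworked goleveldb cache API.
    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb/cache"
    )

    func main() {
        // An LRU cacher with room for 100 size units, wrapped in the generic Cache.
        c := cache.NewCache(cache.NewLRU(100))

        // Get returns the cached value for (namespace, key) or invokes the
        // setter to create it; the returned handle pins the entry.
        h := c.Get(0, 42, func() (size int, value cache.Value) {
            return 1, "placeholder value for key 42"
        })
        fmt.Println(h.Value())
        h.Release() // unpin; the entry stays cached until the LRU evicts it

        // Delete bans the entry and registers a callback that runs once the
        // entry has been fully released.
        c.Delete(0, 42, func() { fmt.Println("key 42 dropped") })

        // EvictNS drops every entry in a namespace; Close tears the cache down.
        c.EvictNS(0)
        c.Close()
    }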
622 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go generated vendored
@@ -1,622 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// The LLRB implementation were taken from https://github.com/petar/GoLLRB.
|
||||
// Which contains the following header:
|
||||
//
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// lruCache represent a LRU cache state.
|
||||
type lruCache struct {
|
||||
mu sync.Mutex
|
||||
recent lruNode
|
||||
table map[uint64]*lruNs
|
||||
capacity int
|
||||
used, size, alive int
|
||||
}
|
||||
|
||||
// NewLRUCache creates a new initialized LRU cache with the given capacity.
|
||||
func NewLRUCache(capacity int) Cache {
|
||||
c := &lruCache{
|
||||
table: make(map[uint64]*lruNs),
|
||||
capacity: capacity,
|
||||
}
|
||||
c.recent.rNext = &c.recent
|
||||
c.recent.rPrev = &c.recent
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *lruCache) Capacity() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.capacity
|
||||
}
|
||||
|
||||
func (c *lruCache) Used() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.used
|
||||
}
|
||||
|
||||
func (c *lruCache) Size() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.size
|
||||
}
|
||||
|
||||
func (c *lruCache) NumObjects() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.alive
|
||||
}
|
||||
|
||||
// SetCapacity set cache capacity.
|
||||
func (c *lruCache) SetCapacity(capacity int) {
|
||||
c.mu.Lock()
|
||||
c.capacity = capacity
|
||||
c.evict()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// GetNamespace return namespace object for given id.
|
||||
func (c *lruCache) GetNamespace(id uint64) Namespace {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if ns, ok := c.table[id]; ok {
|
||||
return ns
|
||||
}
|
||||
|
||||
ns := &lruNs{lru: c, id: id}
|
||||
c.table[id] = ns
|
||||
return ns
|
||||
}
|
||||
|
||||
func (c *lruCache) ZapNamespace(id uint64) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.zapNB()
|
||||
delete(c.table, id)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Purge purge entire cache.
|
||||
func (c *lruCache) Purge(fin PurgeFin) {
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) Zap() {
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.zapNB()
|
||||
}
|
||||
c.table = make(map[uint64]*lruNs)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) evict() {
|
||||
top := &c.recent
|
||||
for n := c.recent.rPrev; c.used > c.capacity && n != top; {
|
||||
if n.state != nodeEffective {
|
||||
panic("evicting non effective node")
|
||||
}
|
||||
n.state = nodeEvicted
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
c.used -= n.charge
|
||||
n = c.recent.rPrev
|
||||
}
|
||||
}
|
||||
|
||||
type lruNs struct {
|
||||
lru *lruCache
|
||||
id uint64
|
||||
rbRoot *lruNode
|
||||
state nsState
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
n = &lruNode{ns: ns, key: key}
|
||||
return n, n
|
||||
}
|
||||
|
||||
if key < h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
|
||||
if hn != nil {
|
||||
h.rbLeft = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
} else if key > h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
|
||||
if hn != nil {
|
||||
h.rbRight = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
} else {
|
||||
return nil, h
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
return h, n
|
||||
}
|
||||
|
||||
func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
|
||||
hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
|
||||
if hn != nil {
|
||||
ns.rbRoot = hn
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbGetNode(key uint64) *lruNode {
|
||||
h := ns.rbRoot
|
||||
for h != nil {
|
||||
switch {
|
||||
case key < h.key:
|
||||
h = h.rbLeft
|
||||
case key > h.key:
|
||||
h = h.rbRight
|
||||
default:
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *lruNs) getNode(key uint64) *lruNode {
|
||||
return ns.rbGetNode(key)
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if key < h.key {
|
||||
if h.rbLeft == nil { // key not present. Nothing to delete
|
||||
return h
|
||||
}
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
|
||||
} else {
|
||||
if rbIsRed(h.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
// If @key equals @h.key and no right children at @h
|
||||
if h.key == key && h.rbRight == nil {
|
||||
return nil
|
||||
}
|
||||
if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
|
||||
h = rbMoveRight(h)
|
||||
}
|
||||
// If @key equals @h.key, and (from above) 'h.Right != nil'
|
||||
if h.key == key {
|
||||
var x *lruNode
|
||||
h.rbRight, x = rbDeleteMin(h.rbRight)
|
||||
if x == nil {
|
||||
panic("logic")
|
||||
}
|
||||
x.rbLeft, h.rbLeft = h.rbLeft, nil
|
||||
x.rbRight, h.rbRight = h.rbRight, nil
|
||||
x.rbBlack = h.rbBlack
|
||||
h = x
|
||||
} else { // Else, @key is bigger than @h.key
|
||||
h.rbRight = ns.rbDeleteNode(h.rbRight, key)
|
||||
}
|
||||
}
|
||||
|
||||
return rbFixup(h)
|
||||
}
|
||||
|
||||
func (ns *lruNs) deleteNode(key uint64) {
|
||||
ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
|
||||
if ns.rbRoot != nil {
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if h.key >= pivot {
|
||||
if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
|
||||
return false
|
||||
}
|
||||
if !iter(h) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ns.rbIterateNodes(h.rbRight, pivot, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
|
||||
ns.rbIterateNodes(ns.rbRoot, 0, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
return nil
|
||||
}
|
||||
|
||||
var n *lruNode
|
||||
if setf == nil {
|
||||
n = ns.getNode(key)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
n = ns.getOrCreateNode(key)
|
||||
}
|
||||
switch n.state {
|
||||
case nodeZero:
|
||||
charge, value := setf()
|
||||
if value == nil {
|
||||
ns.deleteNode(key)
|
||||
return nil
|
||||
}
|
||||
if charge < 0 {
|
||||
charge = 0
|
||||
}
|
||||
|
||||
n.value = value
|
||||
n.charge = charge
|
||||
n.state = nodeEvicted
|
||||
|
||||
ns.lru.size += charge
|
||||
ns.lru.alive++
|
||||
|
||||
fallthrough
|
||||
case nodeEvicted:
|
||||
if n.charge == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Insert to recent list.
|
||||
n.state = nodeEffective
|
||||
n.ref++
|
||||
ns.lru.used += n.charge
|
||||
ns.lru.evict()
|
||||
|
||||
fallthrough
|
||||
case nodeEffective:
|
||||
// Bump to front.
|
||||
n.rRemove()
|
||||
n.rInsert(&ns.lru.recent)
|
||||
case nodeDeleted:
|
||||
// Do nothing.
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
n.ref++
|
||||
|
||||
return &lruHandle{node: n}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
if fin != nil {
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
n := ns.getNode(key)
|
||||
if n == nil {
|
||||
if fin != nil {
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
|
||||
switch n.state {
|
||||
case nodeEffective:
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
case nodeDeleted:
|
||||
if fin != nil {
|
||||
fin(true, true)
|
||||
}
|
||||
return false
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (ns *lruNs) purgeNB(fin PurgeFin) {
|
||||
if ns.state == nsEffective {
|
||||
var nodes []*lruNode
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
nodes = append(nodes, n)
|
||||
return true
|
||||
})
|
||||
for _, n := range nodes {
|
||||
switch n.state {
|
||||
case nodeEffective:
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
case nodeDeleted:
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Purge(fin PurgeFin) {
|
||||
ns.lru.mu.Lock()
|
||||
ns.purgeNB(fin)
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ns *lruNs) zapNB() {
|
||||
if ns.state == nsEffective {
|
||||
ns.state = nsZapped
|
||||
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
if n.state == nodeEffective {
|
||||
ns.lru.used -= n.charge
|
||||
n.rRemove()
|
||||
}
|
||||
ns.lru.size -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.fin()
|
||||
|
||||
return true
|
||||
})
|
||||
ns.rbRoot = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Zap() {
|
||||
ns.lru.mu.Lock()
|
||||
ns.zapNB()
|
||||
delete(ns.lru.table, ns.id)
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruNode struct {
|
||||
ns *lruNs
|
||||
|
||||
rNext, rPrev *lruNode
|
||||
rbLeft, rbRight *lruNode
|
||||
rbBlack bool
|
||||
|
||||
key uint64
|
||||
value interface{}
|
||||
charge int
|
||||
ref int
|
||||
state nodeState
|
||||
delfin DelFin
|
||||
purgefin PurgeFin
|
||||
}
|
||||
|
||||
func (n *lruNode) rInsert(at *lruNode) {
|
||||
x := at.rNext
|
||||
at.rNext = n
|
||||
n.rPrev = at
|
||||
n.rNext = x
|
||||
x.rPrev = n
|
||||
}
|
||||
|
||||
func (n *lruNode) rRemove() bool {
|
||||
if n.rPrev == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.rPrev.rNext = n.rNext
|
||||
n.rNext.rPrev = n.rPrev
|
||||
n.rPrev = nil
|
||||
n.rNext = nil
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lruNode) fin() {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
if n.purgefin != nil {
|
||||
if n.delfin != nil {
|
||||
panic("conflicting delete and purge fin")
|
||||
}
|
||||
n.purgefin(n.ns.id, n.key)
|
||||
n.purgefin = nil
|
||||
} else if n.delfin != nil {
|
||||
n.delfin(true, false)
|
||||
n.delfin = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) derefNB() {
|
||||
n.ref--
|
||||
if n.ref == 0 {
|
||||
if n.ns.state == nsEffective {
|
||||
// Remove elemement.
|
||||
n.ns.deleteNode(n.key)
|
||||
n.ns.lru.size -= n.charge
|
||||
n.ns.lru.alive--
|
||||
n.fin()
|
||||
}
|
||||
n.value = nil
|
||||
} else if n.ref < 0 {
|
||||
panic("leveldb/cache: lruCache: negative node reference")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) deref() {
|
||||
n.ns.lru.mu.Lock()
|
||||
n.derefNB()
|
||||
n.ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruHandle struct {
|
||||
node *lruNode
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (h *lruHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.node.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *lruHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
h.node.deref()
|
||||
h.node = nil
|
||||
}
|
||||
|
||||
func rbIsRed(h *lruNode) bool {
|
||||
if h == nil {
|
||||
return false
|
||||
}
|
||||
return !h.rbBlack
|
||||
}
|
||||
|
||||
func rbRotLeft(h *lruNode) *lruNode {
|
||||
x := h.rbRight
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbRight = x.rbLeft
|
||||
x.rbLeft = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbRotRight(h *lruNode) *lruNode {
|
||||
x := h.rbLeft
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbLeft = x.rbRight
|
||||
x.rbRight = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbFlip(h *lruNode) {
|
||||
h.rbBlack = !h.rbBlack
|
||||
h.rbLeft.rbBlack = !h.rbLeft.rbBlack
|
||||
h.rbRight.rbBlack = !h.rbRight.rbBlack
|
||||
}
|
||||
|
||||
func rbMoveLeft(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbRight.rbLeft) {
|
||||
h.rbRight = rbRotRight(h.rbRight)
|
||||
h = rbRotLeft(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbMoveRight(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbFixup(h *lruNode) *lruNode {
|
||||
if rbIsRed(h.rbRight) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if h.rbLeft == nil {
|
||||
return nil, h
|
||||
}
|
||||
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
|
||||
h.rbLeft, n = rbDeleteMin(h.rbLeft)
|
||||
|
||||
return rbFixup(h), n
|
||||
}
|
||||
18 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go generated vendored
@@ -9,14 +9,12 @@ package leveldb
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const ctValSize = 1000
|
||||
@@ -33,8 +31,8 @@ func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
|
||||
|
||||
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
|
||||
return newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCache: cache.NewLRUCache(100),
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -269,9 +267,9 @@ func TestCorruptDB_TableIndex(t *testing.T) {
|
||||
func TestCorruptDB_MissingManifest(t *testing.T) {
|
||||
rnd := rand.New(rand.NewSource(0x0badda7a))
|
||||
h := newDbCorruptHarnessWopt(t, &opt.Options{
|
||||
BlockCache: cache.NewLRUCache(100),
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
WriteBuffer: 1000 * 60,
|
||||
BlockCacheCapacity: 100,
|
||||
Strict: opt.StrictJournalChecksum,
|
||||
WriteBuffer: 1000 * 60,
|
||||
})
|
||||
|
||||
h.build(1000)
|
||||
|
||||
18 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go generated vendored
@@ -347,12 +347,14 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
return err
|
||||
}
|
||||
iter := tr.NewIterator(nil, nil)
|
||||
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
|
||||
if errors.IsCorrupted(err) {
|
||||
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
|
||||
tcorruptedBlock++
|
||||
}
|
||||
})
|
||||
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
|
||||
itererr.SetErrorCallback(func(err error) {
|
||||
if errors.IsCorrupted(err) {
|
||||
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
|
||||
tcorruptedBlock++
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Scan the table.
|
||||
for iter.Next() {
|
||||
@@ -823,8 +825,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
case p == "blockpool":
|
||||
value = fmt.Sprintf("%v", db.s.tops.bpool)
|
||||
case p == "cachedblock":
|
||||
if bc := db.s.o.GetBlockCache(); bc != nil {
|
||||
value = fmt.Sprintf("%d", bc.Size())
|
||||
if db.s.tops.bcache != nil {
|
||||
value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
|
||||
} else {
|
||||
value = "<nil>"
|
||||
}
|
||||
|
||||
5 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go generated vendored
@@ -8,6 +8,7 @@ package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -89,6 +90,10 @@ func (db *DB) newSnapshot() *Snapshot {
|
||||
return snap
|
||||
}
|
||||
|
||||
func (snap *Snapshot) String() string {
|
||||
return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns ErrNotFound if
|
||||
// the DB does not contains the key.
|
||||
//
|
||||
|
||||
14 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go generated vendored
@@ -1271,7 +1271,7 @@ func TestDB_DeletionMarkers2(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDB_CompactionTableOpenError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
|
||||
h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
|
||||
defer h.close()
|
||||
|
||||
im := 10
|
||||
@@ -1629,8 +1629,8 @@ func TestDB_ManualCompaction(t *testing.T) {
|
||||
|
||||
func TestDB_BloomFilter(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
BlockCache: opt.NoCache,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
DisableBlockCache: true,
|
||||
Filter: filter.NewBloomFilter(10),
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
@@ -2066,8 +2066,8 @@ func TestDB_GetProperties(t *testing.T) {
|
||||
|
||||
func TestDB_GoleveldbIssue72and83(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
CachedOpenFiles: 3,
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
OpenFilesCacheCapacity: 3,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
@@ -2200,7 +2200,7 @@ func TestDB_GoleveldbIssue72and83(t *testing.T) {
|
||||
func TestDB_TransientError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 128 * opt.KiB,
|
||||
CachedOpenFiles: 3,
|
||||
OpenFilesCacheCapacity: 3,
|
||||
DisableCompactionBackoff: true,
|
||||
})
|
||||
defer h.close()
|
||||
@@ -2410,7 +2410,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
|
||||
CompactionTableSize: 43 * opt.KiB,
|
||||
CompactionExpandLimitFactor: 1,
|
||||
CompactionGPOverlapsFactor: 1,
|
||||
BlockCache: opt.NoCache,
|
||||
DisableBlockCache: true,
|
||||
}
|
||||
s, err := newSession(stor, o)
|
||||
if err != nil {
|
||||
|
||||
2 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go generated vendored
@@ -112,9 +112,9 @@ func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
		db.writeDelay += time.Since(start)
		db.writeDelayN++
	} else if db.writeDelayN > 0 {
		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
		db.writeDelay = 0
		db.writeDelayN = 0
		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
	}
	return
}
16 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go generated vendored
@@ -17,14 +17,14 @@ import (
|
||||
var _ = testutil.Defer(func() {
|
||||
Describe("Leveldb external", func() {
|
||||
o := &opt.Options{
|
||||
BlockCache: opt.NoCache,
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 80,
|
||||
Compression: opt.NoCompression,
|
||||
CachedOpenFiles: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
CompactionTableSize: 2000,
|
||||
DisableBlockCache: true,
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 80,
|
||||
Compression: opt.NoCompression,
|
||||
OpenFilesCacheCapacity: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
CompactionTableSize: 2000,
|
||||
}
|
||||
|
||||
Describe("write test", func() {
|
||||
|
||||
4 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go generated vendored
@@ -106,7 +106,7 @@ func (ik iKey) assert() {
		panic("leveldb: nil iKey")
	}
	if len(ik) < 8 {
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
	}
}

@@ -124,7 +124,7 @@ func (ik iKey) parseNum() (seq uint64, kt kType) {
	num := ik.num()
	seq, kt = uint64(num>>8), kType(num&0xff)
	if kt > ktVal {
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
	}
	return
}
131 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go generated vendored
@@ -20,8 +20,9 @@ const (
|
||||
GiB = MiB * 1024
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultBlockCacheSize = 8 * MiB
|
||||
var (
|
||||
DefaultBlockCacher = LRUCacher
|
||||
DefaultBlockCacheCapacity = 8 * MiB
|
||||
DefaultBlockRestartInterval = 16
|
||||
DefaultBlockSize = 4 * KiB
|
||||
DefaultCompactionExpandLimitFactor = 25
|
||||
@@ -33,7 +34,8 @@ const (
|
||||
DefaultCompactionTotalSize = 10 * MiB
|
||||
DefaultCompactionTotalSizeMultiplier = 10.0
|
||||
DefaultCompressionType = SnappyCompression
|
||||
DefaultCachedOpenFiles = 500
|
||||
DefaultOpenFilesCacher = LRUCacher
|
||||
DefaultOpenFilesCacheCapacity = 500
|
||||
DefaultMaxMemCompationLevel = 2
|
||||
DefaultNumLevel = 7
|
||||
DefaultWriteBuffer = 4 * MiB
|
||||
@@ -41,22 +43,33 @@ const (
|
||||
DefaultWriteL0SlowdownTrigger = 8
|
||||
)
|
||||
|
||||
type noCache struct{}
|
||||
// Cacher is a caching algorithm.
|
||||
type Cacher interface {
|
||||
New(capacity int) cache.Cacher
|
||||
}
|
||||
|
||||
func (noCache) SetCapacity(capacity int) {}
|
||||
func (noCache) Capacity() int { return 0 }
|
||||
func (noCache) Used() int { return 0 }
|
||||
func (noCache) Size() int { return 0 }
|
||||
func (noCache) NumObjects() int { return 0 }
|
||||
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
|
||||
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
|
||||
func (noCache) ZapNamespace(id uint64) {}
|
||||
func (noCache) Purge(fin cache.PurgeFin) {}
|
||||
func (noCache) Zap() {}
|
||||
type CacherFunc struct {
|
||||
NewFunc func(capacity int) cache.Cacher
|
||||
}
|
||||
|
||||
var NoCache cache.Cache = noCache{}
|
||||
func (f *CacherFunc) New(capacity int) cache.Cacher {
|
||||
if f.NewFunc != nil {
|
||||
return f.NewFunc(capacity)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compression is the per-block compression algorithm to use.
|
||||
func noCacher(int) cache.Cacher { return nil }
|
||||
|
||||
var (
|
||||
// LRUCacher is the LRU-cache algorithm.
|
||||
LRUCacher = &CacherFunc{cache.NewLRU}
|
||||
|
||||
// NoCacher is the value to disable caching algorithm.
|
||||
NoCacher = &CacherFunc{}
|
||||
)
|
||||
|
||||
// Compression is the 'sorted table' block compression algorithm to use.
|
||||
type Compression uint
|
||||
|
||||
func (c Compression) String() string {
|
||||
@@ -133,16 +146,17 @@ type Options struct {
|
||||
// The default value is nil
|
||||
AltFilters []filter.Filter
|
||||
|
||||
// BlockCache provides per-block caching for LevelDB. Specify NoCache to
|
||||
// disable block caching.
|
||||
// BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
|
||||
// Specify NoCacher to disable caching algorithm.
|
||||
//
|
||||
// By default LevelDB will create LRU-cache with capacity of BlockCacheSize.
|
||||
BlockCache cache.Cache
|
||||
// The default value is LRUCacher.
|
||||
BlockCacher Cacher
|
||||
|
||||
// BlockCacheSize defines the capacity of the default 'block cache'.
|
||||
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
|
||||
// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
|
||||
//
|
||||
// The default value is 8MiB.
|
||||
BlockCacheSize int
|
||||
BlockCacheCapacity int
|
||||
|
||||
// BlockRestartInterval is the number of keys between restart points for
|
||||
// delta encoding of keys.
|
||||
@@ -156,13 +170,6 @@ type Options struct {
|
||||
// The default value is 4KiB.
|
||||
BlockSize int
|
||||
|
||||
// CachedOpenFiles defines number of open files to kept around when not
|
||||
// in-use, the counting includes still in-use files.
|
||||
// Set this to negative value to disable caching.
|
||||
//
|
||||
// The default value is 500.
|
||||
CachedOpenFiles int
|
||||
|
||||
// CompactionExpandLimitFactor limits compaction size after expanded.
|
||||
// This will be multiplied by table size limit at compaction target level.
|
||||
//
|
||||
@@ -237,11 +244,17 @@ type Options struct {
|
||||
// The default value uses the same ordering as bytes.Compare.
|
||||
Comparer comparer.Comparer
|
||||
|
||||
// Compression defines the per-block compression to use.
|
||||
// Compression defines the 'sorted table' block compression to use.
|
||||
//
|
||||
// The default value (DefaultCompression) uses snappy compression.
|
||||
Compression Compression
|
||||
|
||||
// DisableBlockCache allows disable use of cache.Cache functionality on
|
||||
// 'sorted table' block.
|
||||
//
|
||||
// The default value is false.
|
||||
DisableBlockCache bool
|
||||
|
||||
// DisableCompactionBackoff allows disable compaction retry backoff.
|
||||
//
|
||||
// The default value is false.
|
||||
@@ -288,6 +301,18 @@ type Options struct {
|
||||
// The default is 7.
|
||||
NumLevel int
|
||||
|
||||
// OpenFilesCacher provides cache algorithm for open files caching.
|
||||
// Specify NoCacher to disable caching algorithm.
|
||||
//
|
||||
// The default value is LRUCacher.
|
||||
OpenFilesCacher Cacher
|
||||
|
||||
// OpenFilesCacheCapacity defines the capacity of the open files caching.
|
||||
// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
|
||||
//
|
||||
// The default value is 500.
|
||||
OpenFilesCacheCapacity int
|
||||
|
||||
// Strict defines the DB strict level.
|
||||
Strict Strict
|
||||
|
||||
@@ -320,18 +345,22 @@ func (o *Options) GetAltFilters() []filter.Filter {
|
||||
return o.AltFilters
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockCache() cache.Cache {
|
||||
if o == nil {
|
||||
func (o *Options) GetBlockCacher() Cacher {
|
||||
if o == nil || o.BlockCacher == nil {
|
||||
return DefaultBlockCacher
|
||||
} else if o.BlockCacher == NoCacher {
|
||||
return nil
|
||||
}
|
||||
return o.BlockCache
|
||||
return o.BlockCacher
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockCacheSize() int {
|
||||
if o == nil || o.BlockCacheSize <= 0 {
|
||||
return DefaultBlockCacheSize
|
||||
func (o *Options) GetBlockCacheCapacity() int {
|
||||
if o == nil || o.BlockCacheCapacity <= 0 {
|
||||
return DefaultBlockCacheCapacity
|
||||
} else if o.BlockCacheCapacity == -1 {
|
||||
return 0
|
||||
}
|
||||
return o.BlockCacheSize
|
||||
return o.BlockCacheCapacity
|
||||
}
|
||||
|
||||
func (o *Options) GetBlockRestartInterval() int {
|
||||
@@ -348,15 +377,6 @@ func (o *Options) GetBlockSize() int {
|
||||
return o.BlockSize
|
||||
}
|
||||
|
||||
func (o *Options) GetCachedOpenFiles() int {
|
||||
if o == nil || o.CachedOpenFiles == 0 {
|
||||
return DefaultCachedOpenFiles
|
||||
} else if o.CachedOpenFiles < 0 {
|
||||
return 0
|
||||
}
|
||||
return o.CachedOpenFiles
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionExpandLimit(level int) int {
|
||||
factor := DefaultCompactionExpandLimitFactor
|
||||
if o != nil && o.CompactionExpandLimitFactor > 0 {
|
||||
@@ -494,6 +514,25 @@ func (o *Options) GetNumLevel() int {
|
||||
return o.NumLevel
|
||||
}
|
||||
|
||||
func (o *Options) GetOpenFilesCacher() Cacher {
|
||||
if o == nil || o.OpenFilesCacher == nil {
|
||||
return DefaultOpenFilesCacher
|
||||
}
|
||||
if o.OpenFilesCacher == NoCacher {
|
||||
return nil
|
||||
}
|
||||
return o.OpenFilesCacher
|
||||
}
|
||||
|
||||
func (o *Options) GetOpenFilesCacheCapacity() int {
|
||||
if o == nil || o.OpenFilesCacheCapacity <= 0 {
|
||||
return DefaultOpenFilesCacheCapacity
|
||||
} else if o.OpenFilesCacheCapacity == -1 {
|
||||
return 0
|
||||
}
|
||||
return o.OpenFilesCacheCapacity
|
||||
}
|
||||
|
||||
func (o *Options) GetStrict(strict Strict) bool {
|
||||
if o == nil || o.Strict == 0 {
|
||||
return DefaultStrict&strict != 0
|
||||
|
||||
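The options changes above rename the cache knobs: BlockCache, BlockCacheSize and CachedOpenFiles become BlockCacher, BlockCacheCapacity and OpenFilesCacheCapacity, with LRUCacher/NoCacher selecting the algorithm. Below is a minimal sketch of how a caller might set them after this change, assuming only the field names shown in the diff; the database path is a hypothetical example.

    // Illustrative only: opening a DB with the renamed cache options.
    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/opt"
    )

    func main() {
        o := &opt.Options{
            BlockCacher:            opt.LRUCacher, // default; NoCacher disables block caching
            BlockCacheCapacity:     8 * opt.MiB,
            OpenFilesCacheCapacity: 500,
            // OpenFilesCacheCapacity: -1 behaves like specifying NoCacher.
        }
        db, err := leveldb.OpenFile("/tmp/example-db", o) // path is a placeholder
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }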
8 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go generated vendored
@@ -7,7 +7,6 @@
package leveldb

import (
	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
)
@@ -32,13 +31,6 @@ func (s *session) setOptions(o *opt.Options) {
			no.AltFilters[i] = &iFilter{filter}
		}
	}
	// Block cache.
	switch o.GetBlockCache() {
	case nil:
		no.BlockCache = cache.NewLRUCache(o.GetBlockCacheSize())
	case opt.NoCache:
		no.BlockCache = nil
	}
	// Comparer.
	s.icmp = &iComparer{o.GetComparer()}
	no.Comparer = s.icmp
5 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go generated vendored
@@ -73,7 +73,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
		stCompPtrs: make([]iKey, o.GetNumLevel()),
	}
	s.setOptions(o)
	s.tops = newTableOps(s, s.o.GetCachedOpenFiles())
	s.tops = newTableOps(s)
	s.setVersion(newVersion(s))
	s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
	return
@@ -82,9 +82,6 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
// Close session.
func (s *session) close() {
	s.tops.close()
	if bc := s.o.GetBlockCache(); bc != nil {
		bc.Purge(nil)
	}
	if s.manifest != nil {
		s.manifest.Close()
	}
2 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go generated vendored
@@ -221,7 +221,7 @@ func (fs *fileStorage) GetManifest() (f File, err error) {
			fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
			continue
		}
		if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil {
		if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
			fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
			continue
		}
70 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go generated vendored
@@ -286,10 +286,10 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
|
||||
|
||||
// Table operations.
|
||||
type tOps struct {
|
||||
s *session
|
||||
cache cache.Cache
|
||||
cacheNS cache.Namespace
|
||||
bpool *util.BufferPool
|
||||
s *session
|
||||
cache *cache.Cache
|
||||
bcache *cache.Cache
|
||||
bpool *util.BufferPool
|
||||
}
|
||||
|
||||
// Creates an empty table and returns table writer.
|
||||
@@ -338,26 +338,28 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
|
||||
|
||||
// Opens table. It returns a cache handle, which should
|
||||
// be released after use.
|
||||
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
|
||||
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
|
||||
num := f.file.Num()
|
||||
ch = t.cacheNS.Get(num, func() (charge int, value interface{}) {
|
||||
ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
|
||||
var r storage.Reader
|
||||
r, err = f.file.Open()
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var bcacheNS cache.Namespace
|
||||
if bc := t.s.o.GetBlockCache(); bc != nil {
|
||||
bcacheNS = bc.GetNamespace(num)
|
||||
var bcache *cache.CacheGetter
|
||||
if t.bcache != nil {
|
||||
bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
|
||||
}
|
||||
|
||||
var tr *table.Reader
|
||||
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcacheNS, t.bpool, t.s.o.Options)
|
||||
tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
|
||||
if err != nil {
|
||||
r.Close()
|
||||
return 0, nil
|
||||
}
|
||||
return 1, tr
|
||||
|
||||
})
|
||||
if ch == nil && err == nil {
|
||||
err = ErrClosed
|
||||
@@ -412,16 +414,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
|
||||
// no one use the the table.
|
||||
func (t *tOps) remove(f *tFile) {
|
||||
num := f.file.Num()
|
||||
t.cacheNS.Delete(num, func(exist, pending bool) {
|
||||
if !pending {
|
||||
if err := f.file.Remove(); err != nil {
|
||||
t.s.logf("table@remove removing @%d %q", num, err)
|
||||
} else {
|
||||
t.s.logf("table@remove removed @%d", num)
|
||||
}
|
||||
if bc := t.s.o.GetBlockCache(); bc != nil {
|
||||
bc.ZapNamespace(num)
|
||||
}
|
||||
t.cache.Delete(0, num, func() {
|
||||
if err := f.file.Remove(); err != nil {
|
||||
t.s.logf("table@remove removing @%d %q", num, err)
|
||||
} else {
|
||||
t.s.logf("table@remove removed @%d", num)
|
||||
}
|
||||
if t.bcache != nil {
|
||||
t.bcache.EvictNS(num)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -429,18 +429,34 @@ func (t *tOps) remove(f *tFile) {
|
||||
// Closes the table ops instance. It will close all tables,
|
||||
// regadless still used or not.
|
||||
func (t *tOps) close() {
|
||||
t.cache.Zap()
|
||||
t.bpool.Close()
|
||||
t.cache.Close()
|
||||
if t.bcache != nil {
|
||||
t.bcache.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Creates new initialized table ops instance.
|
||||
func newTableOps(s *session, cacheCap int) *tOps {
|
||||
c := cache.NewLRUCache(cacheCap)
|
||||
func newTableOps(s *session) *tOps {
|
||||
var (
|
||||
cacher cache.Cacher
|
||||
bcache *cache.Cache
|
||||
)
|
||||
if s.o.GetOpenFilesCacheCapacity() > 0 {
|
||||
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
|
||||
}
|
||||
if !s.o.DisableBlockCache {
|
||||
var bcacher cache.Cacher
|
||||
if s.o.GetBlockCacheCapacity() > 0 {
|
||||
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
|
||||
}
|
||||
bcache = cache.NewCache(bcacher)
|
||||
}
|
||||
return &tOps{
|
||||
s: s,
|
||||
cache: c,
|
||||
cacheNS: c.GetNamespace(0),
|
||||
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
|
||||
s: s,
|
||||
cache: cache.NewCache(cacher),
|
||||
bcache: bcache,
|
||||
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
60 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go generated vendored
@@ -509,7 +509,7 @@ type Reader struct {
|
||||
mu sync.RWMutex
|
||||
fi *storage.FileInfo
|
||||
reader io.ReaderAt
|
||||
cache cache.Namespace
|
||||
cache *cache.CacheGetter
|
||||
err error
|
||||
bpool *util.BufferPool
|
||||
// Options
|
||||
@@ -613,18 +613,22 @@ func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error)
|
||||
|
||||
func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
|
||||
if r.cache != nil {
|
||||
var err error
|
||||
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
|
||||
if !fillCache {
|
||||
return 0, nil
|
||||
}
|
||||
var b *block
|
||||
b, err = r.readBlock(bh, verifyChecksum)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
var (
|
||||
err error
|
||||
ch *cache.Handle
|
||||
)
|
||||
if fillCache {
|
||||
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
|
||||
var b *block
|
||||
b, err = r.readBlock(bh, verifyChecksum)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
} else {
|
||||
ch = r.cache.Get(bh.offset, nil)
|
||||
}
|
||||
if ch != nil {
|
||||
b, ok := ch.Value().(*block)
|
||||
if !ok {
|
||||
@@ -667,18 +671,22 @@ func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
|
||||
|
||||
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
|
||||
if r.cache != nil {
|
||||
var err error
|
||||
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
|
||||
if !fillCache {
|
||||
return 0, nil
|
||||
}
|
||||
var b *filterBlock
|
||||
b, err = r.readFilterBlock(bh)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
var (
|
||||
err error
|
||||
ch *cache.Handle
|
||||
)
|
||||
if fillCache {
|
||||
ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
|
||||
var b *filterBlock
|
||||
b, err = r.readFilterBlock(bh)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
return cap(b.data), b
|
||||
})
|
||||
} else {
|
||||
ch = r.cache.Get(bh.offset, nil)
|
||||
}
|
||||
if ch != nil {
|
||||
b, ok := ch.Value().(*filterBlock)
|
||||
if !ok {
|
||||
@@ -980,7 +988,7 @@ func (r *Reader) Release() {
|
||||
// The fi, cache and bpool is optional and can be nil.
|
||||
//
|
||||
// The returned table reader instance is goroutine-safe.
|
||||
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
|
||||
if f == nil {
|
||||
return nil, errors.New("leveldb/table: nil file")
|
||||
}
|
||||
|
||||
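The reader changes above hand the table reader a *cache.CacheGetter instead of a cache.Namespace, scoping the shared block cache to one table file number. A small sketch under that assumption follows; the capacity, file number, offsets and stored bytes are made-up placeholders.

    // Illustrative only: scoping a shared block cache with CacheGetter.
    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb/cache"
    )

    func main() {
        bcache := cache.NewCache(cache.NewLRU(16 * 1024 * 1024))
        getter := &cache.CacheGetter{Cache: bcache, NS: 42} // 42 = placeholder file number

        // Get with a setter fills the cache on a miss ...
        h := getter.Get(1000, func() (size int, value cache.Value) {
            return 4096, []byte("decoded block at offset 1000") // placeholder
        })
        fmt.Printf("%s\n", h.Value().([]byte))
        h.Release()

        // ... while Get with a nil setter only returns an existing entry.
        if h := getter.Get(2000, nil); h == nil {
            fmt.Println("offset 2000 not cached")
        }

        bcache.EvictNS(42) // drop every cached block for this file
        bcache.Close()
    }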
10 Godeps/_workspace/src/golang.org/x/text/transform/transform_test.go generated vendored
@@ -16,7 +16,7 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type lowerCaseASCII struct{ transform.NopResetter }
|
||||
type lowerCaseASCII struct{ NopResetter }
|
||||
|
||||
func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := len(src)
|
||||
@@ -34,7 +34,7 @@ func (lowerCaseASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, er
|
||||
|
||||
var errYouMentionedX = errors.New("you mentioned X")
|
||||
|
||||
type dontMentionX struct{ transform.NopResetter }
|
||||
type dontMentionX struct{ NopResetter }
|
||||
|
||||
func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := len(src)
|
||||
@@ -52,7 +52,7 @@ func (dontMentionX) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||
|
||||
// doublerAtEOF is a strange Transformer that transforms "this" to "tthhiiss",
|
||||
// but only if atEOF is true.
|
||||
type doublerAtEOF struct{ transform.NopResetter }
|
||||
type doublerAtEOF struct{ NopResetter }
|
||||
|
||||
func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
if !atEOF {
|
||||
@@ -71,7 +71,7 @@ func (doublerAtEOF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err
|
||||
// rleDecode and rleEncode implement a toy run-length encoding: "aabbbbbbbbbb"
|
||||
// is encoded as "2a10b". The decoding is assumed to not contain any numbers.
|
||||
|
||||
type rleDecode struct{ transform.NopResetter }
|
||||
type rleDecode struct{ NopResetter }
|
||||
|
||||
func (rleDecode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
loop:
|
||||
@@ -104,7 +104,7 @@ loop:
|
||||
}
|
||||
|
||||
type rleEncode struct {
|
||||
transform.NopResetter
|
||||
NopResetter
|
||||
|
||||
// allowStutter means that "xxxxxxxx" can be encoded as "5x3x"
|
||||
// instead of always as "8x".
|
||||
|
||||
50 NICKS Normal file
@@ -0,0 +1,50 @@
|
||||
# This file maps email addresses used in commits to nicks used the changelog.
|
||||
|
||||
AudriusButkevicius <audrius.butkevicius@gmail.com>
|
||||
Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
KayoticSully <kayoticsully@gmail.com>
|
||||
Nutomic <me@nutomic.com>
|
||||
Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Vilbrekin <vilbrekin@gmail.com>
|
||||
Zillode <zillode@zillode.be>
|
||||
alex2108 <register-github@alex-graf.de>
|
||||
andrew-d <andrew@du.nham.ca>
|
||||
asdil12 <dominik@heidler.eu>
|
||||
bencurthoys <ben@bencurthoys.com>
|
||||
bigbear2nd <bigbear2nd@gmail.com>
|
||||
brendanlong <self@brendanlong.com>
|
||||
bsidhom <bsidhom@gmail.com>
|
||||
calmh <jakob@nym.se>
|
||||
cdata <chris@scriptolo.gy>
|
||||
ceh <emil@hessman.se>
|
||||
cqcallaw <enlightened.despot@gmail.com>
|
||||
facastagnini <federico.castagnini@gmail.com>
|
||||
filoozoom <philippe@schommers.be>
|
||||
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
gillisig <gilli@vx.is>
|
||||
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
jpjp <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
kamadak <kamada@nanohz.org>
|
||||
kilburn <kilburn@la3.org>
|
||||
kozec <kozec@kozec.com>
|
||||
krozycki <rozycki.karol@gmail.com>
|
||||
marcindziadus <dziadus.marcin@gmail.com>
|
||||
marclaporte <marc@marclaporte.com>
|
||||
moshen <moshen.colin@gmail.com>
|
||||
mvdan <mvdan@mvdan.cc>
|
||||
pascalj <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
peterhoeg <peter@speartail.com>
|
||||
philips <brandon@ifup.org>
|
||||
piobpl <piotrb10@gmail.com>
|
||||
pluby <phill.luby@newredo.com>
|
||||
pyfisch <pyfisch@gmail.com>
|
||||
rumpelsepp <stefan@sevenbyte.org>
|
||||
qbit <qbit@deftly.net>
|
||||
sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
snnd <dw@risu.io>
|
||||
timabell <tim@timwise.co.uk>
|
||||
tnn2 <tnn@nygren.pp.se>
|
||||
tojrobinson <tully@tojr.org>
|
||||
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
veeti <veeti.paananen@rojekti.fi>
|
||||
31 README.md
@@ -3,7 +3,7 @@ syncthing
[](http://build.syncthing.net/job/syncthing/lastBuild/)
[](http://godoc.org/github.com/syncthing/syncthing)
[](http://opensource.org/licenses/GPL-3.0)
[](https://www.mozilla.org/MPL/2.0/)

This is the `syncthing` project. The following are the project goals:

@@ -11,7 +11,7 @@ This is the `syncthing` project. The following are the project goals:
collaborating devices. The protocol should be well defined, unambiguous,
easily understood, free to use, efficient, secure and language neutral.
This is the [Block Exchange
Protocol](https://github.com/syncthing/syncthing/blob/master/protocol/PROTOCOL.md).
Protocol](https://github.com/syncthing/specs/blob/master/BEPv1.md).

2. Provide the reference implementation to demonstrate the usability of
said protocol. This is the `syncthing` utility. It is the hope that
@@ -25,7 +25,8 @@ for incompatible changes.
Getting Started
---------------

Take a look at the [getting started guide](http://discourse.syncthing.net/t/46).
Take a look at the [getting started
guide](https://github.com/syncthing/syncthing/wiki/Getting-Started).

There are a few examples for keeping syncthing running in the background
on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).
@@ -37,31 +38,23 @@ Building
--------

Building Syncthing from source is easy, and there's a
[guide](http://discourse.syncthing.net/t/44)
[guide](https://github.com/syncthing/syncthing/wiki/Building).
that describes it for both Unix and Windows.

Signed Releases
---------------

As of v0.7.0 and onwards, git tags and release binaries are GPG signed with
the key BCE524C7 (http://nym.se/gpg.txt). For release binaries, MD5 and
SHA1 checksums are calculated and signed, available in the
md5sum.txt.asc and sha1sum.txt.asc files.
As of v0.10.15 and onwards, git tags and release binaries are GPG signed
with the key D26E6ED000654A3E (see http://syncthing.net/security.html).
For release binaries, MD5 and SHA1 checksums are calculated and signed,
available in the md5sum.txt.asc and sha1sum.txt.asc files.

Documentation
=============

The [syncthing
documentation](http://discourse.syncthing.net/category/documentation) is
on the discourse site.

License
=======

All documentation and protocol specifications are licensed
under the [Creative Commons Attribution 4.0 International
License](http://creativecommons.org/licenses/by/4.0/).
documentation](https://github.com/syncthing/syncthing/wiki/) is on the
Github wiki.

All code is licensed under the
[GPL](https://github.com/syncthing/syncthing/blob/master/LICENSE), v3 or
later.
[MPLv2](https://github.com/syncthing/syncthing/blob/master/LICENSE).
BIN assets/logo-horizontal.svg Normal file (binary file not shown; 3.7 KiB)
BIN assets/logo-only.svg Normal file (binary file not shown; 1.8 KiB)
BIN assets/logo-vertical.svg Normal file (binary file not shown; 3.8 KiB)
133 build.go
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build ignore
|
||||
|
||||
@@ -22,6 +13,7 @@ import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -72,7 +64,7 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
switch goarch {
|
||||
case "386", "amd64", "arm", "armv5", "armv6", "armv7":
|
||||
case "386", "amd64", "arm":
|
||||
break
|
||||
default:
|
||||
log.Printf("Unknown goarch %q; proceed with caution!", goarch)
|
||||
@@ -190,7 +182,12 @@ func install(pkg string, tags []string) {
|
||||
}
|
||||
|
||||
func build(pkg string, tags []string) {
|
||||
rmr("syncthing", "syncthing.exe")
|
||||
binary := "syncthing"
|
||||
if goos == "windows" {
|
||||
binary += ".exe"
|
||||
}
|
||||
|
||||
rmr(binary, binary+".md5")
|
||||
args := []string{"build", "-ldflags", ldflags()}
|
||||
if len(tags) > 0 {
|
||||
args = append(args, "-tags", strings.Join(tags, ","))
|
||||
@@ -201,6 +198,13 @@ func build(pkg string, tags []string) {
|
||||
args = append(args, pkg)
|
||||
setBuildEnv()
|
||||
runPrint("go", args...)
|
||||
|
||||
// Create an md5 checksum of the binary, to be included in the archive for
|
||||
// automatic upgrades.
|
||||
err := md5File(binary)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTar() {
|
||||
@@ -213,14 +217,20 @@ func buildTar() {
|
||||
build("./cmd/syncthing", tags)
|
||||
filename := name + ".tar.gz"
|
||||
files := []archiveFile{
|
||||
{"README.md", name + "/README.txt"},
|
||||
{"LICENSE", name + "/LICENSE.txt"},
|
||||
{"AUTHORS", name + "/AUTHORS.txt"},
|
||||
{"syncthing", name + "/syncthing"},
|
||||
{src: "README.md", dst: name + "/README.txt"},
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing", dst: name + "/syncthing"},
|
||||
{src: "syncthing.md5", dst: name + "/syncthing.md5"},
|
||||
}
|
||||
|
||||
for _, file := range listFiles("etc") {
|
||||
files = append(files, archiveFile{file, name + "/" + file})
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + file})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + filepath.Base(file)})
|
||||
}
|
||||
|
||||
tarGz(filename, files)
|
||||
log.Println(filename)
|
||||
}
|
||||
@@ -235,11 +245,17 @@ func buildZip() {
|
||||
build("./cmd/syncthing", tags)
|
||||
filename := name + ".zip"
|
||||
files := []archiveFile{
|
||||
{"README.md", name + "/README.txt"},
|
||||
{"LICENSE", name + "/LICENSE.txt"},
|
||||
{"AUTHORS", name + "/AUTHORS.txt"},
|
||||
{"syncthing.exe", name + "/syncthing.exe"},
|
||||
{src: "README.md", dst: name + "/README.txt"},
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing.exe", dst: name + "/syncthing.exe"},
|
||||
{src: "syncthing.exe.md5", dst: name + "/syncthing.exe.md5"},
|
||||
}
|
||||
|
||||
for _, file := range listFiles("extra") {
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + filepath.Base(file)})
|
||||
}
|
||||
|
||||
zipFile(filename, files)
|
||||
log.Println(filename)
|
||||
}
|
||||
@@ -260,15 +276,7 @@ func listFiles(dir string) []string {
|
||||
|
||||
func setBuildEnv() {
|
||||
os.Setenv("GOOS", goos)
|
||||
if strings.HasPrefix(goarch, "armv") {
|
||||
os.Setenv("GOARCH", "arm")
|
||||
os.Setenv("GOARM", goarch[4:])
|
||||
} else {
|
||||
os.Setenv("GOARCH", goarch)
|
||||
}
|
||||
if goarch == "386" {
|
||||
os.Setenv("GO386", "387")
|
||||
}
|
||||
os.Setenv("GOARCH", goarch)
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Println("Warning: can't determine current dir:", err)
|
||||
@@ -284,7 +292,7 @@ func assets() {
|
||||
}
|
||||
|
||||
func xdr() {
|
||||
runPrint("go", "generate", "./internal/discover", "./internal/files", "./internal/protocol")
|
||||
runPrint("go", "generate", "./internal/discover", "./internal/db")
|
||||
}
|
||||
|
||||
func translate() {
|
||||
@@ -333,16 +341,43 @@ func rmr(paths ...string) {
    }
}

func getVersion() string {
func getReleaseVersion() (string, error) {
    fd, err := os.Open("RELEASE")
    if err != nil {
        return "", err
    }
    defer fd.Close()

    bs, err := ioutil.ReadAll(fd)
    if err != nil {
        return "", err
    }
    return string(bytes.TrimSpace(bs)), nil
}

func getGitVersion() (string, error) {
    v, err := runError("git", "describe", "--always", "--dirty")
    if err != nil {
        return "unknown-dev"
        return "", err
    }
    v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
        s[0] = '+'
        return s
    })
    return string(v)
    return string(v), nil
}

func getVersion() string {
    // First try for a RELEASE file,
    if ver, err := getReleaseVersion(); err == nil {
        return ver
    }
    // ... then see if we have a Git tag.
    if ver, err := getGitVersion(); err == nil {
        return ver
    }
    // This seems to be a dev build.
    return "unknown-dev"
}

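As an aside, getVersion now resolves the version in three steps: a RELEASE file wins, then the output of git describe as massaged by getGitVersion, and finally the "unknown-dev" fallback. The following is a minimal, hypothetical sketch (not part of the change) of the rewrite getGitVersion performs; the regular expression is an assumption, since the real versionRe is defined elsewhere in build.go.

package main

import (
    "fmt"
    "regexp"
)

// Assumed pattern: the "-<count>-g<hash>" suffix git describe appends after a tag.
var versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)

func main() {
    v := []byte("v0.10.5-42-g3cc4cb0-dirty")
    v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
        s[0] = '+' // same rewrite as in getGitVersion above
        return s
    })
    fmt.Println(string(v)) // v0.10.5+42-g3cc4cb0-dirty
}
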
func buildStamp() int64 {
@@ -554,3 +589,29 @@ func zipFile(out string, files []archiveFile) {
        log.Fatal(err)
    }
}

func md5File(file string) error {
    fd, err := os.Open(file)
    if err != nil {
        return err
    }
    defer fd.Close()

    h := md5.New()
    _, err = io.Copy(h, fd)
    if err != nil {
        return err
    }

    out, err := os.Create(file + ".md5")
    if err != nil {
        return err
    }

    _, err = fmt.Fprintf(out, "%x\n", h.Sum(nil))
    if err != nil {
        return err
    }

    return out.Close()
}
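For reference, the .md5 sidecar written above is a single line: the lowercase hex digest followed by a newline. A hedged sketch (not part of the build script) of how such a sidecar could be verified:

package main

import (
    "bytes"
    "crypto/md5"
    "fmt"
    "io/ioutil"
    "os"
)

func verifyMD5(file string) error {
    want, err := ioutil.ReadFile(file + ".md5") // digest written by md5File
    if err != nil {
        return err
    }
    data, err := ioutil.ReadFile(file)
    if err != nil {
        return err
    }
    got := fmt.Sprintf("%x", md5.Sum(data))
    if got != string(bytes.TrimSpace(want)) {
        return fmt.Errorf("%s: checksum mismatch", file)
    }
    return nil
}

func main() {
    if err := verifyMD5(os.Args[1]); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
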
63
build.sh
@@ -2,7 +2,7 @@
set -euo pipefail
IFS=$'\n\t'

DOCKERIMGV=1.4-1
STTRACE=${STTRACE:-}

case "${1:-default}" in
    default)
@@ -50,22 +50,29 @@ case "${1:-default}" in
        ;;

    all)
        go run build.go -goos linux -goarch amd64 tar
        go run build.go -goos linux -goarch 386 tar
        go run build.go -goos linux -goarch armv5 tar
        go run build.go -goos linux -goarch armv6 tar
        go run build.go -goos linux -goarch armv7 tar

        go run build.go -goos freebsd -goarch amd64 tar
        go run build.go -goos freebsd -goarch 386 tar

        go run build.go -goos openbsd -goarch amd64 tar
        go run build.go -goos openbsd -goarch 386 tar

        go run build.go -goos darwin -goarch amd64 tar
        go run build.go -goos darwin -goarch 386 tar

        go run build.go -goos dragonfly -goarch 386 tar
        go run build.go -goos dragonfly -goarch amd64 tar

        go run build.go -goos freebsd -goarch 386 tar
        go run build.go -goos freebsd -goarch amd64 tar

        go run build.go -goos linux -goarch 386 tar
        go run build.go -goos linux -goarch amd64 tar
        go run build.go -goos linux -goarch arm tar

        go run build.go -goos netbsd -goarch 386 tar
        go run build.go -goos netbsd -goarch amd64 tar

        go run build.go -goos openbsd -goarch 386 tar
        go run build.go -goos openbsd -goarch amd64 tar

        go run build.go -goos solaris -goarch amd64 tar

        go run build.go -goos windows -goarch amd64 zip
        go run build.go -goos windows -goarch 386 zip
        go run build.go -goos windows -goarch amd64 zip
        ;;

setup)
|
||||
@@ -104,31 +111,31 @@ case "${1:-default}" in
|
||||
fi
|
||||
;;
|
||||
|
||||
docker-init)
|
||||
docker build -q -t syncthing/build:$DOCKERIMGV docker >/dev/null
|
||||
;;
|
||||
|
||||
docker-all)
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
syncthing/build:$DOCKERIMGV \
|
||||
sh -c './build.sh clean && ./build.sh all && STTRACE=all ./build.sh test-cov'
|
||||
-e "STTRACE=$STTRACE" \
|
||||
syncthing/build:latest \
|
||||
sh -c './build.sh clean \
|
||||
&& go vet ./cmd/... ./internal/... \
|
||||
&& ( golint ./cmd/... ; golint ./internal/... ) | egrep -v "comment on exported|should have comment" \
|
||||
&& ./build.sh all \
|
||||
&& STTRACE=all ./build.sh test-cov'
|
||||
;;
|
||||
|
||||
docker-test)
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/tmp/syncthing \
|
||||
syncthing/build:$DOCKERIMGV \
|
||||
sh -euxc 'mkdir -p /go/src/github.com/syncthing \
|
||||
&& cd /go/src/github.com/syncthing \
|
||||
&& cp -r /tmp/syncthing syncthing \
|
||||
&& cd syncthing \
|
||||
&& ./build.sh clean \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
syncthing/build:latest \
|
||||
sh -euxc './build.sh clean \
|
||||
&& go run build.go -race \
|
||||
&& export GOPATH=$(pwd)/Godeps/_workspace:$GOPATH \
|
||||
&& cd test \
|
||||
&& go test -tags integration -v -timeout 60m -short'
|
||||
&& go test -tags integration -v -timeout 60m -short \
|
||||
&& git clean -fxd .'
|
||||
;;
|
||||
|
||||
*)
|
||||
|
||||
11
changelog.sh
Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

since="$1"
if [[ -z $since ]] ; then
    since="$(git describe --abbrev=0 HEAD^).."
fi

git log --reverse --pretty=format:'* %s, @%aN)' "$since" | egrep 'fixes #\d|ref #\d' | sed 's/)[,. ]*,/,/' | sed 's/fixes #/#/g' | sed 's/ref #/#/g'

git diff "$since" -- AUTHORS

@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
missing-authors() {
|
||||
for email in $(git log --format=%ae master | sort | uniq) ; do
|
||||
for email in $(git log --format=%ae HEAD | sort | uniq) ; do
|
||||
grep -q "$email" AUTHORS || echo $email
|
||||
done
|
||||
}
|
||||
@@ -13,7 +13,11 @@ no-docs-typos() {
|
||||
grep -v f2459ef3319b2f060dbcdacd0c35a1788a94b8bd |\
|
||||
grep -v b61f418bf2d1f7d5a9d7088a20a2a448e5e66801 |\
|
||||
grep -v f0621207e3953711f9ab86d99724f1d0faac45b1 |\
|
||||
grep -v f1120d7aa936c0658429edef0037792520b46334
|
||||
grep -v f1120d7aa936c0658429edef0037792520b46334 |\
|
||||
grep -v a9339d0627fff439879d157c75077f02c9fac61b |\
|
||||
grep -v 254c63763a3ad42fd82259f1767db526cff94a14 |\
|
||||
grep -v 4b76ec40c07078beaa2c5e250ed7d9bd6276a718 |\
|
||||
grep -v ffc39dfbcb34eacc3ea12327a02b6e7741a2c207
|
||||
}
|
||||
|
||||
print-missing-authors() {
|
||||
@@ -23,22 +27,24 @@ print-missing-authors() {
|
||||
}
|
||||
|
||||
print-missing-copyright() {
|
||||
find . -name \*.go | xargs grep -L 'Copyright (C)' | grep -v Godeps
|
||||
find . -name \*.go | xargs egrep -L 'Copyright \(C\)|automatically generated' | grep -v Godeps | grep -v internal/auto/
|
||||
}
|
||||
|
||||
print-line-blame() {
|
||||
for f in $(find . -name \*.go | grep -v Godep) gui/app.js gui/index.html ; do
|
||||
git blame --line-porcelain $f | grep author-mail
|
||||
done | sort | uniq -c | sort -n
|
||||
}
|
||||
echo Author emails missing in AUTHORS file:
|
||||
print-missing-authors
|
||||
echo
|
||||
authors=$(print-missing-authors)
|
||||
if [[ ! -z $authors ]] ; then
|
||||
echo '***'
|
||||
echo Author emails not in AUTHORS:
|
||||
echo $authors
|
||||
echo '***'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo Files missing copyright notice:
|
||||
print-missing-copyright
|
||||
echo
|
||||
|
||||
echo Blame lines per author:
|
||||
print-line-blame
|
||||
copy=$(print-missing-copyright)
|
||||
if [[ ! -z $copy ]] ; then
|
||||
echo ***
|
||||
echo Files missing copyright notice:
|
||||
echo $copy
|
||||
echo ***
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build ignore
|
||||
|
||||
@@ -45,7 +36,7 @@ func Assets() map[string][]byte {
|
||||
var gr *gzip.Reader
|
||||
{{range $asset := .assets}}
|
||||
bs, _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
|
||||
gr, _ = gzip.NewReader(bytes.NewBuffer(bs))
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
assets["{{$asset.Name}}"] = bs
|
||||
{{end}}
|
||||
|
||||
168
cmd/stcompdirs/main.go
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/symlinks"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
log.Println(compareDirectories(flag.Args()...))
|
||||
}
|
||||
|
||||
// Compare a number of directories. Returns nil if the contents are identical,
|
||||
// otherwise an error describing the first found difference.
|
||||
func compareDirectories(dirs ...string) error {
|
||||
chans := make([]chan fileInfo, len(dirs))
|
||||
for i := range chans {
|
||||
chans[i] = make(chan fileInfo)
|
||||
}
|
||||
errcs := make([]chan error, len(dirs))
|
||||
abort := make(chan struct{})
|
||||
|
||||
for i := range dirs {
|
||||
errcs[i] = startWalker(dirs[i], chans[i], abort)
|
||||
}
|
||||
|
||||
res := make([]fileInfo, len(dirs))
|
||||
for {
|
||||
numDone := 0
|
||||
for i := range chans {
|
||||
fi, ok := <-chans[i]
|
||||
if !ok {
|
||||
err, hasError := <-errcs[i]
|
||||
if hasError {
|
||||
close(abort)
|
||||
return err
|
||||
}
|
||||
numDone++
|
||||
}
|
||||
res[i] = fi
|
||||
}
|
||||
|
||||
for i := 1; i < len(res); i++ {
|
||||
if res[i] != res[0] {
|
||||
close(abort)
|
||||
if res[i].name < res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[0], res[i], dirs[i])
|
||||
} else if res[i].name > res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
|
||||
}
|
||||
return fmt.Errorf("Mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
|
||||
}
|
||||
}
|
||||
|
||||
if numDone == len(dirs) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
mod int64
|
||||
hash [16]byte
|
||||
}
|
||||
|
||||
func (f fileInfo) String() string {
|
||||
return fmt.Sprintf("%s %04o %d %x", f.name, f.mode, f.mod, f.hash)
|
||||
}
|
||||
|
||||
func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan error {
|
||||
walker := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rn, _ := filepath.Rel(dir, path)
|
||||
if rn == "." || rn == ".stfolder" {
|
||||
return nil
|
||||
}
|
||||
if rn == ".stversions" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
var f fileInfo
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: os.ModeSymlink,
|
||||
}
|
||||
|
||||
tgt, _, err := symlinks.Read(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := md5.New()
|
||||
h.Write([]byte(tgt))
|
||||
hash := h.Sum(nil)
|
||||
|
||||
copy(f.hash[:], hash)
|
||||
} else if info.IsDir() {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
// hash and modtime zero for directories
|
||||
}
|
||||
} else {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
mode: info.Mode(),
|
||||
mod: info.ModTime().Unix(),
|
||||
}
|
||||
sum, err := md5file(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.hash = sum
|
||||
}
|
||||
|
||||
select {
|
||||
case res <- f:
|
||||
return nil
|
||||
case <-abort:
|
||||
return errors.New("abort")
|
||||
}
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
go func() {
|
||||
err := filepath.Walk(dir, walker)
|
||||
close(res)
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
close(errc)
|
||||
}()
|
||||
return errc
|
||||
}
|
||||
|
||||
func md5file(fname string) (hash [16]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := md5.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -21,7 +12,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/scanner"
|
||||
)
|
||||
|
||||
|
||||
43
cmd/stfinddevice/main.go
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/discover"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
var server string
|
||||
|
||||
flag.StringVar(&server, "server", "udp4://announce.syncthing.net:22026", "Announce server")
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) != 1 || server == "" {
|
||||
log.Printf("Usage: %s [-server=\"udp4://announce.syncthing.net:22026\"] <device>", os.Args[0])
|
||||
os.Exit(64)
|
||||
}
|
||||
|
||||
id, err := protocol.DeviceIDFromString(flag.Args()[0])
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
discoverer := discover.NewDiscoverer(protocol.LocalDeviceID, nil)
|
||||
discoverer.StartGlobal([]string{server}, 1)
|
||||
for _, addr := range discoverer.Lookup(id) {
|
||||
log.Println(addr)
|
||||
}
|
||||
}
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -21,8 +12,8 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/files"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/db"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
@@ -34,17 +25,17 @@ func main() {
|
||||
device := flag.String("device", "", "Device ID (blank for global)")
|
||||
flag.Parse()
|
||||
|
||||
db, err := leveldb.OpenFile(flag.Arg(0), nil)
|
||||
ldb, err := leveldb.OpenFile(flag.Arg(0), nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fs := files.NewSet(*folder, db)
|
||||
fs := db.NewFileSet(*folder, ldb)
|
||||
|
||||
if *device == "" {
|
||||
log.Printf("*** Global index for folder %q", *folder)
|
||||
fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfoTruncated)
|
||||
fs.WithGlobalTruncated(func(fi db.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
fmt.Println(f)
|
||||
fmt.Println("\t", fs.Availability(f.Name))
|
||||
return true
|
||||
@@ -55,8 +46,8 @@ func main() {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Printf("*** Have index for folder %q device %q", *folder, n)
|
||||
fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
|
||||
f := fi.(protocol.FileInfoTruncated)
|
||||
fs.WithHaveTruncated(n, func(fi db.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
fmt.Println(f)
|
||||
return true
|
||||
})
|
||||
|
||||
50
cmd/syncthing/blockprof.go
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STBLOCKPROFILE") != "" {
|
||||
profiler := pprof.Lookup("block")
|
||||
if profiler == nil {
|
||||
panic("Couldn't find block profiler")
|
||||
}
|
||||
l.Debugln("Starting block profiling")
|
||||
go saveBlockingProfiles(profiler)
|
||||
}
|
||||
}
|
||||
|
||||
func saveBlockingProfiles(profiler *pprof.Profile) {
|
||||
runtime.SetBlockProfileRate(1)
|
||||
|
||||
t0 := time.Now()
|
||||
for t := range time.NewTicker(20 * time.Second).C {
|
||||
startms := int(t.Sub(t0).Seconds() * 1000)
|
||||
|
||||
fd, err := os.Create(fmt.Sprintf("block-%05d-%07d.pprof", syscall.Getpid(), startms))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = profiler.WriteTo(fd, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
295
cmd/syncthing/connections.go
Normal file
@@ -0,0 +1,295 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/events"
|
||||
"github.com/syncthing/syncthing/internal/model"
|
||||
)
|
||||
|
||||
func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
|
||||
var conns = make(chan *tls.Conn)
|
||||
|
||||
// Listen
|
||||
for _, addr := range cfg.Options().ListenAddress {
|
||||
go listenTLS(conns, addr, tlsCfg)
|
||||
}
|
||||
|
||||
// Connect
|
||||
go dialTLS(m, conns, tlsCfg)
|
||||
|
||||
next:
|
||||
for conn := range conns {
|
||||
cs := conn.ConnectionState()
|
||||
|
||||
// We should have negotiated the next level protocol "bep/1.0" as part
|
||||
// of the TLS handshake. Unfortunately this can't be a hard error,
|
||||
// because there are implementations out there that don't support
|
||||
// protocol negotiation (iOS for one...).
|
||||
if !cs.NegotiatedProtocolIsMutual || cs.NegotiatedProtocol != bepProtocolName {
|
||||
l.Infof("Peer %s did not negotiate bep/1.0", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
// We should have received exactly one certificate from the other
|
||||
// side. If we didn't, they don't have a device ID and we drop the
|
||||
// connection.
|
||||
certs := cs.PeerCertificates
|
||||
if cl := len(certs); cl != 1 {
|
||||
l.Infof("Got peer certificate list of length %d != 1 from %s; protocol error", cl, conn.RemoteAddr())
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
remoteCert := certs[0]
|
||||
remoteID := protocol.NewDeviceID(remoteCert.Raw)
|
||||
|
||||
// The device ID should not be that of ourselves. It can happen
|
||||
// though, especially in the presence of NAT hairpinning, multiple
|
||||
// clients between the same NAT gateway, and global discovery.
|
||||
if remoteID == myID {
|
||||
l.Infof("Connected to myself (%s) - should not happen", remoteID)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// We should not already be connected to the other party. TODO: This
|
||||
// could use some better handling. If the old connection is dead but
|
||||
// hasn't timed out yet we may want to drop *that* connection and keep
|
||||
// this one. But in case we are two devices connecting to each other
|
||||
// in parallel we don't want to do that or we end up with no
|
||||
// connections still established...
|
||||
if m.ConnectedTo(remoteID) {
|
||||
l.Infof("Connected to already connected device (%s)", remoteID)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
for deviceID, deviceCfg := range cfg.Devices() {
|
||||
if deviceID == remoteID {
|
||||
// Verify the name on the certificate. By default we set it to
|
||||
// "syncthing" when generating, but the user may have replaced
|
||||
// the certificate and used another name.
|
||||
certName := deviceCfg.CertName
|
||||
if certName == "" {
|
||||
certName = tlsDefaultCommonName
|
||||
}
|
||||
err := remoteCert.VerifyHostname(certName)
|
||||
if err != nil {
|
||||
// Incorrect certificate name is something the user most
|
||||
// likely wants to know about, since it's an advanced
|
||||
// config. Warn instead of Info.
|
||||
l.Warnf("Bad certificate from %s (%v): %v", remoteID, conn.RemoteAddr(), err)
|
||||
conn.Close()
|
||||
continue next
|
||||
}
|
||||
|
||||
// If rate limiting is set, and based on the address we should
|
||||
// limit the connection, then we wrap it in a limiter.
|
||||
|
||||
limit := shouldLimit(conn.RemoteAddr())
|
||||
|
||||
wr := io.Writer(conn)
|
||||
if limit && writeRateLimit != nil {
|
||||
wr = &limitedWriter{conn, writeRateLimit}
|
||||
}
|
||||
|
||||
rd := io.Reader(conn)
|
||||
if limit && readRateLimit != nil {
|
||||
rd = &limitedReader{conn, readRateLimit}
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
|
||||
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)
|
||||
|
||||
l.Infof("Established secure connection to %s at %s", remoteID, name)
|
||||
if debugNet {
|
||||
l.Debugf("cipher suite: %04X in lan: %t", conn.ConnectionState().CipherSuite, !limit)
|
||||
}
|
||||
events.Default.Log(events.DeviceConnected, map[string]string{
|
||||
"id": remoteID.String(),
|
||||
"addr": conn.RemoteAddr().String(),
|
||||
})
|
||||
|
||||
m.AddConnection(conn, protoConn)
|
||||
continue next
|
||||
}
|
||||
}
|
||||
|
||||
if !cfg.IgnoredDevice(remoteID) {
|
||||
events.Default.Log(events.DeviceRejected, map[string]string{
|
||||
"device": remoteID.String(),
|
||||
"address": conn.RemoteAddr().String(),
|
||||
})
|
||||
l.Infof("Connection from %s with unknown device ID %s", conn.RemoteAddr(), remoteID)
|
||||
} else {
|
||||
l.Infof("Connection from %s with ignored device ID %s", conn.RemoteAddr(), remoteID)
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
|
||||
if debugNet {
|
||||
l.Debugln("listening on", addr)
|
||||
}
|
||||
|
||||
tcaddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
l.Fatalln("listen (BEP):", err)
|
||||
}
|
||||
listener, err := net.ListenTCP("tcp", tcaddr)
|
||||
if err != nil {
|
||||
l.Fatalln("listen (BEP):", err)
|
||||
}
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
l.Warnln("Accepting connection:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if debugNet {
|
||||
l.Debugln("connect from", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
tcpConn := conn.(*net.TCPConn)
|
||||
setTCPOptions(tcpConn)
|
||||
|
||||
tc := tls.Server(conn, tlsCfg)
|
||||
err = tc.Handshake()
|
||||
if err != nil {
|
||||
l.Infoln("TLS handshake:", err)
|
||||
tc.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
conns <- tc
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
|
||||
delay := time.Second
|
||||
for {
|
||||
nextDevice:
|
||||
for deviceID, deviceCfg := range cfg.Devices() {
|
||||
if deviceID == myID {
|
||||
continue
|
||||
}
|
||||
|
||||
if m.ConnectedTo(deviceID) {
|
||||
continue
|
||||
}
|
||||
|
||||
var addrs []string
|
||||
for _, addr := range deviceCfg.Addresses {
|
||||
if addr == "dynamic" {
|
||||
if discoverer != nil {
|
||||
t := discoverer.Lookup(deviceID)
|
||||
if len(t) == 0 {
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, t...)
|
||||
}
|
||||
} else {
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
host, port, err := net.SplitHostPort(addr)
|
||||
if err != nil && strings.HasPrefix(err.Error(), "missing port") {
|
||||
// addr is on the form "1.2.3.4"
|
||||
addr = net.JoinHostPort(addr, "22000")
|
||||
} else if err == nil && port == "" {
|
||||
// addr is on the form "1.2.3.4:"
|
||||
addr = net.JoinHostPort(host, "22000")
|
||||
}
|
||||
if debugNet {
|
||||
l.Debugln("dial", deviceCfg.DeviceID, addr)
|
||||
}
|
||||
|
||||
raddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
if debugNet {
|
||||
l.Debugln(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
conn, err := net.DialTCP("tcp", nil, raddr)
|
||||
if err != nil {
|
||||
if debugNet {
|
||||
l.Debugln(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
setTCPOptions(conn)
|
||||
|
||||
tc := tls.Client(conn, tlsCfg)
|
||||
err = tc.Handshake()
|
||||
if err != nil {
|
||||
l.Infoln("TLS handshake:", err)
|
||||
tc.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
conns <- tc
|
||||
continue nextDevice
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
delay *= 2
|
||||
if maxD := time.Duration(cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
|
||||
delay = maxD
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
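A quick hedged illustration of the address normalisation in dialTLS above (the addresses are made up): a bare host and a host with a trailing colon both end up with the default port 22000.

package main

import (
    "fmt"
    "net"
)

func main() {
    // "1.2.3.4": SplitHostPort fails with a "missing port" error,
    // so the dialer appends the default port.
    _, _, err := net.SplitHostPort("1.2.3.4")
    fmt.Println(err)
    fmt.Println(net.JoinHostPort("1.2.3.4", "22000")) // 1.2.3.4:22000

    // "1.2.3.4:": SplitHostPort succeeds with an empty port,
    // which is likewise replaced by 22000.
    host, port, _ := net.SplitHostPort("1.2.3.4:")
    fmt.Println(host, port == "") // 1.2.3.4 true
}
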
func setTCPOptions(conn *net.TCPConn) {
|
||||
var err error
|
||||
if err = conn.SetLinger(0); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetNoDelay(false); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
if err = conn.SetKeepAlive(true); err != nil {
|
||||
l.Infoln(err)
|
||||
}
|
||||
}
|
||||
|
||||
func shouldLimit(addr net.Addr) bool {
|
||||
if cfg.Options().LimitBandwidthInLan {
|
||||
return true
|
||||
}
|
||||
|
||||
tcpaddr, ok := addr.(*net.TCPAddr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
for _, lan := range lans {
|
||||
if lan.Contains(tcpaddr.IP) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return !tcpaddr.IP.IsLoopback()
|
||||
}
|
||||
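A hedged illustration (with made-up addresses) of the shouldLimit logic above: peers inside one of the detected LAN ranges are exempt from the rate limiter, as is loopback, while everything else is limited.

package main

import (
    "fmt"
    "net"
)

func main() {
    _, lan, _ := net.ParseCIDR("192.168.1.0/24") // stand-in for an entry in lans

    local := &net.TCPAddr{IP: net.ParseIP("192.168.1.10"), Port: 22000}
    remote := &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 22000}

    fmt.Println(lan.Contains(local.IP))  // true  -> not limited
    fmt.Println(lan.Contains(remote.IP)) // false -> limited (unless loopback)
    fmt.Println(remote.IP.IsLoopback())  // false
}
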
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -32,13 +23,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/calmh/logger"
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/auto"
|
||||
"github.com/syncthing/syncthing/internal/config"
|
||||
"github.com/syncthing/syncthing/internal/db"
|
||||
"github.com/syncthing/syncthing/internal/discover"
|
||||
"github.com/syncthing/syncthing/internal/events"
|
||||
"github.com/syncthing/syncthing/internal/model"
|
||||
"github.com/syncthing/syncthing/internal/osutil"
|
||||
"github.com/syncthing/syncthing/internal/protocol"
|
||||
"github.com/syncthing/syncthing/internal/upgrade"
|
||||
"github.com/vitrun/qart/qr"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
@@ -70,7 +62,16 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
|
||||
if err != nil {
|
||||
l.Infoln("Loading HTTPS certificate:", err)
|
||||
l.Infoln("Creating new HTTPS certificate")
|
||||
newCertificate(confDir, "https-")
|
||||
|
||||
// When generating the HTTPS certificate, use the system host name per
|
||||
// default. If that isn't available, use the "syncthing" default.
|
||||
var name string
|
||||
name, err = os.Hostname()
|
||||
if err != nil {
|
||||
name = tlsDefaultCommonName
|
||||
}
|
||||
|
||||
newCertificate(confDir, "https-", name)
|
||||
cert, err = loadCert(confDir, "https-")
|
||||
}
|
||||
if err != nil {
|
||||
@@ -78,7 +79,20 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
|
||||
}
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
ServerName: "syncthing",
|
||||
MinVersion: tls.VersionTLS10, // No SSLv3
|
||||
CipherSuites: []uint16{
|
||||
// No RC4
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
rawListener, err := net.Listen("tcp", cfg.Address)
|
||||
@@ -107,7 +121,10 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
|
||||
getRestMux.HandleFunc("/rest/system", restGetSystem)
|
||||
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
|
||||
getRestMux.HandleFunc("/rest/version", restGetVersion)
|
||||
getRestMux.HandleFunc("/rest/tree", withModel(m, restGetTree))
|
||||
getRestMux.HandleFunc("/rest/stats/device", withModel(m, restGetDeviceStats))
|
||||
getRestMux.HandleFunc("/rest/stats/folder", withModel(m, restGetFolderStats))
|
||||
getRestMux.HandleFunc("/rest/filestatus", withModel(m, restGetFileStatus))
|
||||
|
||||
// Debug endpoints, not for general use
|
||||
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
|
||||
@@ -126,6 +143,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
|
||||
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
|
||||
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
|
||||
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
|
||||
postRestMux.HandleFunc("/rest/bump", withModel(m, restPostBump))
|
||||
|
||||
// A handler that splits requests between the two above and disables
|
||||
// caching
|
||||
@@ -237,6 +255,24 @@ func restGetVersion(w http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
}
|
||||
|
||||
func restGetTree(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
prefix := qs.Get("prefix")
|
||||
dirsonly := qs.Get("dirsonly") != ""
|
||||
|
||||
levels, err := strconv.Atoi(qs.Get("levels"))
|
||||
if err != nil {
|
||||
levels = -1
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
tree := m.GlobalDirectoryTree(folder, prefix, levels, dirsonly)
|
||||
|
||||
json.NewEncoder(w).Encode(tree)
|
||||
}
|
||||
|
||||
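For context, the new /rest/tree endpoint returns a nested JSON map of the global directory tree, filtered by the optional prefix, levels and dirsonly query parameters. A hedged client sketch; the folder ID, listen address and API key value are assumptions for illustration:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    req, err := http.NewRequest("GET",
        "http://127.0.0.1:8080/rest/tree?folder=default&levels=1&dirsonly=1", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("X-API-Key", "abc123") // only needed if an API key is configured
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(string(body)) // nested JSON map of directories
}
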
func restGetCompletion(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var folder = qs.Get("folder")
|
||||
@@ -277,6 +313,15 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
res["state"], res["stateChanged"] = m.State(folder)
|
||||
res["version"] = m.CurrentLocalVersion(folder) + m.RemoteLocalVersion(folder)
|
||||
|
||||
ignorePatterns, _, _ := m.GetIgnores(folder)
|
||||
res["ignorePatterns"] = false
|
||||
for _, line := range ignorePatterns {
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "//") {
|
||||
res["ignorePatterns"] = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
@@ -291,19 +336,12 @@ func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var folder = qs.Get("folder")
|
||||
|
||||
files := m.NeedFolderFilesLimited(folder, 100) // max 100 files
|
||||
progress, queued, rest := m.NeedFolderFiles(folder, 100)
|
||||
// Convert the struct to a more loose structure, and inject the size.
|
||||
output := make([]map[string]interface{}, 0, len(files))
|
||||
for _, file := range files {
|
||||
output = append(output, map[string]interface{}{
|
||||
"Name": file.Name,
|
||||
"Flags": file.Flags,
|
||||
"Modified": file.Modified,
|
||||
"Version": file.Version,
|
||||
"LocalVersion": file.LocalVersion,
|
||||
"NumBlocks": file.NumBlocks,
|
||||
"Size": protocol.BlocksToSize(file.NumBlocks),
|
||||
})
|
||||
output := map[string][]map[string]interface{}{
|
||||
"progress": toNeedSlice(progress),
|
||||
"queued": toNeedSlice(queued),
|
||||
"rest": toNeedSlice(rest),
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
@@ -322,6 +360,33 @@ func restGetDeviceStats(m *model.Model, w http.ResponseWriter, r *http.Request)
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func restGetFolderStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
var res = m.FolderStatistics()
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
}
|
||||
|
||||
func restGetFileStatus(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
file := qs.Get("file")
|
||||
withBlocks := qs.Get("blocks") != ""
|
||||
gf, _ := m.CurrentGlobalFile(folder, file)
|
||||
lf, _ := m.CurrentFolderFile(folder, file)
|
||||
|
||||
if !withBlocks {
|
||||
gf.Blocks = nil
|
||||
lf.Blocks = nil
|
||||
}
|
||||
|
||||
av := m.Availability(folder, file)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"global": gf,
|
||||
"local": lf,
|
||||
"availability": av,
|
||||
})
|
||||
}
|
||||
|
||||
func restGetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(cfg.Raw())
|
||||
@@ -334,44 +399,44 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
l.Warnln("decoding posted config:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
} else {
|
||||
if newCfg.GUI.Password != cfg.GUI().Password {
|
||||
if newCfg.GUI.Password != "" {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
|
||||
if err != nil {
|
||||
l.Warnln("bcrypting password:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
} else {
|
||||
newCfg.GUI.Password = string(hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Start or stop usage reporting as appropriate
|
||||
|
||||
if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
|
||||
// UR was enabled
|
||||
newCfg.Options.URAccepted = usageReportVersion
|
||||
newCfg.Options.URUniqueID = randomString(6)
|
||||
err := sendUsageReport(m)
|
||||
if err != nil {
|
||||
l.Infoln("Usage report:", err)
|
||||
}
|
||||
go usageReportingLoop(m)
|
||||
} else if newCfg.Options.URAccepted < curAcc {
|
||||
// UR was disabled
|
||||
newCfg.Options.URAccepted = -1
|
||||
newCfg.Options.URUniqueID = ""
|
||||
stopUsageReporting()
|
||||
}
|
||||
|
||||
// Activate and save
|
||||
|
||||
configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
|
||||
cfg.Replace(newCfg)
|
||||
cfg.Save()
|
||||
}
|
||||
|
||||
if newCfg.GUI.Password != cfg.GUI().Password {
|
||||
if newCfg.GUI.Password != "" {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
|
||||
if err != nil {
|
||||
l.Warnln("bcrypting password:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
newCfg.GUI.Password = string(hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Start or stop usage reporting as appropriate
|
||||
|
||||
if curAcc := cfg.Options().URAccepted; newCfg.Options.URAccepted > curAcc {
|
||||
// UR was enabled
|
||||
newCfg.Options.URAccepted = usageReportVersion
|
||||
newCfg.Options.URUniqueID = randomString(8)
|
||||
err := sendUsageReport(m)
|
||||
if err != nil {
|
||||
l.Infoln("Usage report:", err)
|
||||
}
|
||||
go usageReportingLoop(m)
|
||||
} else if newCfg.Options.URAccepted < curAcc {
|
||||
// UR was disabled
|
||||
newCfg.Options.URAccepted = -1
|
||||
newCfg.Options.URUniqueID = ""
|
||||
stopUsageReporting()
|
||||
}
|
||||
|
||||
// Activate and save
|
||||
|
||||
configInSync = !config.ChangeRequiresRestart(cfg.Raw(), newCfg)
|
||||
cfg.Replace(newCfg)
|
||||
cfg.Save()
|
||||
}
|
||||
|
||||
func restGetConfigInSync(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -424,7 +489,8 @@ func restGetSystem(w http.ResponseWriter, r *http.Request) {
|
||||
cpusum += p
|
||||
}
|
||||
cpuUsageLock.RUnlock()
|
||||
res["cpuPercent"] = cpusum / 10
|
||||
res["cpuPercent"] = cpusum / float64(len(cpuUsagePercent)) / float64(runtime.NumCPU())
|
||||
res["pathSeparator"] = string(filepath.Separator)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(res)
|
||||
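A worked example of the cpuPercent change above (numbers invented, assuming the ten samples implied by the old divisor): on a four-core machine a summed reading of 250 used to be reported as 25; it is now normalised by the core count as well, giving 6.25 percent of total capacity.

package main

import "fmt"

func main() {
    cpusum := 250.0
    samples := 10.0 // len(cpuUsagePercent)
    cores := 4.0    // runtime.NumCPU()

    fmt.Println(cpusum / samples)         // 25   (old calculation)
    fmt.Println(cpusum / samples / cores) // 6.25 (new calculation)
}
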
@@ -548,6 +614,10 @@ func restGetEvents(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func restGetUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
if noUpgrade {
|
||||
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
|
||||
return
|
||||
}
|
||||
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
@@ -598,7 +668,7 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
|
||||
err = upgrade.UpgradeTo(rel)
|
||||
err = upgrade.To(rel)
|
||||
if err != nil {
|
||||
l.Warnln("upgrading:", err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
@@ -614,13 +684,29 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
|
||||
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
sub := qs.Get("sub")
|
||||
err := m.ScanFolderSub(folder, sub)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
if folder != "" {
|
||||
sub := qs.Get("sub")
|
||||
err := m.ScanFolderSub(folder, sub)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
}
|
||||
} else {
|
||||
errors := m.ScanFolders()
|
||||
if len(errors) > 0 {
|
||||
http.Error(w, "Error scanning folders", 500)
|
||||
json.NewEncoder(w).Encode(errors)
|
||||
}
|
||||
}
|
||||
}
|
||||
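With the change above, POSTing to /rest/scan without a folder parameter rescans every folder and returns a JSON list of errors on failure. A hedged client sketch; the listen address and API key value are assumptions:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    req, err := http.NewRequest("POST", "http://127.0.0.1:8080/rest/scan", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("X-API-Key", "abc123")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(body))
}
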
|
||||
func restPostBump(m *model.Model, w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
folder := qs.Get("folder")
|
||||
file := qs.Get("file")
|
||||
m.BringToFront(folder, file)
|
||||
restGetNeed(m, w, r)
|
||||
}
|
||||
|
||||
func getQR(w http.ResponseWriter, r *http.Request) {
|
||||
var qs = r.URL.Query()
|
||||
var text = qs.Get("text")
|
||||
@@ -742,7 +828,24 @@ func mimeTypeForFile(file string) string {
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func toNeedSlice(fs []db.FileInfoTruncated) []map[string]interface{} {
|
||||
output := make([]map[string]interface{}, len(fs))
|
||||
for i, file := range fs {
|
||||
output[i] = map[string]interface{}{
|
||||
"Name": file.Name,
|
||||
"Flags": file.Flags,
|
||||
"Modified": file.Modified,
|
||||
"Version": file.Version,
|
||||
"LocalVersion": file.LocalVersion,
|
||||
"Size": file.Size(),
|
||||
}
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -1,24 +1,13 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -88,7 +77,7 @@ func validCsrfToken(token string) bool {
|
||||
}
|
||||
|
||||
func newCsrfToken() string {
|
||||
token := randomString(30)
|
||||
token := randomString(32)
|
||||
|
||||
csrfMut.Lock()
|
||||
csrfTokens = append(csrfTokens, token)
|
||||
@@ -140,13 +129,3 @@ func loadCsrfTokens() {
|
||||
csrfTokens = append(csrfTokens, s.Text())
|
||||
}
|
||||
}
|
||||
|
||||
func randomString(len int) string {
|
||||
bs := make([]byte, len)
|
||||
_, err := rand.Reader.Read(bs)
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
}
|
||||
|
||||
return base64.StdEncoding.EncodeToString(bs)
|
||||
}
|
||||
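A small aside on the CSRF change above: randomString(n) base64-encodes n random bytes, so the returned token is roughly 4/3 of n characters long; bumping the argument from 30 to 32 yields a 44-character token backed by 256 bits of randomness.

package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    fmt.Println(base64.StdEncoding.EncodedLen(30)) // 40-character token before
    fmt.Println(base64.StdEncoding.EncodedLen(32)) // 44-character token after
}
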
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build solaris
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build !windows,!solaris
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build windows
|
||||
|
||||
|
||||
@@ -1,17 +1,8 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify it
|
||||
// under the terms of the GNU General Public License as published by the Free
|
||||
// Software Foundation, either version 3 of the License, or (at your option)
|
||||
// any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
// more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -20,27 +11,32 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STHEAPPROFILE") != "" {
|
||||
rate := 1
|
||||
if i, err := strconv.Atoi(os.Getenv("STHEAPPROFILE")); err == nil {
|
||||
rate = i
|
||||
}
|
||||
l.Debugln("Starting heap profiling")
|
||||
go saveHeapProfiles()
|
||||
go saveHeapProfiles(rate)
|
||||
}
|
||||
}
|
||||
|
||||
func saveHeapProfiles() {
|
||||
runtime.MemProfileRate = 1
|
||||
func saveHeapProfiles(rate int) {
|
||||
runtime.MemProfileRate = rate
|
||||
var memstats, prevMemstats runtime.MemStats
|
||||
|
||||
t0 := time.Now()
|
||||
for t := range time.NewTicker(250 * time.Millisecond).C {
|
||||
startms := int(t.Sub(t0).Seconds() * 1000)
|
||||
name := fmt.Sprintf("heap-%05d.pprof", syscall.Getpid())
|
||||
for {
|
||||
runtime.ReadMemStats(&memstats)
|
||||
|
||||
if memstats.HeapInuse > prevMemstats.HeapInuse {
|
||||
fd, err := os.Create(fmt.Sprintf("heap-%05d-%07d.pprof", syscall.Getpid(), startms))
|
||||
fd, err := os.Create(name + ".tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -52,7 +48,16 @@ func saveHeapProfiles() {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_ = os.Remove(name) // Error deliberately ignored
|
||||
err = os.Rename(name+".tmp", name)
|
||||
if err != nil {
|
panic(err)
}

prevMemstats = memstats
}

time.Sleep(250 * time.Millisecond)
}
}

@@ -1,17 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main


@@ -1,17 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main


@@ -1,17 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main

@@ -19,9 +10,7 @@ import (
"crypto/tls"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
@@ -38,17 +27,18 @@ import (

"github.com/calmh/logger"
"github.com/juju/ratelimit"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/discover"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/files"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/protocol"
"github.com/syncthing/syncthing/internal/symlinks"
"github.com/syncthing/syncthing/internal/upgrade"
"github.com/syncthing/syncthing/internal/upnp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"golang.org/x/crypto/bcrypt"
)
@@ -73,6 +63,8 @@ const (
exitUpgrading = 4
)

const bepProtocolName = "bep/1.0"

var l = logger.DefaultLogger

func init() {
@@ -103,10 +95,10 @@ func init() {
}

var (
cfg *config.ConfigWrapper
cfg *config.Wrapper
myID protocol.DeviceID
confDir string
logFlags int = log.Ltime
logFlags = log.Ltime
writeRateLimit *ratelimit.Bucket
readRateLimit *ratelimit.Bucket
stop = make(chan int)
@@ -114,6 +106,7 @@ var (
externalPort int
igd *upnp.IGD
cert tls.Certificate
lans []*net.IPNet
)

const (
@@ -143,50 +136,50 @@ Development Settings
The following environment variables modify syncthing's behavior in ways that
are mostly useful for developers. Use with care.

STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.

STTRACE A comma separated string of facilities to trace. The valid
facility strings are:
STTRACE A comma separated string of facilities to trace. The valid
facility strings are:

- "beacon" (the beacon package)
- "discover" (the discover package)
- "events" (the events package)
- "files" (the files package)
- "net" (the main package; connections & network messages)
- "model" (the model package)
- "scanner" (the scanner package)
- "stats" (the stats package)
- "upnp" (the upnp package)
- "xdr" (the xdr package)
- "all" (all of the above)
- "beacon" (the beacon package)
- "discover" (the discover package)
- "events" (the events package)
- "files" (the files package)
- "net" (the main package; connections & network messages)
- "model" (the model package)
- "scanner" (the scanner package)
- "stats" (the stats package)
- "upnp" (the upnp package)
- "xdr" (the xdr package)
- "all" (all of the above)

STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.

STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.

STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
heap usage increases.
STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
heap usage increases.

STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
supported on Windows.
STBLOCKPROFILE Write block profiles to block-$pid-$timestamp.pprof every 20
seconds.

STNOUPGRADE Disable automatic upgrades.
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
supported on Windows.

GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores.`
STNOUPGRADE Disable automatic upgrades.

GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores.`
)

func init() {
rand.Seed(time.Now().UnixNano())
}

// Command line and environment options
var (
reset bool
showVersion bool
doUpgrade bool
doUpgradeCheck bool
upgradeTo string
noBrowser bool
noConsole bool
generateDir string
@@ -232,6 +225,7 @@ func main() {
flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL")

flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, defConfDir))
flag.Parse()
@@ -266,19 +260,22 @@ func main() {
}

info, err := os.Stat(dir)
if err != nil {
l.Fatalln("generate:", err)
}
if !info.IsDir() {
if err == nil && !info.IsDir() {
l.Fatalln(dir, "is not a directory")
}
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(dir, 0700)
if err != nil {
l.Fatalln("generate:", err)
}
}

cert, err := loadCert(dir, "")
if err == nil {
l.Warnln("Key exists; will not overwrite.")
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
} else {
newCertificate(dir, "")
newCertificate(dir, "", tlsDefaultCommonName)
cert, err = loadCert(dir, "")
myID = protocol.NewDeviceID(cert.Certificate[0])
if err != nil {
@@ -317,6 +314,15 @@ func main() {
// Ensure that our home directory exists.
ensureDir(confDir, 0700)

if upgradeTo != "" {
err := upgrade.ToURL(upgradeTo)
if err != nil {
l.Fatalln("Upgrade:", err) // exits 1
}
l.Okln("Upgraded from", upgradeTo)
return
}

if doUpgrade || doUpgradeCheck {
rel, err := upgrade.LatestRelease(IsBeta)
if err != nil {
@@ -332,20 +338,19 @@ func main() {

if doUpgrade {
// Use leveldb database locks to protect against concurrent upgrades
_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
_, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
if err != nil {
l.Fatalln("Cannot upgrade, database seems to be locked. Is another copy of Syncthing already running?")
}

err = upgrade.UpgradeTo(rel)
err = upgrade.To(rel)
if err != nil {
l.Fatalln("Upgrade:", err) // exits 1
}
l.Okf("Upgraded to %q", rel.Tag)
return
} else {
return
}

return
}

if reset {
@@ -376,13 +381,17 @@ func syncthingMain() {
// Ensure that that we have a certificate and key.
cert, err = loadCert(confDir, "")
if err != nil {
newCertificate(confDir, "")
newCertificate(confDir, "", tlsDefaultCommonName)
cert, err = loadCert(confDir, "")
if err != nil {
l.Fatalln("load cert:", err)
}
}

// We reinitialize the predictable RNG with our device ID, to get a
// sequence that is always the same but unique to this syncthing instance.
predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

myID = protocol.NewDeviceID(cert.Certificate[0])
l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

@@ -446,7 +455,7 @@ func syncthingMain() {

tlsCfg := &tls.Config{
Certificates: []tls.Certificate{cert},
NextProtos: []string{"bep/1.0"},
NextProtos: []string{bepProtocolName},
ClientAuth: tls.RequestClientCert,
SessionTicketsDisabled: true,
InsecureSkipVerify: true,
@@ -477,21 +486,35 @@ func syncthingMain() {
readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
}

db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
lans, _ = osutil.GetLans()
networks := make([]string, 0, len(lans))
for _, lan := range lans {
networks = append(networks, lan.String())
}
l.Infoln("Local networks:", strings.Join(networks, ", "))
}

dbFile := filepath.Join(confDir, "index")
dbOpts := &opt.Options{OpenFilesCacheCapacity: 100}
ldb, err := leveldb.OpenFile(dbFile, dbOpts)
if err != nil && errors.IsCorrupted(err) {
ldb, err = leveldb.RecoverFile(dbFile, dbOpts)
}
if err != nil {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}

// Remove database entries for folders that no longer exist in the config
folders := cfg.Folders()
for _, folder := range files.ListFolders(db) {
for _, folder := range db.ListFolders(ldb) {
if _, ok := folders[folder]; !ok {
l.Infof("Cleaning data for dropped folder %q", folder)
files.DropFolder(db, folder)
db.DropFolder(ldb, folder)
}
}

m := model.NewModel(cfg, myName, "syncthing", Version, db)
m := model.NewModel(cfg, myName, "syncthing", Version, ldb)

sanityCheckFolders(cfg, m)
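The hunk above switches the goleveldb option from CachedOpenFiles to OpenFilesCacheCapacity and adds a recovery path: if OpenFile reports a corrupted store, the database is rebuilt with RecoverFile before giving up. A minimal, self-contained sketch of that open-then-recover pattern follows; the openIndex helper and the index path are illustrative, not part of the commit.

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// openIndex opens a leveldb database and, if the on-disk data is corrupted,
// attempts to rebuild it from whatever is salvageable instead of failing.
func openIndex(path string) (*leveldb.DB, error) {
	opts := &opt.Options{OpenFilesCacheCapacity: 100}
	db, err := leveldb.OpenFile(path, opts)
	if err != nil && errors.IsCorrupted(err) {
		db, err = leveldb.RecoverFile(path, opts)
	}
	return db, err
}

func main() {
	db, err := openIndex("index.db") // example path only
	if err != nil {
		log.Fatalln("open index:", err)
	}
	defer db.Close()
	log.Println("index database opened")
}
```
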
@@ -574,7 +597,9 @@ func syncthingMain() {
if opts.URUniqueID == "" {
// Previously the ID was generated from the node ID. We now need
// to generate a new one.
opts.URUniqueID = randomString(6)
opts.URUniqueID = randomString(8)
cfg.SetOptions(opts)
cfg.Save()
}
go usageReportingLoop(m)
go func() {
@@ -596,7 +621,7 @@ func syncthingMain() {
} else if IsRelease {
go autoUpgrade()
} else {
l.Infof("No automatic upgrades; %s is not a relase version.", Version)
l.Infof("No automatic upgrades; %s is not a release version.", Version)
}
}

@@ -609,7 +634,7 @@ func syncthingMain() {
os.Exit(code)
}

func setupGUI(cfg *config.ConfigWrapper, m *model.Model) {
func setupGUI(cfg *config.Wrapper, m *model.Model) {
opts := cfg.Options()
guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)

@@ -644,13 +669,15 @@ func setupGUI(cfg *config.ConfigWrapper, m *model.Model) {
}
if opts.StartBrowser && !noBrowser && !stRestarting {
urlOpen := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostOpen, strconv.Itoa(addr.Port)))
openURL(urlOpen)
// Can potentially block if the utility we are invoking doesn't
// fork, and just execs, hence keep it in it's own routine.
go openURL(urlOpen)
}
}
}
}

func sanityCheckFolders(cfg *config.ConfigWrapper, m *model.Model) {
func sanityCheckFolders(cfg *config.Wrapper, m *model.Model) {
nextFolder:
for id, folder := range cfg.Folders() {
if folder.Invalid != "" {
@@ -755,7 +782,7 @@ func setupUPnP() {
if len(igds) > 0 {
// Configure the first discovered IGD only. This is a work-around until we have a better mechanism
// for handling multiple IGDs, which will require changes to the global discovery service
igd = igds[0]
igd = &igds[0]

externalPort = setupExternalPort(igd, port)
if externalPort == 0 {
@@ -779,12 +806,9 @@ func setupExternalPort(igd *upnp.IGD, port int) int {
return 0
}

// We seed the random number generator with the node ID to get a
// repeatable sequence of random external ports.
rnd := rand.NewSource(certSeed(cert.Certificate[0]))
for i := 0; i < 10; i++ {
r := 1024 + int(rnd.Int63()%(65535-1024))
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", cfg.Options().UPnPLease*60)
r := 1024 + predictableRandom.Intn(65535-1024)
err := igd.AddPortMapping(upnp.TCP, r, port, fmt.Sprintf("syncthing-%d", r), cfg.Options().UPnPLease*60)
if err == nil {
return r
}
@@ -806,7 +830,7 @@ func renewUPnP(port int) {
if len(igds) > 0 {
// Configure the first discovered IGD only. This is a work-around until we have a better mechanism
// for handling multiple IGDs, which will require changes to the global discovery service
igd = igds[0]
igd = &igds[0]
} else {
if debugNet {
l.Debugln("Failed to discover IGD during UPnP port mapping renewal.")
@@ -849,11 +873,24 @@ func renewUPnP(port int) {
}

func resetFolders() {
confDir, err := osutil.ExpandTilde(confDir)
if err != nil {
log.Fatal(err)
}

cfgFile := filepath.Join(confDir, "config.xml")
cfg, err := config.Load(cfgFile, myID)
if err != nil {
log.Fatal(err)
}

suffix := fmt.Sprintf(".syncthing-reset-%d", time.Now().UnixNano())
for _, folder := range cfg.Folders() {
if _, err := os.Stat(folder.Path); err == nil {
l.Infof("Reset: Moving %s -> %s", folder.Path, folder.Path+suffix)
os.Rename(folder.Path, folder.Path+suffix)
base := filepath.Base(folder.Path)
dir := filepath.Dir(filepath.Join(folder.Path, ".."))
l.Infof("Reset: Moving %s -> %s", folder.Path, filepath.Join(dir, base+suffix))
os.Rename(folder.Path, filepath.Join(dir, base+suffix))
}
}

@@ -871,234 +908,6 @@ func shutdown() {
stop <- exitSuccess
}

func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
var conns = make(chan *tls.Conn)

// Listen
for _, addr := range cfg.Options().ListenAddress {
go listenTLS(conns, addr, tlsCfg)
}

// Connect
go dialTLS(m, conns, tlsCfg)

next:
for conn := range conns {
certs := conn.ConnectionState().PeerCertificates
if cl := len(certs); cl != 1 {
l.Infof("Got peer certificate list of length %d != 1 from %s; protocol error", cl, conn.RemoteAddr())
conn.Close()
continue
}
remoteCert := certs[0]
remoteID := protocol.NewDeviceID(remoteCert.Raw)

if remoteID == myID {
l.Infof("Connected to myself (%s) - should not happen", remoteID)
conn.Close()
continue
}

if m.ConnectedTo(remoteID) {
l.Infof("Connected to already connected device (%s)", remoteID)
conn.Close()
continue
}

for deviceID, deviceCfg := range cfg.Devices() {
if deviceID == remoteID {
// Verify the name on the certificate. By default we set it to
// "syncthing" when generating, but the user may have replaced
// the certificate and used another name.
certName := deviceCfg.CertName
if certName == "" {
certName = "syncthing"
}
err := remoteCert.VerifyHostname(certName)
if err != nil {
// Incorrect certificate name is something the user most
// likely wants to know about, since it's an advanced
// config. Warn instead of Info.
l.Warnf("Bad certificate from %s (%v): %v", remoteID, conn.RemoteAddr(), err)
conn.Close()
continue next
}

// If rate limiting is set, we wrap the connection in a
// limiter.
var wr io.Writer = conn
if writeRateLimit != nil {
wr = &limitedWriter{conn, writeRateLimit}
}

var rd io.Reader = conn
if readRateLimit != nil {
rd = &limitedReader{conn, readRateLimit}
}

name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)

l.Infof("Established secure connection to %s at %s", remoteID, name)
if debugNet {
l.Debugf("cipher suite %04X", conn.ConnectionState().CipherSuite)
}
events.Default.Log(events.DeviceConnected, map[string]string{
"id": remoteID.String(),
"addr": conn.RemoteAddr().String(),
})

m.AddConnection(conn, protoConn)
continue next
}
}

events.Default.Log(events.DeviceRejected, map[string]string{
"device": remoteID.String(),
"address": conn.RemoteAddr().String(),
})
l.Infof("Connection from %s with unknown device ID %s; ignoring", conn.RemoteAddr(), remoteID)
conn.Close()
}
}
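listenConnect above wraps the raw connection in limitedWriter and limitedReader when rate limits are configured, but those types are not part of this diff. A hypothetical sketch of what such a wrapper might look like, assuming github.com/juju/ratelimit (the same token-bucket package used for writeRateLimit and readRateLimit above); the field names and the rates in the example are made up.

```go
package main

import (
	"io"

	"github.com/juju/ratelimit"
)

// limitedWriter is a sketch of a rate-limited io.Writer: each write first
// waits until the token bucket has enough tokens for the whole buffer.
type limitedWriter struct {
	w      io.Writer
	bucket *ratelimit.Bucket
}

func (lw *limitedWriter) Write(p []byte) (int, error) {
	lw.bucket.Wait(int64(len(p))) // block until len(p) bytes are allowed
	return lw.w.Write(p)
}

func main() {
	// Example: cap writes to roughly 50 KiB/s with a 100 KiB burst.
	lw := &limitedWriter{
		w:      io.Discard,
		bucket: ratelimit.NewBucketWithRate(50*1024, 100*1024),
	}
	lw.Write([]byte("hello"))
}
```
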
func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
if debugNet {
l.Debugln("listening on", addr)
}

tcaddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
l.Fatalln("listen (BEP):", err)
}
listener, err := net.ListenTCP("tcp", tcaddr)
if err != nil {
l.Fatalln("listen (BEP):", err)
}

for {
conn, err := listener.Accept()
if err != nil {
l.Warnln("Accepting connection:", err)
continue
}

if debugNet {
l.Debugln("connect from", conn.RemoteAddr())
}

tcpConn := conn.(*net.TCPConn)
setTCPOptions(tcpConn)

tc := tls.Server(conn, tlsCfg)
err = tc.Handshake()
if err != nil {
l.Infoln("TLS handshake:", err)
tc.Close()
continue
}

conns <- tc
}

}

func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
var delay time.Duration = 1 * time.Second
for {
nextDevice:
for deviceID, deviceCfg := range cfg.Devices() {
if deviceID == myID {
continue
}

if m.ConnectedTo(deviceID) {
continue
}

var addrs []string
for _, addr := range deviceCfg.Addresses {
if addr == "dynamic" {
if discoverer != nil {
t := discoverer.Lookup(deviceID)
if len(t) == 0 {
continue
}
addrs = append(addrs, t...)
}
} else {
addrs = append(addrs, addr)
}
}

for _, addr := range addrs {
host, port, err := net.SplitHostPort(addr)
if err != nil && strings.HasPrefix(err.Error(), "missing port") {
// addr is on the form "1.2.3.4"
addr = net.JoinHostPort(addr, "22000")
} else if err == nil && port == "" {
// addr is on the form "1.2.3.4:"
addr = net.JoinHostPort(host, "22000")
}
if debugNet {
l.Debugln("dial", deviceCfg.DeviceID, addr)
}

raddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
if debugNet {
l.Debugln(err)
}
continue
}

conn, err := net.DialTCP("tcp", nil, raddr)
if err != nil {
if debugNet {
l.Debugln(err)
}
continue
}

setTCPOptions(conn)

tc := tls.Client(conn, tlsCfg)
err = tc.Handshake()
if err != nil {
l.Infoln("TLS handshake:", err)
tc.Close()
continue
}

conns <- tc
continue nextDevice
}
}

time.Sleep(delay)
delay *= 2
if maxD := time.Duration(cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
delay = maxD
}
}
}
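dialTLS above sleeps between passes over the device list, doubling the delay each time and capping it at ReconnectIntervalS. A standalone sketch of that capped exponential backoff; the address, dial timeout and 60-second cap are example values only, not taken from the commit.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// dialWithBackoff retries a TCP dial until it succeeds, doubling the wait
// after every failure but never waiting longer than maxDelay.
func dialWithBackoff(addr string, maxDelay time.Duration) net.Conn {
	delay := time.Second
	for {
		conn, err := net.DialTimeout("tcp", addr, 10*time.Second)
		if err == nil {
			return conn
		}
		fmt.Println("dial failed, retrying in", delay, "-", err)
		time.Sleep(delay)
		delay *= 2 // exponential growth...
		if delay > maxDelay {
			delay = maxDelay // ...bounded by the cap
		}
	}
}

func main() {
	conn := dialWithBackoff("127.0.0.1:22000", 60*time.Second)
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```
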
func setTCPOptions(conn *net.TCPConn) {
var err error
if err = conn.SetLinger(0); err != nil {
l.Infoln(err)
}
if err = conn.SetNoDelay(false); err != nil {
l.Infoln(err)
}
if err = conn.SetKeepAlivePeriod(60 * time.Second); err != nil {
l.Infoln(err)
}
if err = conn.SetKeepAlive(true); err != nil {
l.Infoln(err)
}
}

func discovery(extPort int) *discover.Discoverer {
opts := cfg.Options()
disc := discover.NewDiscoverer(myID, opts.ListenAddress)
@@ -1135,7 +944,10 @@ func ensureDir(dir string, mode int) {
func getDefaultConfDir() (string, error) {
switch runtime.GOOS {
case "windows":
return filepath.Join(os.Getenv("LocalAppData"), "Syncthing"), nil
if p := os.Getenv("LocalAppData"); p != "" {
return filepath.Join(p, "Syncthing"), nil
}
return filepath.Join(os.Getenv("AppData"), "Syncthing"), nil

case "darwin":
return osutil.ExpandTilde("~/Library/Application Support/Syncthing")
@@ -1143,9 +955,8 @@ func getDefaultConfDir() (string, error) {
default:
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
return filepath.Join(xdgCfg, "syncthing"), nil
} else {
return osutil.ExpandTilde("~/.config/syncthing")
}
return osutil.ExpandTilde("~/.config/syncthing")
}
}
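The getDefaultConfDir change above adds an AppData fallback when LocalAppData is unset and drops the redundant else around the XDG branch. A self-contained sketch of the same per-OS lookup order; the helper name is arbitrary and os.UserHomeDir stands in for osutil.ExpandTilde.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

func defaultConfDir() (string, error) {
	switch runtime.GOOS {
	case "windows":
		// Prefer the per-user local profile, fall back to the roaming one.
		if p := os.Getenv("LocalAppData"); p != "" {
			return filepath.Join(p, "Syncthing"), nil
		}
		return filepath.Join(os.Getenv("AppData"), "Syncthing"), nil
	case "darwin":
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		return filepath.Join(home, "Library/Application Support/Syncthing"), nil
	default:
		// XDG first, then the conventional dotfile location.
		if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
			return filepath.Join(xdg, "syncthing"), nil
		}
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		return filepath.Join(home, ".config/syncthing"), nil
	}
}

func main() {
	dir, err := defaultConfDir()
	fmt.Println(dir, err)
}
```
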
@@ -1233,36 +1044,46 @@ func standbyMonitor() {
}

func autoUpgrade() {
var skipped bool
interval := time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour
timer := time.NewTimer(0)
sub := events.Default.Subscribe(events.DeviceConnected)
for {
if skipped {
time.Sleep(interval)
} else {
skipped = true
select {
case event := <-sub.C():
data, ok := event.Data.(map[string]string)
if !ok || data["clientName"] != "syncthing" || upgrade.CompareVersions(data["clientVersion"], Version) != upgrade.Newer {
continue
}
l.Infof("Connected to device %s with a newer version (current %q < remote %q). Checking for upgrades.", data["id"], Version, data["clientVersion"])
case <-timer.C:
}

rel, err := upgrade.LatestRelease(IsBeta)
if err == upgrade.ErrUpgradeUnsupported {
events.Default.Unsubscribe(sub)
return
}
if err != nil {
// Don't complain too loudly here; we might simply not have
// internet connectivity, or the upgrade server might be down.
l.Infoln("Automatic upgrade:", err)
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
continue
}

if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
if upgrade.CompareVersions(rel.Tag, Version) != upgrade.Newer {
// Skip equal, older or majorly newer (incompatible) versions
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
continue
}

l.Infof("Automatic upgrade (current %q < latest %q)", Version, rel.Tag)
err = upgrade.UpgradeTo(rel)
err = upgrade.To(rel)
if err != nil {
l.Warnln("Automatic upgrade:", err)
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
continue
}
events.Default.Unsubscribe(sub)
l.Warnf("Automatically upgraded to version %q. Restarting in 1 minute.", rel.Tag)
time.Sleep(time.Minute)
stop <- exitUpgrading
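The reworked autoUpgrade above blocks on either a DeviceConnected event from a newer peer or a timer, and re-arms the timer with AutoUpgradeIntervalH after every skipped or failed check. The same control flow, reduced to plain channels; checkForUpgrade and the event channel are placeholders rather than the internal events/upgrade APIs.

```go
package main

import (
	"fmt"
	"time"
)

// checkForUpgrade is a placeholder for the real release lookup and install.
func checkForUpgrade() error {
	return fmt.Errorf("no upgrade available")
}

// upgradeLoop waits for either an external trigger or the periodic timer,
// runs a check, and re-arms the timer when the check does not succeed.
func upgradeLoop(events <-chan string, interval time.Duration) {
	timer := time.NewTimer(0) // fire immediately on the first pass
	for {
		select {
		case ev := <-events:
			fmt.Println("peer event triggered a check:", ev)
		case <-timer.C:
			fmt.Println("periodic check")
		}
		if err := checkForUpgrade(); err != nil {
			timer.Reset(interval) // try again later, or sooner on an event
			continue
		}
		return // upgraded; the caller would restart the process
	}
}

func main() {
	events := make(chan string)
	go upgradeLoop(events, time.Hour)
	time.Sleep(100 * time.Millisecond)
}
```
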
@@ -1,17 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main

@@ -19,10 +10,10 @@ import (
"os"
"testing"

"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/files"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/model"
"github.com/syncthing/syncthing/internal/protocol"

"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/storage"
@@ -44,11 +35,11 @@ func TestSanityCheck(t *testing.T) {
}
}

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)

// Case 1 - new folder, directory and marker created

m := model.NewModel(cfg, "device", "syncthing", "dev", db)
m := model.NewModel(cfg, "device", "syncthing", "dev", ldb)
sanityCheckFolders(cfg, m)

if cfg.Folders()["folder"].Invalid != "" {
@@ -75,7 +66,7 @@ func TestSanityCheck(t *testing.T) {
Folders: []config.FolderConfiguration{fcfg},
})

m = model.NewModel(cfg, "device", "syncthing", "dev", db)
m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
sanityCheckFolders(cfg, m)

if cfg.Folders()["folder"].Invalid != "" {
@@ -91,12 +82,12 @@ func TestSanityCheck(t *testing.T) {

// Case 3 - marker missing

set := files.NewSet("folder", db)
set := db.NewFileSet("folder", ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})

m = model.NewModel(cfg, "device", "syncthing", "dev", db)
m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
sanityCheckFolders(cfg, m)

if cfg.Folders()["folder"].Invalid != "folder marker missing" {
@@ -110,7 +101,7 @@ func TestSanityCheck(t *testing.T) {
Folders: []config.FolderConfiguration{fcfg},
})

m = model.NewModel(cfg, "device", "syncthing", "dev", db)
m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
sanityCheckFolders(cfg, m)

if cfg.Folders()["folder"].Invalid != "folder path missing" {

@@ -1,17 +1,8 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package main

@@ -22,7 +13,7 @@ import (
"strings"
)

func memorySize() (uint64, error) {
func memorySize() (int64, error) {
cmd := exec.Command("sysctl", "hw.memsize")
out, err := cmd.Output()
if err != nil {
@@ -32,7 +23,7 @@ func memorySize() (uint64, error) {
if len(fs) != 2 {
return 0, errors.New("sysctl parse error")
}
bytes, err := strconv.ParseUint(fs[1], 10, 64)
bytes, err := strconv.ParseInt(fs[1], 10, 64)
if err != nil {
return 0, err
}