Compare commits
529 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1188ebbb7b | ||
|
|
76b903b2e0 | ||
|
|
be38c2111f | ||
|
|
1de787fab8 | ||
|
|
81f683a61c | ||
|
|
db6f68d031 | ||
|
|
d0a1c805e9 | ||
|
|
00a654845f | ||
|
|
04dad8485a | ||
|
|
0b1475169f | ||
|
|
6ec4fbc82b | ||
|
|
18cc7a663b | ||
|
|
cf5febad47 | ||
|
|
42849af5a8 | ||
|
|
e6364407a9 | ||
|
|
480b78f2c8 | ||
|
|
fa8f339478 | ||
|
|
7776839c82 | ||
|
|
7114cacb85 | ||
|
|
e52be3d83e | ||
|
|
c9cf01e0b6 | ||
|
|
dcbf68e104 | ||
|
|
c2d8c07137 | ||
|
|
a4ed50ca85 | ||
|
|
b3788c8ea0 | ||
|
|
946c074a41 | ||
|
|
19f79afb0f | ||
|
|
af3b6f9c83 | ||
|
|
fbe42c156d | ||
|
|
a1f6cbd354 | ||
|
|
a4f052ad31 | ||
|
|
ea87bcefd6 | ||
|
|
0655991a19 | ||
|
|
f368d2278f | ||
|
|
1eb6db6ca8 | ||
|
|
a25b63e2df | ||
|
|
ffe7a2fcd7 | ||
|
|
08b5a7908f | ||
|
|
a8cd9d0154 | ||
|
|
297240facf | ||
|
|
a022b0cfff | ||
|
|
72026db599 | ||
|
|
aafc96f58f | ||
|
|
7c7e8648ff | ||
|
|
24e2ce0764 | ||
|
|
d7cb4d407b | ||
|
|
66a506e72b | ||
|
|
25a7b0a6f8 | ||
|
|
7aaa1dd8a3 | ||
|
|
2a6f164923 | ||
|
|
0f28626bb4 | ||
|
|
6ed22d0885 | ||
|
|
6715b91a6c | ||
|
|
694da60659 | ||
|
|
47fa4b0a2c | ||
|
|
8ab6b60778 | ||
|
|
e1a4f81e50 | ||
|
|
7b7e35d339 | ||
|
|
3176629410 | ||
|
|
e3ccc45d19 | ||
|
|
beec9e834e | ||
|
|
f6f0486ff9 | ||
|
|
518f446d31 | ||
|
|
fbbd510088 | ||
|
|
e440d30028 | ||
|
|
44d30c83bf | ||
|
|
7ff7b55732 | ||
|
|
44346b3a5a | ||
|
|
23a538d61a | ||
|
|
dcb5026f33 | ||
|
|
778ff9daa9 | ||
|
|
ce9dc809bc | ||
|
|
59370588dd | ||
|
|
7d434aa9c4 | ||
|
|
59ce7c0424 | ||
|
|
9a0e5a7c18 | ||
|
|
8d0019595f | ||
|
|
6ff74cfcab | ||
|
|
aa50ef4069 | ||
|
|
fa0101bd60 | ||
|
|
21f5b16e47 | ||
|
|
223a835f33 | ||
|
|
223e14b0d0 | ||
|
|
a58f69be04 | ||
|
|
e194eb1f69 | ||
|
|
672824641b | ||
|
|
6d357211b2 | ||
|
|
8e39e2889d | ||
|
|
a9ee4bb9f1 | ||
|
|
80fd6c2400 | ||
|
|
3cbe7d40d1 | ||
|
|
af0bc95de5 | ||
|
|
4bf3e7485b | ||
|
|
b701de60ce | ||
|
|
7ef2743964 | ||
|
|
a165838cbd | ||
|
|
3c77b8388c | ||
|
|
9d16f4545d | ||
|
|
d57e6808cc | ||
|
|
b71cc8a580 | ||
|
|
ac3b03881a | ||
|
|
b0d03d1f1c | ||
|
|
a2dcffcca2 | ||
|
|
9323f0faf8 | ||
|
|
f343c8ba36 | ||
|
|
502bee9a09 | ||
|
|
379e2119a8 | ||
|
|
89a29946f9 | ||
|
|
20a94fafa7 | ||
|
|
99ddf1e4ab | ||
|
|
fb778218f5 | ||
|
|
55fc3cb2c5 | ||
|
|
b779e22205 | ||
|
|
bb5b1f8f01 | ||
|
|
c1a96d4900 | ||
|
|
de298da532 | ||
|
|
6f5ca53f99 | ||
|
|
d507126101 | ||
|
|
9a25df01fe | ||
|
|
11b9212948 | ||
|
|
b4e2914b70 | ||
|
|
09b7348595 | ||
|
|
d2bb6e0c0a | ||
|
|
8632a03662 | ||
|
|
e71c78ae84 | ||
|
|
03a8027efc | ||
|
|
b7e186b370 | ||
|
|
4a69f3987f | ||
|
|
343dc486e0 | ||
|
|
5aacfd1639 | ||
|
|
06e63aedea | ||
|
|
0320194757 | ||
|
|
1753771356 | ||
|
|
bc794e7c15 | ||
|
|
eefcecc7ce | ||
|
|
3795a786c9 | ||
|
|
855a1bef89 | ||
|
|
6a67921e40 | ||
|
|
8709fec517 | ||
|
|
48245effdf | ||
|
|
16063933d1 | ||
|
|
d317f197be | ||
|
|
8ac862f50a | ||
|
|
0e996c4664 | ||
|
|
287cfee73c | ||
|
|
a6c465e929 | ||
|
|
becb5ab1dc | ||
|
|
49170bf2d8 | ||
|
|
b1205db7ac | ||
|
|
ff0cd413e6 | ||
|
|
7a56e4a0e5 | ||
|
|
d17608d0a0 | ||
|
|
0af216fea0 | ||
|
|
1287433a99 | ||
|
|
56a9964101 | ||
|
|
532b4383bf | ||
|
|
f9e2623fdc | ||
|
|
eacae83886 | ||
|
|
5fc53f59c7 | ||
|
|
7035ea3ab7 | ||
|
|
d67c0a1eda | ||
|
|
36c6a1955f | ||
|
|
f792989d9b | ||
|
|
ee398f17e1 | ||
|
|
8c4723ff43 | ||
|
|
01ae866d58 | ||
|
|
3b8ae33fe3 | ||
|
|
6f63909c65 | ||
|
|
1612baca92 | ||
|
|
4970bd7f65 | ||
|
|
a775dd2b79 | ||
|
|
137894348b | ||
|
|
ac40b27c79 | ||
|
|
9d756525ce | ||
|
|
6361172bea | ||
|
|
56b6383407 | ||
|
|
46fa5a374b | ||
|
|
7373d2eb3c | ||
|
|
4453236949 | ||
|
|
c2dc4a8e06 | ||
|
|
92a23da3ec | ||
|
|
242db26343 | ||
|
|
87701339fe | ||
|
|
4669ce0766 | ||
|
|
9bb5988b4e | ||
|
|
c513171014 | ||
|
|
da5010d37a | ||
|
|
e6b78e5d56 | ||
|
|
410d700ae3 | ||
|
|
fc173bf679 | ||
|
|
72154aa668 | ||
|
|
31b5156191 | ||
|
|
ebce5d07ac | ||
|
|
915e1ac7de | ||
|
|
b78bfc0a43 | ||
|
|
30436741a7 | ||
|
|
98734375f2 | ||
|
|
37816e3818 | ||
|
|
4bc2b3f369 | ||
|
|
00be2bf18d | ||
|
|
44290a66b7 | ||
|
|
f6cc344623 | ||
|
|
a89d487510 | ||
|
|
a0ec4467fd | ||
|
|
7dddc0de9e | ||
|
|
e7280f1eb5 | ||
|
|
bf7fcc612d | ||
|
|
cff9bbc9c5 | ||
|
|
fddca3d2d6 | ||
|
|
9db49fb45e | ||
|
|
891409aedf | ||
|
|
77e47066ed | ||
|
|
852759f904 | ||
|
|
1dbc310c9b | ||
|
|
86ca58e2a9 | ||
|
|
22280db5db | ||
|
|
8e060e23e3 | ||
|
|
6e07742fe9 | ||
|
|
04d5032055 | ||
|
|
73ae87fad1 | ||
|
|
cd05282369 | ||
|
|
ee94d53bda | ||
|
|
922e1407c2 | ||
|
|
2ea22b1850 | ||
|
|
2c1323ece6 | ||
|
|
adb7fb43cb | ||
|
|
d59fd9c22d | ||
|
|
6f743f3138 | ||
|
|
5a7fad0bcd | ||
|
|
5d2414dfa9 | ||
|
|
bef2425025 | ||
|
|
e8b4286c93 | ||
|
|
2e9bf0b67c | ||
|
|
935c273c8f | ||
|
|
b993b41847 | ||
|
|
1be40cc4fa | ||
|
|
d628b731d1 | ||
|
|
21e116aa45 | ||
|
|
d77d8ff803 | ||
|
|
31f64186ae | ||
|
|
1a703efa78 | ||
|
|
8b7b0a03eb | ||
|
|
0761d804a4 | ||
|
|
3ad42d9279 | ||
|
|
bd41e21c26 | ||
|
|
10fe23b8f2 | ||
|
|
39899e40bf | ||
|
|
5d337bb24f | ||
|
|
dd5909568f | ||
|
|
38166e976f | ||
|
|
d6a7ffe0d4 | ||
|
|
2ebc6996a2 | ||
|
|
2e840134d2 | ||
|
|
66e1be33cf | ||
|
|
591959261c | ||
|
|
459930df09 | ||
|
|
674fc566bb | ||
|
|
09832abe50 | ||
|
|
eabd2fc936 | ||
|
|
6720906ee5 | ||
|
|
abb96802cb | ||
|
|
29fa05ae05 | ||
|
|
7b43ba809b | ||
|
|
49387f9494 | ||
|
|
953482de53 | ||
|
|
8cf3a7aeda | ||
|
|
175f65aabc | ||
|
|
b8c5cf1142 | ||
|
|
236f121c4e | ||
|
|
94a392144b | ||
|
|
2467678bd4 | ||
|
|
e87c1abd4e | ||
|
|
dffc34559b | ||
|
|
80f2a9a6bf | ||
|
|
4aa6ecb122 | ||
|
|
ccfcdf7f48 | ||
|
|
4eb23a38b1 | ||
|
|
cb38213444 | ||
|
|
842b6111db | ||
|
|
ea54525a33 | ||
|
|
893cc025f9 | ||
|
|
b81c8d2e1b | ||
|
|
4b07535e86 | ||
|
|
0d2fe320a7 | ||
|
|
f294113d01 | ||
|
|
e9063c639a | ||
|
|
8d6dedc15b | ||
|
|
1bc4c1a8ac | ||
|
|
1c7af1a72e | ||
|
|
e61f424ade | ||
|
|
6d3aae32bc | ||
|
|
fa1cfd94d0 | ||
|
|
0155b6f841 | ||
|
|
f6953624dd | ||
|
|
1a5f524ae4 | ||
|
|
a4cd4cc253 | ||
|
|
1a35c440e8 | ||
|
|
2c6c84ac61 | ||
|
|
bd666daf82 | ||
|
|
ca3831c4f5 | ||
|
|
bbe0d34f43 | ||
|
|
dd364c962f | ||
|
|
50068b0b0f | ||
|
|
c49453c519 | ||
|
|
52c7804f32 | ||
|
|
19b4f3bfb4 | ||
|
|
f3ac421266 | ||
|
|
7533a61203 | ||
|
|
6355a7019b | ||
|
|
490464e170 | ||
|
|
467d338fe4 | ||
|
|
6130578d18 | ||
|
|
4389bb037d | ||
|
|
2eb8a9ef56 | ||
|
|
393798098c | ||
|
|
668eb7c398 | ||
|
|
0937f85534 | ||
|
|
cf64376dca | ||
|
|
5a98af622d | ||
|
|
4f5d0b46f7 | ||
|
|
492e92d65d | ||
|
|
181939c841 | ||
|
|
b678b4e048 | ||
|
|
1934b3a5b6 | ||
|
|
cc1d122352 | ||
|
|
a4f0b85462 | ||
|
|
7b4e1e9055 | ||
|
|
4c3cd4c9e3 | ||
|
|
46e913dc23 | ||
|
|
8f580b13df | ||
|
|
a551686d37 | ||
|
|
432c78079b | ||
|
|
f5f0e46016 | ||
|
|
b6f32b6e45 | ||
|
|
66f480519b | ||
|
|
8044522691 | ||
|
|
c6a67bd203 | ||
|
|
c6881b6d02 | ||
|
|
4489bec6ef | ||
|
|
3d71e68696 | ||
|
|
783d2da4a8 | ||
|
|
6be4b49999 | ||
|
|
68185dd93c | ||
|
|
d01ea9d6fb | ||
|
|
d91e6023eb | ||
|
|
17ed01a0c9 | ||
|
|
4b6c2d0d3d | ||
|
|
46c07bb207 | ||
|
|
eaa805b9f0 | ||
|
|
436fd0b88e | ||
|
|
f706d3c393 | ||
|
|
c58eb1d47a | ||
|
|
b4f9a55e6e | ||
|
|
1d17891286 | ||
|
|
6a3f3f5577 | ||
|
|
29913dd1e4 | ||
|
|
690837dbe5 | ||
|
|
82e80a479a | ||
|
|
bc508aee7b | ||
|
|
95247f7740 | ||
|
|
e5731229c7 | ||
|
|
52c74ad866 | ||
|
|
a28f890e83 | ||
|
|
96afcd90e3 | ||
|
|
ea61f8f597 | ||
|
|
2e44473ce4 | ||
|
|
26d6969384 | ||
|
|
2dbde224d9 | ||
|
|
8d7ed9f8bf | ||
|
|
1250850492 | ||
|
|
ebfef15fb0 | ||
|
|
ad418abf91 | ||
|
|
c7d51a26f6 | ||
|
|
2c01cc000e | ||
|
|
175769b53e | ||
|
|
22f193f042 | ||
|
|
55da600433 | ||
|
|
96b5c2ae00 | ||
|
|
b24a9e57fd | ||
|
|
07722dc33d | ||
|
|
f39f816a98 | ||
|
|
bc5b95be8a | ||
|
|
845f31b98f | ||
|
|
89b6c32cee | ||
|
|
6ee36fe361 | ||
|
|
77572d0aee | ||
|
|
37b79735bf | ||
|
|
9d9ad6de88 | ||
|
|
20b925abec | ||
|
|
7d00722bbf | ||
|
|
4ea600d34e | ||
|
|
b61d7c2428 | ||
|
|
bcc5d7c00f | ||
|
|
4a36cca703 | ||
|
|
f83ae630c1 | ||
|
|
5894f35364 | ||
|
|
c5acbf7e22 | ||
|
|
567aaf87c6 | ||
|
|
e660d683a0 | ||
|
|
685306c386 | ||
|
|
5e04274d84 | ||
|
|
3357fded14 | ||
|
|
618fc54ac2 | ||
|
|
339e058b64 | ||
|
|
102027a343 | ||
|
|
0d1df6bec3 | ||
|
|
925f60d9c3 | ||
|
|
8b3f5fda07 | ||
|
|
ac17b2c584 | ||
|
|
c67c861dc6 | ||
|
|
09ba9e6259 | ||
|
|
7775166477 | ||
|
|
0e167f5c24 | ||
|
|
a310a32371 | ||
|
|
c00e26be81 | ||
|
|
ce1a5cd2ce | ||
|
|
5c8a28d717 | ||
|
|
59c5d984af | ||
|
|
c885903ff2 | ||
|
|
e4403ca396 | ||
|
|
04912ea888 | ||
|
|
103238066d | ||
|
|
7e4f08c033 | ||
|
|
d47d82d8e1 | ||
|
|
9b9b44dd65 | ||
|
|
dc5627a2ef | ||
|
|
c1dfae1a6e | ||
|
|
7b5e4ab426 | ||
|
|
26a44068d8 | ||
|
|
602b12dcf5 | ||
|
|
a91a836224 | ||
|
|
969d7c802d | ||
|
|
4e196d408a | ||
|
|
8450ab8dab | ||
|
|
168889d999 | ||
|
|
e1339628d9 | ||
|
|
1ee190e844 | ||
|
|
aadcfed17d | ||
|
|
8f99f6eb66 | ||
|
|
a51b948f45 | ||
|
|
425f61cf34 | ||
|
|
87cc2d2313 | ||
|
|
0e2132ad3e | ||
|
|
7d9df5abc6 | ||
|
|
118cba4d9b | ||
|
|
3b2adc9a3e | ||
|
|
009b5bc72b | ||
|
|
9b541a28e6 | ||
|
|
3533429563 | ||
|
|
500230af51 | ||
|
|
4a2cbc1715 | ||
|
|
61f8fdd9e8 | ||
|
|
cfdca9f702 | ||
|
|
cbe24d0c61 | ||
|
|
50f0da6793 | ||
|
|
0b7ab0a095 | ||
|
|
3cacb48f3c | ||
|
|
f6a58151cb | ||
|
|
3404393974 | ||
|
|
6965812d79 | ||
|
|
78fb7fe9f9 | ||
|
|
24bcf6a088 | ||
|
|
25d0a363a8 | ||
|
|
d7c8075862 | ||
|
|
041b97dd25 | ||
|
|
9b85a6fb7c | ||
|
|
f407ff8861 | ||
|
|
a413b83c01 | ||
|
|
81f4de965f | ||
|
|
030b1f3467 | ||
|
|
b7a180114e | ||
|
|
4c9a26dbca | ||
|
|
e611828249 | ||
|
|
e80a9b0075 | ||
|
|
9370f9cae4 | ||
|
|
604f2c9161 | ||
|
|
4d9ca822a7 | ||
|
|
d1f3d95c96 | ||
|
|
efa0a06947 | ||
|
|
11eb241c8f | ||
|
|
ebef239a06 | ||
|
|
3d5507451b | ||
|
|
98a13204b2 | ||
|
|
c318fdc94b | ||
|
|
d0229b62da | ||
|
|
37ad20a71b | ||
|
|
fcd6ebb06e | ||
|
|
dc9c86e3a1 | ||
|
|
6bc6ae2d28 | ||
|
|
f8bedc55e5 | ||
|
|
f376c79f7f | ||
|
|
a98824b4cf | ||
|
|
860fbe48dd | ||
|
|
9d06132743 | ||
|
|
51eea3f90b | ||
|
|
27c70bdf07 | ||
|
|
f8abb8e541 | ||
|
|
c8346d0581 | ||
|
|
7aaea6d005 | ||
|
|
e3911bacde | ||
|
|
962eaa8a4b | ||
|
|
bfba18fdcb | ||
|
|
175669c61e | ||
|
|
3599b98dca | ||
|
|
d1c3be3251 | ||
|
|
b9f83c7780 | ||
|
|
cbf73ef29e | ||
|
|
db6d3b495b | ||
|
|
6ea8e2525a | ||
|
|
29296ec998 | ||
|
|
bdd265a1b1 | ||
|
|
2c9df7aad1 | ||
|
|
1fca248d4c | ||
|
|
99081ea2a0 | ||
|
|
1f62247c7e | ||
|
|
6415d1a6a5 | ||
|
|
926b08c197 | ||
|
|
aff41d0b08 | ||
|
|
5d9c968614 | ||
|
|
c020cf05e1 | ||
|
|
09e8d85b1e | ||
|
|
4d3eb134a2 | ||
|
|
b92df85893 | ||
|
|
545025ed2b | ||
|
|
3158962506 | ||
|
|
c314f74de6 | ||
|
|
65615385e7 | ||
|
|
727f35b35b | ||
|
|
07ddf7e87b |
2
.gitattributes
vendored
@@ -5,5 +5,5 @@
|
||||
vendor/** -text=auto
|
||||
|
||||
# Diffs on these files are meaningless
|
||||
gui.files.go -diff
|
||||
*.svg -diff
|
||||
*.pb.go -diff
|
||||
|
||||
7
.gitignore
vendored
@@ -1,7 +1,7 @@
|
||||
syncthing
|
||||
!gui/syncthing
|
||||
!Godeps/_workspace/src/github.com/syncthing
|
||||
/syncthing
|
||||
/stdiscosrv
|
||||
syncthing.exe
|
||||
stdiscosrv.exe
|
||||
*.tar.gz
|
||||
*.zip
|
||||
*.asc
|
||||
@@ -14,3 +14,4 @@ coverage.xml
|
||||
syncthing.sig
|
||||
RELEASE
|
||||
deb
|
||||
lib/auto/gui.files.go
|
||||
|
||||
184
AUTHORS
@@ -1,89 +1,99 @@
|
||||
# This is the official list of Syncthing authors for copyright purposes.
|
||||
# The format is:
|
||||
#
|
||||
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
|
||||
#
|
||||
# The NICKS list is auto generated from this file.
|
||||
|
||||
Aaron Bieber <qbit@deftly.net>
|
||||
Adam Piggott <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
|
||||
Alessandro G. <alessandro.g89@gmail.com>
|
||||
Alexander Graf <register-github@alex-graf.de>
|
||||
Anderson Mesquita <andersonvom@gmail.com>
|
||||
Andrew Dunham <andrew@du.nham.ca>
|
||||
Antony Male <antony.male@gmail.com>
|
||||
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Audrius Butkevicius <audrius.butkevicius@gmail.com>
|
||||
Bart De Vries <devriesb@gmail.com>
|
||||
Ben Curthoys <ben@bencurthoys.com>
|
||||
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
Ben Sidhom <bsidhom@gmail.com>
|
||||
Benny Ng <benny.tpng@gmail.com>
|
||||
Brandon Philips <brandon@ifup.org>
|
||||
Brendan Long <self@brendanlong.com>
|
||||
Brian R. Becker <brbecker@gmail.com>
|
||||
Caleb Callaway <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann <moter8@gmail.com>
|
||||
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
Chris Howie <me@chrishowie.com>
|
||||
Chris Joel <chris@scriptolo.gy>
|
||||
Colin Kennedy <moshen.colin@gmail.com>
|
||||
Daniel Bergmann <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí <mvdan@mvdan.cc>
|
||||
David Rimmer <dinosore@dbrsoftware.co.uk>
|
||||
Denis A. <denisva@gmail.com>
|
||||
Dennis Wilson <dw@risu.io>
|
||||
Dominik Heidler <dominik@heidler.eu>
|
||||
Elias Jarlebring <jarlebring@gmail.com>
|
||||
Emil Hessman <emil@hessman.se>
|
||||
Erik Meitner <e.meitner@willystreet.coop>
|
||||
Federico Castagnini <federico.castagnini@gmail.com>
|
||||
Felix Ableitner <me@nutomic.com>
|
||||
Felix Unterpaintner <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell <fxgsell@gmail.com>
|
||||
Frank Isemann <frank@isemann.name>
|
||||
Gilli Sigurdsson <gilli@vx.is>
|
||||
Jaakko Hannikainen <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz <szafar@linux.pl>
|
||||
Jake Peterson <jake@acogdev.com>
|
||||
Jakob Borg <jakob@nym.se>
|
||||
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
Jaroslav Malec <dzardacz@gmail.com>
|
||||
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jochen Voss <voss@seehuhn.de>
|
||||
Johan Vromans <jvromans@squirrel.nl>
|
||||
Karol Różycki <rozycki.karol@gmail.com>
|
||||
Kelong Cong <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada <kamada@nanohz.org>
|
||||
Kevin Allen <kma1660@gmail.com>
|
||||
Lars K.W. Gohlke <lkwg82@gmx.de>
|
||||
Laurent Etiemble <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Lode Hoste <zillode@zillode.be>
|
||||
Lord Landon Agahnim <lordlandon@gmail.com>
|
||||
Marc Laporte <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol <kilburn@la3.org>
|
||||
Marcin Dziadus <dziadus.marcin@gmail.com>
|
||||
Mateusz Naściszewski <matin1111@wp.pl>
|
||||
Matt Burke <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Max Schulze <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov <ploujj@gmail.com>
|
||||
Michael Tilli <pyfisch@gmail.com>
|
||||
Nate Morrison <natemorrison@gmail.com>
|
||||
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Peter Hoeg <peter@speartail.com>
|
||||
Philippe Schommers <philippe@schommers.be>
|
||||
Phill Luby <phill.luby@newredo.com>
|
||||
Piotr Bejda <piotrb10@gmail.com>
|
||||
Ryan Sullivan <kayoticsully@gmail.com>
|
||||
Scott Klupfel <kluppy@going2blue.com>
|
||||
Sergey Mishin <ralder@yandex.ru>
|
||||
Stefan Kuntz <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
Tim Abell <tim@timwise.co.uk>
|
||||
Tobias Nygren <tnn@nygren.pp.se>
|
||||
Tomas Cerveny <kozec@kozec.com>
|
||||
Tully Robinson <tully@tojr.org>
|
||||
Tyler Brazier <tyler@tylerbrazier.com>
|
||||
Veeti Paananen <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky <vix_booja@tut.by>
|
||||
Vil Brekin <vilbrekin@gmail.com>
|
||||
William A. Kennington III <william@wkennington.com>
|
||||
Wulf Weich <wweich@users.noreply.github.com> <wweich@gmx.de>
|
||||
Yannic A. <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
Aaron Bieber (qbit) <qbit@deftly.net>
|
||||
Adam Piggott (simplypeachy) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
|
||||
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
|
||||
Andrey D (scienmind) <scintertech@cryptolab.net>
|
||||
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
|
||||
Antony Male (canton7) <antony.male@gmail.com>
|
||||
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
|
||||
Bart De Vries (mogwa1) <devriesb@gmail.com>
|
||||
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
|
||||
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
|
||||
Benny Ng (tpng) <benny.tpng@gmail.com>
|
||||
Brandon Philips (philips) <brandon@ifup.org>
|
||||
Brendan Long (brendanlong) <self@brendanlong.com>
|
||||
Brian R. Becker (brbecker) <brbecker@gmail.com>
|
||||
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann (Moter8) <moter8@gmail.com>
|
||||
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
Cedric Staniewski (xduugu) <cedric@gmx.ca>
|
||||
Chris Howie (cdhowie) <me@chrishowie.com>
|
||||
Chris Joel (cdata) <chris@scriptolo.gy>
|
||||
Colin Kennedy (moshen) <moshen.colin@gmail.com>
|
||||
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí (mvdan) <mvdan@mvdan.cc>
|
||||
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
|
||||
Denis A. (dva) <denisva@gmail.com>
|
||||
Dennis Wilson (snnd) <dw@risu.io>
|
||||
Dominik Heidler (asdil12) <dominik@heidler.eu>
|
||||
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
|
||||
Emil Hessman (ceh) <emil@hessman.se>
|
||||
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
|
||||
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
|
||||
Felix Ableitner (Nutomic) <me@nutomic.com>
|
||||
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
|
||||
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
|
||||
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jochen Voss (seehuhn) <voss@seehuhn.de>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
|
||||
Kevin Allen (ironmig) <kma1660@gmail.com>
|
||||
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
|
||||
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Lode Hoste (Zillode) <zillode@zillode.be>
|
||||
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
|
||||
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
|
||||
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol (kilburn) <kilburn@la3.org>
|
||||
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
|
||||
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
|
||||
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov (plouj) <ploujj@gmail.com>
|
||||
Michael Tilli (pyfisch) <pyfisch@gmail.com>
|
||||
Nate Morrison (nrm21) <natemorrison@gmail.com>
|
||||
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Peter Hoeg (peterhoeg) <peter@speartail.com>
|
||||
Philippe Schommers (filoozoom) <philippe@schommers.be>
|
||||
Phill Luby (pluby) <phill.luby@newredo.com>
|
||||
Piotr Bejda (piobpl) <piotrb10@gmail.com>
|
||||
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
|
||||
Tomas Cerveny (kozec) <kozec@kozec.com>
|
||||
Tully Robinson (tojrobinson) <tully@tojr.org>
|
||||
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
|
||||
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky (buinsky) <vix_booja@tut.by>
|
||||
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
|
||||
@@ -44,9 +44,20 @@ repository](https://github.com/syncthing/docs).
|
||||
|
||||
## Licensing
|
||||
|
||||
All contributions are made under the same MPLv2 license as the rest of
|
||||
the project, except documentation, user interface text and translation
|
||||
strings which are licensed under the Creative Commons Attribution 4.0
|
||||
International License. You retain the copyright to code you have
|
||||
written.
|
||||
All contributions are made available under the same license as the already
|
||||
existing material being contributed to. For most of the project and unless
|
||||
otherwise stated this means MPLv2, but there are exceptions:
|
||||
|
||||
- Certain commands (under cmd/...) may have a separate license, indicated by
|
||||
the presence of a LICENSE file in the corresponding directory.
|
||||
|
||||
- The documentation (man/...) is licensed under the Creative Commons
|
||||
Attribution 4.0 International License.
|
||||
|
||||
- Projects under vendor/... are copyright by and licensed from their
|
||||
respective original authors. Contributions should be made to the original
|
||||
project, not here.
|
||||
|
||||
Regardless of the license in effect, you retain the copyright to your
|
||||
contribution.
|
||||
|
||||
|
||||
@@ -1,4 +1,13 @@
|
||||
If your issue is a bug report, replace this paragraph with a description
|
||||
Do not report security issues in this bug tracker. Instead, contact
|
||||
security@syncthing.net directly - see https://syncthing.net/security.html
|
||||
for more information.
|
||||
|
||||
If your issue is a support request ("How do I get my devices to connect?"
|
||||
or similar), please use the support forum at https://forum.syncthing.net/
|
||||
where a large number of helpful people hang out. This issue tracker is for
|
||||
reporting bugs or feature requests directly to the developers.
|
||||
|
||||
If your issue is a bug report, replace this boilerplate with a description
|
||||
of the problem, being sure to include at least:
|
||||
|
||||
- what happened,
|
||||
|
||||
200
NICKS
@@ -1,88 +1,118 @@
|
||||
# This file maps email addresses used in commits to nicks used the changelog.
|
||||
# It is auto generated from the AUTHORS file by script/authors.go.
|
||||
|
||||
acogdev <jake@acogdev.com>
|
||||
alex2108 <register-github@alex-graf.de>
|
||||
alessandro.g89 <alessandro.g89@gmail.com>
|
||||
andersonvom <andersonvom@gmail.com>
|
||||
andrew-d <andrew@du.nham.ca>
|
||||
asdil12 <dominik@heidler.eu>
|
||||
AudriusButkevicius <audrius.butkevicius@gmail.com>
|
||||
bencurthoys <ben@bencurthoys.com>
|
||||
bigbear2nd <bigbear2nd@gmail.com>
|
||||
brbecker <brbecker@gmail.com>
|
||||
brendanlong <self@brendanlong.com>
|
||||
brgmnn <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
bsidhom <bsidhom@gmail.com>
|
||||
buinsky <vix_booja@tut.by>
|
||||
burkemw3 <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
calmh <jakob@nym.se>
|
||||
canton7 <antony.male@gmail.com>
|
||||
Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
cdata <chris@scriptolo.gy>
|
||||
cdhowie <me@chrishowie.com>
|
||||
ceh <emil@hessman.se>
|
||||
cqcallaw <enlightened.despot@gmail.com>
|
||||
dinosore <dinosore@dbrsoftware.co.uk>
|
||||
dva <denisva@gmail.com>
|
||||
dzarda <dzardacz@gmail.com>
|
||||
eipiminus1 <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
facastagnini <federico.castagnini@gmail.com>
|
||||
filoozoom <philippe@schommers.be>
|
||||
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
fti7 <frank@isemann.name>
|
||||
gillisig <gilli@vx.is>
|
||||
hadogenes <szafar@linux.pl>
|
||||
ironmig <kma1660@gmail.com>
|
||||
jarlebring <jarlebring@gmail.com>
|
||||
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
jgke <jgke@jgke.fi>
|
||||
jpjp <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
kamadak <kamada@nanohz.org>
|
||||
KayoticSully <kayoticsully@gmail.com>
|
||||
kilburn <kilburn@la3.org>
|
||||
kluppy <kluppy@going2blue.com>
|
||||
kozec <kozec@kozec.com>
|
||||
kralo <max.schulze@online.de>
|
||||
krozycki <rozycki.karol@gmail.com>
|
||||
letiemble <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
LordLandon <lordlandon@gmail.com>
|
||||
lkwg82 <lkwg82@gmx.de>
|
||||
marcindziadus <dziadus.marcin@gmail.com>
|
||||
0x010C <antoine.lamielle@0x010c.fr>
|
||||
0x010C <gh@0x010c.fr>
|
||||
acogdev <jake@acogdev.com>
|
||||
alessandro.g89 <alessandro.g89@gmail.com>
|
||||
alex2108 <register-github@alex-graf.de>
|
||||
andersonvom <andersonvom@gmail.com>
|
||||
andrew-d <andrew@du.nham.ca>
|
||||
asdil12 <dominik@heidler.eu>
|
||||
AudriusButkevicius <audrius.butkevicius@gmail.com>
|
||||
aviau <alexandre@alexandreviau.net>
|
||||
aviau <aviau@debian.org>
|
||||
bencurthoys <ben@bencurthoys.com>
|
||||
bigbear2nd <bigbear2nd@gmail.com>
|
||||
brbecker <brbecker@gmail.com>
|
||||
brendanlong <self@brendanlong.com>
|
||||
brgmnn <dan.arne.bergmann@gmail.com>
|
||||
brgmnn <brgmnn@users.noreply.github.com>
|
||||
bsidhom <bsidhom@gmail.com>
|
||||
buinsky <vix_booja@tut.by>
|
||||
burkemw3 <mburke@amplify.com>
|
||||
burkemw3 <burkemw3@gmail.com>
|
||||
calmh <jakob@nym.se>
|
||||
calmh <jakob@kastelo.net>
|
||||
canton7 <antony.male@gmail.com>
|
||||
Cathryne <cathryne.linenweaver@gmail.com>
|
||||
Cathryne <Cathryne@users.noreply.github.com>
|
||||
cdata <chris@scriptolo.gy>
|
||||
cdhowie <me@chrishowie.com>
|
||||
ceh <emil@hessman.se>
|
||||
cqcallaw <enlightened.despot@gmail.com>
|
||||
dinosore <dinosore@dbrsoftware.co.uk>
|
||||
dva <denisva@gmail.com>
|
||||
dzarda <dzardacz@gmail.com>
|
||||
eipiminus1 <eipiminusone+github@gmail.com>
|
||||
eipiminus1 <eipiminus1@users.noreply.github.com>
|
||||
facastagnini <federico.castagnini@gmail.com>
|
||||
filoozoom <philippe@schommers.be>
|
||||
frioux <frew@afoolishmanifesto.com>
|
||||
frioux <frioux@gmail.com>
|
||||
fti7 <frank@isemann.name>
|
||||
gillisig <gilli@vx.is>
|
||||
hadogenes <szafar@linux.pl>
|
||||
ironmig <kma1660@gmail.com>
|
||||
jarlebring <jarlebring@gmail.com>
|
||||
jedie <github.com@jensdiemer.de>
|
||||
jedie <git@jensdiemer.de>
|
||||
jgke <jgke@jgke.fi>
|
||||
jpjp <jamespatterson@operamail.com>
|
||||
jpjp <jpjp@users.noreply.github.com>
|
||||
kamadak <kamada@nanohz.org>
|
||||
KayoticSully <kayoticsully@gmail.com>
|
||||
kc1212 <kc04bc@gmx.com>
|
||||
kc1212 <kc1212@users.noreply.github.com>
|
||||
kilburn <kilburn@la3.org>
|
||||
kluppy <kluppy@going2blue.com>
|
||||
kozec <kozec@kozec.com>
|
||||
kralo <max.schulze@online.de>
|
||||
kralo <kralo@users.noreply.github.com>
|
||||
krozycki <rozycki.karol@gmail.com>
|
||||
letiemble <laurent.etiemble@gmail.com>
|
||||
letiemble <laurent.etiemble@monobjc.net>
|
||||
lkwg82 <lkwg82@gmx.de>
|
||||
LordLandon <lordlandon@gmail.com>
|
||||
majedev <majed.alhajry@gmail.com>
|
||||
marcindziadus <dziadus.marcin@gmail.com>
|
||||
marclaporte <marc@marclaporte.com>
|
||||
mateon1 <matin1111@wp.pl>
|
||||
mogwa1 <devriesb@gmail.com>
|
||||
moshen <moshen.colin@gmail.com>
|
||||
Moter8 <moter8@gmail.com>
|
||||
mvdan <mvdan@mvdan.cc>
|
||||
norgeous <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
nrm21 <natemorrison@gmail.com>
|
||||
Nutomic <me@nutomic.com>
|
||||
pascalj <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
peterhoeg <peter@speartail.com>
|
||||
philips <brandon@ifup.org>
|
||||
piobpl <piotrb10@gmail.com>
|
||||
plouj <ploujj@gmail.com>
|
||||
pluby <phill.luby@newredo.com>
|
||||
pyfisch <pyfisch@gmail.com>
|
||||
qbit <qbit@deftly.net>
|
||||
ralder <ralder@yandex.ru>
|
||||
Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
rumpelsepp <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
simplypeachy <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
|
||||
snnd <dw@risu.io>
|
||||
Stefan-Code <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
timabell <tim@timwise.co.uk>
|
||||
tnn2 <tnn@nygren.pp.se>
|
||||
tojrobinson <tully@tojr.org>
|
||||
tpng <benny.tpng@gmail.com>
|
||||
tylerbrazier <tyler@tylerbrazier.com>
|
||||
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
veeti <veeti.paananen@rojekti.fi>
|
||||
Vilbrekin <vilbrekin@gmail.com>
|
||||
wkennington <william@wkennington.com>
|
||||
wsgcsysadmin <e.meitner@willystreet.coo>
|
||||
wweich <wweich@users.noreply.github.com> <wweich@gmx.de>
|
||||
Zillode <zillode@zillode.be>
|
||||
zukoo <fxgsell@gmail.com>
|
||||
marclaporte <marc@laporte.name>
|
||||
mateon1 <matin1111@wp.pl>
|
||||
mogwa1 <devriesb@gmail.com>
|
||||
moshen <moshen.colin@gmail.com>
|
||||
Moter8 <moter8@gmail.com>
|
||||
mvdan <mvdan@mvdan.cc>
|
||||
norgeous <daniel@harte.me>
|
||||
norgeous <daniel@danielharte.co.uk>
|
||||
norgeous <norgeous@users.noreply.github.com>
|
||||
nrm21 <natemorrison@gmail.com>
|
||||
Nutomic <me@nutomic.com>
|
||||
pascalj <github@pascalj.com>
|
||||
pascalj <mail@pascal-jungblut.com>
|
||||
peterhoeg <peter@speartail.com>
|
||||
philips <brandon@ifup.org>
|
||||
piobpl <piotrb10@gmail.com>
|
||||
plouj <ploujj@gmail.com>
|
||||
pluby <phill.luby@newredo.com>
|
||||
pyfisch <pyfisch@gmail.com>
|
||||
qbit <qbit@deftly.net>
|
||||
ralder <ralder@yandex.ru>
|
||||
Rewt0r <rewt0r@gmx.com>
|
||||
Rewt0r <Rewt0r@users.noreply.github.com>
|
||||
rumpelsepp <stefan@sevenbyte.org>
|
||||
rumpelsepp <rumpelsepp@sevenbyte.org>
|
||||
scienmind <scintertech@cryptolab.net>
|
||||
sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
simplypeachy <aD@simplypeachy.co.uk>
|
||||
simplypeachy <simplypeachy@users.noreply.github.com>
|
||||
snnd <dw@risu.io>
|
||||
Stefan-Code <stefan.github@gmail.com>
|
||||
Stefan-Code <Stefan.github@gmail.com>
|
||||
timabell <tim@timwise.co.uk>
|
||||
tnn2 <tnn@nygren.pp.se>
|
||||
tojrobinson <tully@tojr.org>
|
||||
tpng <benny.tpng@gmail.com>
|
||||
tylerbrazier <tyler@tylerbrazier.com>
|
||||
uok <ueomkail@gmail.com>
|
||||
uok <uok@users.noreply.github.com>
|
||||
veeti <veeti.paananen@rojekti.fi>
|
||||
Vilbrekin <vilbrekin@gmail.com>
|
||||
wkennington <william@wkennington.com>
|
||||
WSGCSysadmin <e.meitner@willystreet.coop>
|
||||
wweich <wweich@users.noreply.github.com>
|
||||
wweich <wweich@gmx.de>
|
||||
xduugu <cedric@gmx.ca>
|
||||
Zillode <zillode@zillode.be>
|
||||
zukoo <fxgsell@gmail.com>
|
||||
|
||||
@@ -14,6 +14,12 @@ if new tests are not included.
|
||||
If this is a GUI change, include screenshots of the change. If not, please
|
||||
feel free to just delete this section.
|
||||
|
||||
### Documentation
|
||||
|
||||
If this is a user visible change (including API and protocol changes), add a link here
|
||||
to the corresponding pull request on https://github.com/syncthing/docs or describe
|
||||
the documentation changes necessary.
|
||||
|
||||
### Authorship
|
||||
|
||||
Every author of a code contribution (Go, Javascript, HTML, CSS etc, with the
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Syncthing
|
||||
|
||||
[](http://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](https://ci.appveyor.com/project/calmh/syncthing)
|
||||
[](http://godoc.org/github.com/syncthing/syncthing)
|
||||
[](https://www.mozilla.org/MPL/2.0/)
|
||||
|
||||
@@ -28,6 +27,11 @@ There are a few examples for keeping Syncthing running in the background
|
||||
on your system in [the etc directory][3]. There are also several [GUI
|
||||
implementations][11] for Windows, Mac and Linux.
|
||||
|
||||
## Vote on features/bugs
|
||||
|
||||
We'd like to encourage you to [vote][12] on issues that matter to you.
|
||||
This helps the team understand what are the biggest pain points for our users, and could potentially influence what is being worked on next.
|
||||
|
||||
## Getting in Touch
|
||||
|
||||
The first and best point of contact is the [Forum][8]. There is also an IRC
|
||||
@@ -67,3 +71,4 @@ All code is licensed under the [MPLv2 License][7].
|
||||
[9]: https://kiwiirc.com/client/irc.freenode.net/#syncthing
|
||||
[10]: https://github.com/syncthing/syncthing/issues
|
||||
[11]: http://docs.syncthing.net/users/contrib.html#gui-wrappers
|
||||
[12]: https://www.bountysource.com/teams/syncthing/issues
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
version: '{branch}-{build}'
|
||||
clone_folder: C:\src\github.com\syncthing\syncthing
|
||||
init:
|
||||
- go version
|
||||
environment:
|
||||
GOPATH: C:\
|
||||
build_script:
|
||||
- go run build.go zip
|
||||
test_script:
|
||||
- go run build.go test
|
||||
artifacts:
|
||||
- path: '*.zip'
|
||||
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 9.8 KiB |
|
Before Width: | Height: | Size: 23 KiB After Width: | Height: | Size: 20 KiB |
|
Before Width: | Height: | Size: 3.4 KiB After Width: | Height: | Size: 2.2 KiB |
|
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 40 KiB |
|
Before Width: | Height: | Size: 6.4 KiB After Width: | Height: | Size: 4.9 KiB |
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 47 KiB After Width: | Height: | Size: 38 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 9.8 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 8.2 KiB |
BIN
assets/statusicons/default.svg
Normal file
|
After Width: | Height: | Size: 1.4 KiB |
BIN
assets/statusicons/notify.svg
Normal file
|
After Width: | Height: | Size: 1.9 KiB |
BIN
assets/statusicons/pause.svg
Normal file
|
After Width: | Height: | Size: 1.9 KiB |
BIN
assets/statusicons/sync.svg
Normal file
|
After Width: | Height: | Size: 2.1 KiB |
699
build.go
@@ -26,6 +26,7 @@ import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
@@ -38,124 +39,295 @@ var (
|
||||
version string
|
||||
goVersion float64
|
||||
race bool
|
||||
debug = os.Getenv("BUILDDEBUG") != ""
|
||||
)
|
||||
|
||||
const minGoVersion = 1.3
|
||||
type target struct {
|
||||
name string
|
||||
buildPkg string
|
||||
binaryName string
|
||||
archiveFiles []archiveFile
|
||||
debianFiles []archiveFile
|
||||
tags []string
|
||||
}
|
||||
|
||||
type archiveFile struct {
|
||||
src string
|
||||
dst string
|
||||
perm os.FileMode
|
||||
}
|
||||
|
||||
var targets = map[string]target{
|
||||
"all": {
|
||||
// Only valid for the "build" and "install" commands as it lacks all
|
||||
// the archive creation stuff.
|
||||
buildPkg: "./cmd/...",
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"syncthing": {
|
||||
// The default target for "build", "install", "tar", "zip", "deb", etc.
|
||||
name: "syncthing",
|
||||
buildPkg: "./cmd/syncthing",
|
||||
binaryName: "syncthing", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
// All files from etc/ and extra/ added automatically in init().
|
||||
},
|
||||
debianFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
|
||||
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
|
||||
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
|
||||
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
|
||||
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
|
||||
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
|
||||
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
|
||||
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
|
||||
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
|
||||
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
|
||||
},
|
||||
},
|
||||
"stdiscosrv": {
|
||||
name: "stdiscosrv",
|
||||
buildPkg: "./cmd/stdiscosrv",
|
||||
binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
debianFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/stdiscosrv/README.txt", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/LICENSE", dst: "deb/usr/share/doc/stdiscosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/stdiscosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
},
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"strelaysrv": {
|
||||
name: "strelaysrv",
|
||||
buildPkg: "./cmd/strelaysrv",
|
||||
binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
debianFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/strelaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/strelaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/strelaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
},
|
||||
},
|
||||
"strelaypoolsrv": {
|
||||
name: "strelaypoolsrv",
|
||||
buildPkg: "./cmd/strelaypoolsrv",
|
||||
binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
debianFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/relaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/relaysrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// The "syncthing" target includes a few more files found in the "etc"
|
||||
// and "extra" dirs.
|
||||
syncthingPkg := targets["syncthing"]
|
||||
for _, file := range listFiles("etc") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
syncthingPkg.debianFiles = append(syncthingPkg.debianFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
|
||||
}
|
||||
targets["syncthing"] = syncthingPkg
|
||||
}
|
||||
|
||||
const minGoVersion = 1.5
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
if os.Getenv("GOPATH") == "" {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
|
||||
log.Println("GOPATH is", gopath)
|
||||
os.Setenv("GOPATH", gopath)
|
||||
if debug {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
log.Println("... build completed in", time.Since(t0))
|
||||
}()
|
||||
}
|
||||
|
||||
if os.Getenv("GOPATH") == "" {
|
||||
setGoPath()
|
||||
}
|
||||
|
||||
// We use Go 1.5+ vendoring.
|
||||
os.Setenv("GO15VENDOREXPERIMENT", "1")
|
||||
|
||||
// Set path to $GOPATH/bin:$PATH so that we can for sure find tools we
|
||||
// might have installed during "build.go setup".
|
||||
os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))
|
||||
|
||||
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
|
||||
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
|
||||
flag.BoolVar(&noupgrade, "no-upgrade", noupgrade, "Disable upgrade functionality")
|
||||
flag.StringVar(&version, "version", getVersion(), "Set compiled in version string")
|
||||
flag.BoolVar(&race, "race", race, "Use race detector")
|
||||
flag.Parse()
|
||||
parseFlags()
|
||||
|
||||
checkArchitecture()
|
||||
goVersion, _ = checkRequiredGoVersion()
|
||||
|
||||
// Invoking build.go with no parameters at all builds everything (incrementally),
|
||||
// which is what you want for maximum error checking during development.
|
||||
if flag.NArg() == 0 {
|
||||
runCommand("install", targets["all"])
|
||||
runCommand("vet", target{})
|
||||
runCommand("lint", target{})
|
||||
} else {
|
||||
// with any command given but not a target, the target is
|
||||
// "syncthing". So "go run build.go install" is "go run build.go install
|
||||
// syncthing" etc.
|
||||
targetName := "syncthing"
|
||||
if flag.NArg() > 1 {
|
||||
targetName = flag.Arg(1)
|
||||
}
|
||||
target, ok := targets[targetName]
|
||||
if !ok {
|
||||
log.Fatalln("Unknown target", target)
|
||||
}
|
||||
|
||||
runCommand(flag.Arg(0), target)
|
||||
}
|
||||
}
|
||||
|
||||
func checkArchitecture() {
|
||||
switch goarch {
|
||||
case "386", "amd64", "arm", "arm64", "ppc64", "ppc64le":
|
||||
break
|
||||
default:
|
||||
log.Printf("Unknown goarch %q; proceed with caution!", goarch)
|
||||
}
|
||||
}
|
||||
|
||||
goVersion, _ = checkRequiredGoVersion()
|
||||
func runCommand(cmd string, target target) {
|
||||
switch cmd {
|
||||
case "setup":
|
||||
setup()
|
||||
|
||||
if flag.NArg() == 0 {
|
||||
case "install":
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
install("./cmd/...", tags)
|
||||
install(target, tags)
|
||||
|
||||
vet("./cmd/syncthing")
|
||||
vet("./lib/...")
|
||||
lint("./cmd/syncthing")
|
||||
lint("./lib/...")
|
||||
return
|
||||
}
|
||||
|
||||
for _, cmd := range flag.Args() {
|
||||
switch cmd {
|
||||
case "setup":
|
||||
setup()
|
||||
|
||||
case "install":
|
||||
pkg := "./cmd/..."
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
install(pkg, tags)
|
||||
|
||||
case "build":
|
||||
pkg := "./cmd/syncthing"
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
build(pkg, tags)
|
||||
|
||||
case "test":
|
||||
test("./lib/...", "./cmd/...")
|
||||
|
||||
case "bench":
|
||||
bench("./lib/...", "./cmd/...")
|
||||
|
||||
case "assets":
|
||||
assets()
|
||||
|
||||
case "xdr":
|
||||
xdr()
|
||||
|
||||
case "translate":
|
||||
translate()
|
||||
|
||||
case "transifex":
|
||||
transifex()
|
||||
|
||||
case "tar":
|
||||
buildTar()
|
||||
|
||||
case "zip":
|
||||
buildZip()
|
||||
|
||||
case "deb":
|
||||
buildDeb()
|
||||
|
||||
case "clean":
|
||||
clean()
|
||||
|
||||
case "vet":
|
||||
vet("./cmd/syncthing")
|
||||
vet("./lib/...")
|
||||
|
||||
case "lint":
|
||||
lint("./cmd/syncthing")
|
||||
lint("./lib/...")
|
||||
|
||||
default:
|
||||
log.Fatalf("Unknown command %q", cmd)
|
||||
case "build":
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
}
|
||||
build(target, tags)
|
||||
|
||||
case "test":
|
||||
test("./lib/...", "./cmd/...")
|
||||
|
||||
case "bench":
|
||||
bench("./lib/...", "./cmd/...")
|
||||
|
||||
case "assets":
|
||||
rebuildAssets()
|
||||
|
||||
case "proto":
|
||||
proto()
|
||||
|
||||
case "translate":
|
||||
translate()
|
||||
|
||||
case "transifex":
|
||||
transifex()
|
||||
|
||||
case "tar":
|
||||
buildTar(target)
|
||||
|
||||
case "zip":
|
||||
buildZip(target)
|
||||
|
||||
case "deb":
|
||||
buildDeb(target)
|
||||
|
||||
case "clean":
|
||||
clean()
|
||||
|
||||
case "vet":
|
||||
vet("build.go")
|
||||
vet("cmd", "lib")
|
||||
|
||||
case "lint":
|
||||
lint(".")
|
||||
lint("./cmd/...")
|
||||
lint("./lib/...")
|
||||
|
||||
case "metalint":
|
||||
if isGometalinterInstalled() {
|
||||
dirs := []string{".", "./cmd/...", "./lib/..."}
|
||||
ok := gometalinter("deadcode", dirs, "test/util.go")
|
||||
ok = gometalinter("structcheck", dirs) && ok
|
||||
ok = gometalinter("varcheck", dirs) && ok
|
||||
if !ok {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
log.Fatalf("Unknown command %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
// setGoPath sets GOPATH correctly with the assumption that we are
|
||||
// in $GOPATH/src/github.com/syncthing/syncthing.
|
||||
func setGoPath() {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
|
||||
log.Println("GOPATH is", gopath)
|
||||
os.Setenv("GOPATH", gopath)
|
||||
}
|
||||
|
||||
func parseFlags() {
|
||||
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
|
||||
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
|
||||
flag.BoolVar(&noupgrade, "no-upgrade", noupgrade, "Disable upgrade functionality")
|
||||
flag.StringVar(&version, "version", getVersion(), "Set compiled in version string")
|
||||
flag.BoolVar(&race, "race", race, "Use race detector")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func checkRequiredGoVersion() (float64, bool) {
|
||||
ver := run("go", "version")
|
||||
re := regexp.MustCompile(`go version go(\d+\.\d+)`)
|
||||
if m := re.FindSubmatch(ver); len(m) == 2 {
|
||||
re := regexp.MustCompile(`go(\d+\.\d+)`)
|
||||
ver := runtime.Version()
|
||||
if m := re.FindStringSubmatch(ver); len(m) == 2 {
|
||||
vs := string(m[1])
|
||||
// This is a standard go build. Verify that it's new enough.
|
||||
f, err := strconv.ParseFloat(vs, 64)
|
||||
@@ -163,7 +335,9 @@ func checkRequiredGoVersion() (float64, bool) {
|
||||
log.Printf("*** Couldn't parse Go version out of %q.\n*** This isn't known to work, proceed on your own risk.", vs)
|
||||
return 0, false
|
||||
}
|
||||
if f < minGoVersion {
|
||||
if f < 1.5 {
|
||||
log.Printf("*** Go version %.01f doesn't support the vendoring mechanism.\n*** Ensure correct dependencies in your $GOPATH.", f)
|
||||
} else if f < minGoVersion {
|
||||
log.Fatalf("*** Go version %.01f is less than required %.01f.\n*** This is known not to work, not proceeding.", f, minGoVersion)
|
||||
}
|
||||
return f, true
|
||||
@@ -174,17 +348,20 @@ func checkRequiredGoVersion() (float64, bool) {
|
||||
}
|
||||
|
||||
func setup() {
|
||||
runPrint("go", "get", "-v", "github.com/golang/lint/golint")
|
||||
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/cover")
|
||||
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/vet")
|
||||
runPrint("go", "get", "-v", "golang.org/x/net/html")
|
||||
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
|
||||
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
|
||||
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
|
||||
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
|
||||
runPrint("go", "get", "-v", "github.com/alecthomas/gometalinter")
|
||||
runPrint("go", "get", "-v", "github.com/mitchellh/go-wordwrap")
|
||||
}
|
||||
|
||||
func test(pkgs ...string) {
|
||||
setBuildEnv()
|
||||
lazyRebuildAssets()
|
||||
|
||||
useRace := runtime.GOARCH == "amd64"
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "linux", "freebsd", "windows":
|
||||
@@ -193,18 +370,22 @@ func test(pkgs ...string) {
|
||||
}
|
||||
|
||||
if useRace {
|
||||
runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s"}, pkgs...)...)
|
||||
runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s", "-tags", "purego"}, pkgs...)...)
|
||||
} else {
|
||||
runPrint("go", append([]string{"test", "-short", "-timeout", "60s"}, pkgs...)...)
|
||||
runPrint("go", append([]string{"test", "-short", "-timeout", "60s", "-tags", "purego"}, pkgs...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func bench(pkgs ...string) {
|
||||
setBuildEnv()
|
||||
lazyRebuildAssets()
|
||||
runPrint("go", append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
|
||||
}
|
||||
|
||||
func install(pkg string, tags []string) {
|
||||
func install(target target, tags []string) {
|
||||
lazyRebuildAssets()
|
||||
|
||||
tags = append(target.tags, tags...)
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -212,90 +393,89 @@ func install(pkg string, tags []string) {
|
||||
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
|
||||
args := []string{"install", "-v", "-ldflags", ldflags()}
|
||||
if len(tags) > 0 {
|
||||
args = append(args, "-tags", strings.Join(tags, ","))
|
||||
args = append(args, "-tags", strings.Join(tags, " "))
|
||||
}
|
||||
if race {
|
||||
args = append(args, "-race")
|
||||
}
|
||||
args = append(args, pkg)
|
||||
setBuildEnv()
|
||||
args = append(args, target.buildPkg)
|
||||
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
runPrint("go", args...)
|
||||
}
|
||||
|
||||
func build(pkg string, tags []string) {
|
||||
binary := "syncthing"
|
||||
if goos == "windows" {
|
||||
binary += ".exe"
|
||||
}
|
||||
func build(target target, tags []string) {
|
||||
lazyRebuildAssets()
|
||||
|
||||
rmr(binary)
|
||||
tags = append(target.tags, tags...)
|
||||
|
||||
rmr(target.binaryName)
|
||||
args := []string{"build", "-i", "-v", "-ldflags", ldflags()}
|
||||
if len(tags) > 0 {
|
||||
args = append(args, "-tags", strings.Join(tags, ","))
|
||||
args = append(args, "-tags", strings.Join(tags, " "))
|
||||
}
|
||||
if race {
|
||||
args = append(args, "-race")
|
||||
}
|
||||
args = append(args, pkg)
|
||||
setBuildEnv()
|
||||
args = append(args, target.buildPkg)
|
||||
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
runPrint("go", args...)
|
||||
}
|
||||
|
||||
func buildTar() {
|
||||
name := archiveName()
|
||||
func buildTar(target target) {
|
||||
name := archiveName(target)
|
||||
filename := name + ".tar.gz"
|
||||
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
name += "-noupgrade"
|
||||
}
|
||||
build("./cmd/syncthing", tags)
|
||||
filename := name + ".tar.gz"
|
||||
files := []archiveFile{
|
||||
{src: "README.md", dst: name + "/README.txt"},
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing", dst: name + "/syncthing"},
|
||||
}
|
||||
|
||||
for _, file := range listFiles("etc") {
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + file})
|
||||
}
|
||||
for _, file := range listFiles("extra") {
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + filepath.Base(file)})
|
||||
}
|
||||
build(target, tags)
|
||||
|
||||
if goos == "darwin" {
|
||||
macosCodesign("syncthing")
|
||||
macosCodesign(target.binaryName)
|
||||
}
|
||||
tarGz(filename, files)
|
||||
|
||||
for i := range target.archiveFiles {
|
||||
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
|
||||
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
|
||||
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
|
||||
}
|
||||
|
||||
tarGz(filename, target.archiveFiles)
|
||||
log.Println(filename)
|
||||
}
|
||||
|
||||
func buildZip() {
|
||||
name := archiveName()
|
||||
func buildZip(target target) {
|
||||
target.binaryName += ".exe"
|
||||
|
||||
name := archiveName(target)
|
||||
filename := name + ".zip"
|
||||
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
name += "-noupgrade"
|
||||
}
|
||||
build("./cmd/syncthing", tags)
|
||||
filename := name + ".zip"
|
||||
files := []archiveFile{
|
||||
{src: "README.md", dst: name + "/README.txt"},
|
||||
{src: "LICENSE", dst: name + "/LICENSE.txt"},
|
||||
{src: "AUTHORS", dst: name + "/AUTHORS.txt"},
|
||||
{src: "syncthing.exe", dst: name + "/syncthing.exe"},
|
||||
|
||||
build(target, tags)
|
||||
|
||||
for i := range target.archiveFiles {
|
||||
target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.binaryName, 1)
|
||||
target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.binaryName, 1)
|
||||
target.archiveFiles[i].dst = name + "/" + target.archiveFiles[i].dst
|
||||
}
|
||||
|
||||
for _, file := range listFiles("extra") {
|
||||
files = append(files, archiveFile{src: file, dst: name + "/" + filepath.Base(file)})
|
||||
}
|
||||
|
||||
zipFile(filename, files)
|
||||
zipFile(filename, target.archiveFiles)
|
||||
log.Println(filename)
|
||||
}
|
||||
|
||||
func buildDeb() {
|
||||
func buildDeb(target target) {
|
||||
os.RemoveAll("deb")
|
||||
|
||||
// "goarch" here is set to whatever the Debian packages expect. We correct
|
||||
@@ -309,33 +489,14 @@ func buildDeb() {
|
||||
goarch = "arm"
|
||||
}
|
||||
|
||||
build("./cmd/syncthing", []string{"noupgrade"})
|
||||
build(target, []string{"noupgrade"})
|
||||
|
||||
files := []archiveFile{
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
|
||||
{src: "syncthing", dst: "deb/usr/bin/syncthing", perm: 0755},
|
||||
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
|
||||
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
|
||||
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
|
||||
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
|
||||
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
|
||||
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
|
||||
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
|
||||
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
|
||||
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
|
||||
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
|
||||
for i := range target.debianFiles {
|
||||
target.debianFiles[i].src = strings.Replace(target.debianFiles[i].src, "{{binary}}", target.binaryName, 1)
|
||||
target.debianFiles[i].dst = strings.Replace(target.debianFiles[i].dst, "{{binary}}", target.binaryName, 1)
|
||||
}
|
||||
|
||||
for _, file := range listFiles("extra") {
|
||||
files = append(files, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
|
||||
}
|
||||
|
||||
for _, af := range files {
|
||||
for _, af := range target.debianFiles {
|
||||
if err := copyFile(af.src, af.dst, af.perm); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -344,11 +505,14 @@ func buildDeb() {
|
||||
os.MkdirAll("deb/DEBIAN", 0755)
|
||||
|
||||
data := map[string]string{
|
||||
"name": target.name,
|
||||
"arch": debarch,
|
||||
"version": version[1:],
|
||||
"date": time.Now().Format(time.RFC1123),
|
||||
}
|
||||
for _, file := range listFiles("debian") {
|
||||
|
||||
debTemplateFiles := append(listFiles("debtpl/common"), listFiles("debtpl/"+target.name)...)
|
||||
for _, file := range debTemplateFiles {
|
||||
tpl, err := template.New(filepath.Base(file)).ParseFiles(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -400,19 +564,44 @@ func listFiles(dir string) []string {
|
||||
return res
|
||||
}
|
||||
|
||||
func setBuildEnv() {
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
os.Setenv("GO15VENDOREXPERIMENT", "1")
|
||||
}
|
||||
|
||||
func assets() {
|
||||
setBuildEnv()
|
||||
func rebuildAssets() {
|
||||
runPipe("lib/auto/gui.files.go", "go", "run", "script/genassets.go", "gui")
|
||||
runPipe("cmd/strelaypoolsrv/auto/gui.go", "go", "run", "script/genassets.go", "cmd/strelaypoolsrv/gui")
|
||||
}
|
||||
|
||||
func xdr() {
|
||||
runPrint("go", "generate", "./lib/discover", "./lib/db", "./lib/protocol", "./lib/relay/protocol")
|
||||
func lazyRebuildAssets() {
|
||||
if shouldRebuildAssets("lib/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.go", "cmd/strelaypoolsrv/auto/gui") {
|
||||
rebuildAssets()
|
||||
}
|
||||
}
|
||||
|
||||
func shouldRebuildAssets(target, srcdir string) bool {
|
||||
info, err := os.Stat(target)
|
||||
if err != nil {
|
||||
// If the file doesn't exist, we must rebuild it
|
||||
return true
|
||||
}
|
||||
|
||||
// Check if any of the files in gui/ are newer than the asset file. If
|
||||
// so we should rebuild it.
|
||||
currentBuild := info.ModTime()
|
||||
assetsAreNewer := false
|
||||
filepath.Walk(srcdir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if assetsAreNewer {
|
||||
return nil
|
||||
}
|
||||
assetsAreNewer = info.ModTime().After(currentBuild)
|
||||
return nil
|
||||
})
|
||||
|
||||
return assetsAreNewer
|
||||
}
|
||||
|
||||
func proto() {
|
||||
runPrint("go", "generate", "./lib/...")
|
||||
}
|
||||
|
||||
func translate() {
|
||||
@@ -429,8 +618,6 @@ func translate() {
|
||||
func transifex() {
|
||||
os.Chdir("gui/default/assets/lang")
|
||||
runPrint("go", "run", "../../../../script/transifexdl.go")
|
||||
os.Chdir("../../../..")
|
||||
assets()
|
||||
}
|
||||
|
||||
func clean() {
|
||||
@@ -554,15 +741,27 @@ func getBranchSuffix() string {
|
||||
}
|
||||
|
||||
func buildStamp() int64 {
|
||||
// If SOURCE_DATE_EPOCH is set, use that.
|
||||
if s, _ := strconv.ParseInt(os.Getenv("SOURCE_DATE_EPOCH"), 10, 64); s > 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
// Try to get the timestamp of the latest commit.
|
||||
bs, err := runError("git", "show", "-s", "--format=%ct")
|
||||
if err != nil {
|
||||
// Fall back to "now".
|
||||
return time.Now().Unix()
|
||||
}
|
||||
|
||||
s, _ := strconv.ParseInt(string(bs), 10, 64)
|
||||
return s
|
||||
}
|
||||
|
||||
func buildUser() string {
|
||||
if v := os.Getenv("BUILD_USER"); v != "" {
|
||||
return v
|
||||
}
|
||||
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return "unknown-user"
|
||||
@@ -571,6 +770,10 @@ func buildUser() string {
|
||||
}
|
||||
|
||||
func buildHost() string {
|
||||
if v := os.Getenv("BUILD_HOST"); v != "" {
|
||||
return v
|
||||
}
|
||||
|
||||
h, err := os.Hostname()
|
||||
if err != nil {
|
||||
return "unknown-host"
|
||||
@@ -578,13 +781,6 @@ func buildHost() string {
|
||||
return h
|
||||
}
|
||||
|
||||
func buildEnvironment() string {
|
||||
if v := os.Getenv("ENVIRONMENT"); len(v) > 0 {
|
||||
return v
|
||||
}
|
||||
return "default"
|
||||
}
|
||||
|
||||
func buildArch() string {
|
||||
os := goos
|
||||
if os == "darwin" {
|
||||
@@ -593,28 +789,31 @@ func buildArch() string {
|
||||
return fmt.Sprintf("%s-%s", os, goarch)
|
||||
}
|
||||
|
||||
func archiveName() string {
|
||||
return fmt.Sprintf("syncthing-%s-%s", buildArch(), version)
|
||||
}
|
||||
|
||||
func run(cmd string, args ...string) []byte {
|
||||
bs, err := runError(cmd, args...)
|
||||
if err != nil {
|
||||
log.Println(cmd, strings.Join(args, " "))
|
||||
log.Println(string(bs))
|
||||
log.Fatal(err)
|
||||
}
|
||||
return bytes.TrimSpace(bs)
|
||||
func archiveName(target target) string {
|
||||
return fmt.Sprintf("%s-%s-%s", target.name, buildArch(), version)
|
||||
}
|
||||
|
||||
func runError(cmd string, args ...string) ([]byte, error) {
|
||||
if debug {
|
||||
t0 := time.Now()
|
||||
log.Println("runError:", cmd, strings.Join(args, " "))
|
||||
defer func() {
|
||||
log.Println("... in", time.Since(t0))
|
||||
}()
|
||||
}
|
||||
ecmd := exec.Command(cmd, args...)
|
||||
bs, err := ecmd.CombinedOutput()
|
||||
return bytes.TrimSpace(bs), err
|
||||
}
|
||||
|
||||
func runPrint(cmd string, args ...string) {
|
||||
log.Println(cmd, strings.Join(args, " "))
|
||||
if debug {
|
||||
t0 := time.Now()
|
||||
log.Println("runPrint:", cmd, strings.Join(args, " "))
|
||||
defer func() {
|
||||
log.Println("... in", time.Since(t0))
|
||||
}()
|
||||
}
|
||||
ecmd := exec.Command(cmd, args...)
|
||||
ecmd.Stdout = os.Stdout
|
||||
ecmd.Stderr = os.Stderr
|
||||
@@ -625,7 +824,13 @@ func runPrint(cmd string, args ...string) {
|
||||
}
|
||||
|
||||
func runPipe(file, cmd string, args ...string) {
|
||||
log.Println(cmd, strings.Join(args, " "), ">", file)
|
||||
if debug {
|
||||
t0 := time.Now()
|
||||
log.Println("runPipe:", cmd, strings.Join(args, " "))
|
||||
defer func() {
|
||||
log.Println("... in", time.Since(t0))
|
||||
}()
|
||||
}
|
||||
fd, err := os.Create(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -640,12 +845,6 @@ func runPipe(file, cmd string, args ...string) {
|
||||
fd.Close()
|
||||
}
|
||||
|
||||
type archiveFile struct {
|
||||
src string
|
||||
dst string
|
||||
perm os.FileMode
|
||||
}
|
||||
|
||||
func tarGz(out string, files []archiveFile) {
|
||||
fd, err := os.Create(out)
|
||||
if err != nil {
|
||||
@@ -720,7 +919,7 @@ func zipFile(out string, files []archiveFile) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fh.Name = f.dst
|
||||
fh.Name = filepath.ToSlash(f.dst)
|
||||
fh.Method = zip.Deflate
|
||||
|
||||
if strings.HasSuffix(f.dst, ".txt") {
|
||||
@@ -761,23 +960,23 @@ func zipFile(out string, files []archiveFile) {
|
||||
}
|
||||
}
|
||||
|
||||
func vet(pkg string) {
|
||||
bs, err := runError("go", "vet", pkg)
|
||||
if err != nil && err.Error() == "exit status 3" || bytes.Contains(bs, []byte("no such tool \"vet\"")) {
|
||||
// Go said there is no go vet
|
||||
log.Println(`- No go vet, no vetting. Try "go get -u golang.org/x/tools/cmd/vet".`)
|
||||
return
|
||||
func vet(dirs ...string) {
|
||||
params := []string{"tool", "vet", "-all"}
|
||||
params = append(params, dirs...)
|
||||
bs, err := runError("go", params...)
|
||||
|
||||
if len(bs) > 0 {
|
||||
log.Printf("%s", bs)
|
||||
}
|
||||
|
||||
falseAlarmComposites := regexp.MustCompile("composite literal uses unkeyed fields")
|
||||
exitStatus := regexp.MustCompile("exit status 1")
|
||||
for _, line := range bytes.Split(bs, []byte("\n")) {
|
||||
if falseAlarmComposites.Match(line) || exitStatus.Match(line) {
|
||||
continue
|
||||
}
|
||||
if len(line) > 0 {
|
||||
log.Printf("%s", line)
|
||||
if err != nil {
|
||||
if exitStatus(err) == 3 {
|
||||
// Exit code 3, the "vet" tool is not installed
|
||||
return
|
||||
}
|
||||
|
||||
// A genuine error exit from the vet tool.
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -789,13 +988,17 @@ func lint(pkg string) {
|
||||
}
|
||||
|
||||
analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
|
||||
for _, line := range bytes.Split(bs, []byte("\n")) {
|
||||
if analCommentPolicy.Match(line) {
|
||||
for _, line := range strings.Split(string(bs), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
if len(line) > 0 {
|
||||
log.Printf("%s", line)
|
||||
if analCommentPolicy.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(line, ".pb.go:") {
|
||||
continue
|
||||
}
|
||||
log.Println(line)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -817,3 +1020,51 @@ func macosCodesign(file string) {
|
||||
log.Println("Codesign: successfully signed", file)
|
||||
}
|
||||
}
|
||||
|
||||
func exitStatus(err error) int {
|
||||
if err, ok := err.(*exec.ExitError); ok {
|
||||
if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
|
||||
return ws.ExitStatus()
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
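For reference, a hedged usage sketch of the exitStatus helper above: run a command with os/exec and branch on the numeric exit code, the same way vet() treats exit status 3 as "the vet tool is not installed". The command and the meaning of the codes here are purely illustrative.

```go
package main

import (
	"log"
	"os/exec"
	"syscall"
)

// Same helper as in build.go above.
func exitStatus(err error) int {
	if err, ok := err.(*exec.ExitError); ok {
		if ws, ok := err.ProcessState.Sys().(syscall.WaitStatus); ok {
			return ws.ExitStatus()
		}
	}
	return -1
}

func main() {
	// "false" exits with status 1 on most Unix systems.
	err := exec.Command("false").Run()
	switch exitStatus(err) {
	case -1:
		log.Println("no exit code available (command ran fine, or never started)")
	case 1:
		log.Println("command failed with exit status 1")
	default:
		log.Println("some other exit status")
	}
}
```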
|
||||
func isGometalinterInstalled() bool {
|
||||
if _, err := runError("gometalinter", "--disable-all"); err != nil {
|
||||
log.Println("gometalinter is not installed")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func gometalinter(linter string, dirs []string, excludes ...string) bool {
|
||||
params := []string{"--disable-all"}
|
||||
params = append(params, fmt.Sprintf("--deadline=%ds", 60))
|
||||
params = append(params, "--enable="+linter)
|
||||
|
||||
for _, exclude := range excludes {
|
||||
params = append(params, "--exclude="+exclude)
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
params = append(params, dir)
|
||||
}
|
||||
|
||||
bs, _ := runError("gometalinter", params...)
|
||||
|
||||
nerr := 0
|
||||
for _, line := range strings.Split(string(bs), "\n") {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(line, ".pb.go:") {
|
||||
continue
|
||||
}
|
||||
log.Println(line)
|
||||
nerr++
|
||||
}
|
||||
|
||||
return nerr == 0
|
||||
}
|
||||
|
||||
56
build.sh
@@ -104,7 +104,7 @@ case "${1:-default}" in
		# For every package in the repo
		for dir in $(go list ./lib/... ./cmd/...) ; do
			# run the tests
			GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -race -coverprofile=profile.out $dir
			GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -coverprofile=profile.out $dir
			if [ -f profile.out ] ; then
				# and if there was test output, append it to coverage.out
				grep -v "mode: " profile.out >> coverage.out
@@ -112,6 +112,11 @@ case "${1:-default}" in
			fi
		done

		notCovered=$(egrep -c '\s0$' coverage.out)
		total=$(wc -l coverage.out | awk '{print $1}')
		coverPct=$(awk "BEGIN{print (1 - $notCovered / $total) * 100}")
		echo "Total coverage is $coverPct%"

		gocov convert coverage.out | gocov-xml > coverage.xml

		# This is usually run from within Jenkins. If it is, we need to
@@ -131,55 +136,6 @@ case "${1:-default}" in
|
||||
go2xunit -output tests.xml -fail < tests.out
|
||||
;;
|
||||
|
||||
docker-all)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -c './build.sh clean \
|
||||
&& ./build.sh test-cov \
|
||||
&& ./build.sh bench \
|
||||
&& ./build.sh all'
|
||||
;;
|
||||
|
||||
docker-test)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc './build.sh clean \
|
||||
&& go run build.go -race \
|
||||
&& export GOPATH=$(pwd)/Godeps/_workspace:$GOPATH \
|
||||
&& cd test \
|
||||
&& go test -tags integration -v -timeout 90m -short \
|
||||
&& git clean -fxd .'
|
||||
;;
|
||||
|
||||
docker-lint)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc 'go run build.go lint'
|
||||
;;
|
||||
|
||||
|
||||
docker-vet)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc 'go run build.go vet'
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown build command $1"
|
||||
;;
|
||||
|
||||
143
cmd/stbench/main.go
Normal file
@@ -0,0 +1,143 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// This doesn't build on Windows due to the Rusage stuff.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/rc"
|
||||
)
|
||||
|
||||
var homeDir = "h1"
|
||||
var syncthingBin = "./bin/syncthing"
|
||||
var test = "scan"
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&homeDir, "home", homeDir, "Home directory location")
|
||||
flag.StringVar(&syncthingBin, "bin", syncthingBin, "Binary location")
|
||||
flag.StringVar(&test, "test", test, "Test to run")
|
||||
flag.Parse()
|
||||
|
||||
switch test {
|
||||
case "scan":
|
||||
// scan measures the resource usage required to perform the initial
|
||||
// scan, without cleaning away the database first.
|
||||
testScan()
|
||||
}
|
||||
}
|
||||
|
||||
// testScan starts a process and reports on the resource usage required to
|
||||
// perform the initial scan.
|
||||
func testScan() {
|
||||
log.Println("Starting...")
|
||||
p := rc.NewProcess("127.0.0.1:8081")
|
||||
if err := p.Start(syncthingBin, "-home", homeDir, "-no-browser"); err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
defer p.Stop()
|
||||
|
||||
wallTime := awaitScanComplete(p)
|
||||
|
||||
report(p, wallTime)
|
||||
}
|
||||
|
||||
// awaitScanComplete waits for a folder to transition idle->scanning and
|
||||
// then scanning->idle and returns the time taken for the scan.
|
||||
func awaitScanComplete(p *rc.Process) time.Duration {
|
||||
log.Println("Awaiting scan completion...")
|
||||
var t0, t1 time.Time
|
||||
lastEvent := 0
|
||||
loop:
|
||||
for {
|
||||
evs, err := p.Events(lastEvent)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ev := range evs {
|
||||
if ev.Type == "StateChanged" {
|
||||
data := ev.Data.(map[string]interface{})
|
||||
log.Println(ev)
|
||||
|
||||
if data["to"].(string) == "scanning" {
|
||||
t0 = ev.Time
|
||||
continue
|
||||
}
|
||||
|
||||
if !t0.IsZero() && data["to"].(string) == "idle" {
|
||||
t1 = ev.Time
|
||||
break loop
|
||||
}
|
||||
}
|
||||
lastEvent = ev.ID
|
||||
}
|
||||
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
|
||||
return t1.Sub(t0)
|
||||
}
|
||||
|
||||
// report stops the given process and reports on its resource usage in two
|
||||
// ways: human readable to stderr, and CSV to stdout.
|
||||
func report(p *rc.Process, wallTime time.Duration) {
|
||||
sv, err := p.SystemVersion()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
ss, err := p.SystemStatus()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
proc, err := p.Stop()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
rusage, ok := proc.SysUsage().(*syscall.Rusage)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
log.Println("Version:", sv.Version)
|
||||
log.Println("Alloc:", ss.Alloc/1024, "KiB")
|
||||
log.Println("Sys:", ss.Sys/1024, "KiB")
|
||||
log.Println("Goroutines:", ss.Goroutines)
|
||||
log.Println("Wall time:", wallTime)
|
||||
log.Println("Utime:", time.Duration(rusage.Utime.Nano()))
|
||||
log.Println("Stime:", time.Duration(rusage.Stime.Nano()))
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Darwin reports in bytes, Linux seems to report in KiB even
|
||||
// though the manpage says otherwise.
|
||||
rusage.Maxrss /= 1024
|
||||
}
|
||||
log.Println("MaxRSS:", rusage.Maxrss, "KiB")
|
||||
|
||||
fmt.Printf("%s,%d,%d,%d,%.02f,%.02f,%.02f,%d\n",
|
||||
sv.Version,
|
||||
ss.Alloc/1024,
|
||||
ss.Sys/1024,
|
||||
ss.Goroutines,
|
||||
wallTime.Seconds(),
|
||||
time.Duration(rusage.Utime.Nano()).Seconds(),
|
||||
time.Duration(rusage.Stime.Nano()).Seconds(),
|
||||
rusage.Maxrss)
|
||||
}
|
||||
114
cmd/stdisco/main.go
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/beacon"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
all = false // print all packets, not just first from each device/source
|
||||
fake = false // send fake packets to lure out other devices faster
|
||||
mc = "[ff12::8384]:21027"
|
||||
bc = 21027
|
||||
)
|
||||
|
||||
var (
|
||||
// Static prefix that we use when generating fake device IDs, so that we
|
||||
// can recognize them ourselves. Also makes the device ID start with
|
||||
// "STPROBE-" which is humanly recognizable.
|
||||
randomPrefix = []byte{148, 223, 23, 4, 148}
|
||||
|
||||
// Our random, fake, device ID that we use when sending announcements.
|
||||
myID = randomDeviceID()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.BoolVar(&all, "all", all, "Print all received announcements (not only first)")
|
||||
flag.BoolVar(&fake, "fake", fake, "Send fake announcements")
|
||||
flag.StringVar(&mc, "mc", mc, "IPv6 multicast address")
|
||||
flag.IntVar(&bc, "bc", bc, "IPv4 broadcast port number")
|
||||
flag.Parse()
|
||||
|
||||
if fake {
|
||||
log.Println("My ID:", protocol.DeviceIDFromBytes(myID))
|
||||
}
|
||||
|
||||
runbeacon(beacon.NewMulticast(mc), fake)
|
||||
runbeacon(beacon.NewBroadcast(bc), fake)
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
func runbeacon(bc beacon.Interface, fake bool) {
|
||||
go bc.Serve()
|
||||
go recv(bc)
|
||||
if fake {
|
||||
go send(bc)
|
||||
}
|
||||
}
|
||||
|
||||
// receives and prints discovery announcements
|
||||
func recv(bc beacon.Interface) {
|
||||
seen := make(map[string]bool)
|
||||
for {
|
||||
data, src := bc.Recv()
|
||||
if m := binary.BigEndian.Uint32(data); m != discover.Magic {
|
||||
log.Printf("Incorrect magic %x in announcement from %v", m, src)
|
||||
continue
|
||||
}
|
||||
|
||||
var ann discover.Announce
|
||||
ann.Unmarshal(data[4:])
|
||||
|
||||
if bytes.Equal(ann.ID, myID) {
|
||||
// This is one of our own fake packets, don't print it.
|
||||
continue
|
||||
}
|
||||
|
||||
// Print announcement details for the first packet from a given
|
||||
// device ID and source address, or if -all was given.
|
||||
key := string(ann.ID) + src.String()
|
||||
if all || !seen[key] {
|
||||
log.Printf("Announcement from %v\n", src)
|
||||
log.Printf(" %v at %s\n", protocol.DeviceIDFromBytes(ann.ID), strings.Join(ann.Addresses, ", "))
|
||||
seen[key] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sends fake discovery announcements once every second
|
||||
func send(bc beacon.Interface) {
|
||||
ann := discover.Announce{
|
||||
ID: myID,
|
||||
Addresses: []string{"tcp://fake.example.com:12345"},
|
||||
}
|
||||
bs, _ := ann.Marshal()
|
||||
|
||||
for {
|
||||
bc.Send(bs)
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// returns a random but recognizable device ID
|
||||
func randomDeviceID() []byte {
|
||||
var id [32]byte
|
||||
copy(id[:], randomPrefix)
|
||||
rand.Read(id[len(randomPrefix):])
|
||||
return id[:]
|
||||
}
|
||||
19
cmd/stdiscosrv/LICENSE
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014-2015 The Discosrv Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
40
cmd/stdiscosrv/README.md
Normal file
@@ -0,0 +1,40 @@
stdiscosrv
==========

[](http://build.syncthing.net/job/stdiscosrv/lastBuild/)

This is the global discovery server for the `syncthing` project.

To get it, run `go get github.com/syncthing/stdiscosrv` or download the
[latest build](http://build.syncthing.net/job/stdiscosrv/lastSuccessfulBuild/artifact/)
from the build server.

Usage
-----

The discovery server supports `ql` and `postgres` backends.
Specify the backend via `-db-backend` and the database DSN via `-db-dsn`.

By default it will use the in-memory `ql` backend. If you wish to persist the
information on disk between restarts with `ql`, specify a file DSN:

```bash
$ stdiscosrv -db-dsn="file:///var/run/stdiscosrv.db"
```

For `postgres`, you will need to create a database and a user with permissions
to create tables in it, then start stdiscosrv as follows:

```bash
$ export STDISCOSRV_DB_DSN="postgres://user:password@localhost/databasename"
$ stdiscosrv -db-backend="postgres"
```

You can pass the DSN as a command line option, but the value you pass in will
be visible in most process managers, potentially exposing the database password
to other users.

In all cases, the appropriate tables and indexes will be created at first
startup. If the server doesn't exit with an error, you're fine.

See `stdiscosrv -help` for other options.
75
cmd/stdiscosrv/clean.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
type cleansrv struct {
|
||||
intv time.Duration
|
||||
db *sql.DB
|
||||
prep map[string]*sql.Stmt
|
||||
}
|
||||
|
||||
func (s *cleansrv) Serve() {
|
||||
for {
|
||||
time.Sleep(next(s.intv))
|
||||
|
||||
err := s.cleanOldEntries()
|
||||
if err != nil {
|
||||
log.Println("Clean:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *cleansrv) Stop() {
|
||||
panic("stop unimplemented")
|
||||
}
|
||||
|
||||
func (s *cleansrv) cleanOldEntries() (err error) {
|
||||
var tx *sql.Tx
|
||||
tx, err = s.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
} else {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
res, err := tx.Stmt(s.prep["cleanAddress"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old addresses", rows)
|
||||
}
|
||||
|
||||
res, err = tx.Stmt(s.prep["cleanDevice"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old devices", rows)
|
||||
}
|
||||
|
||||
var devs, addrs int
|
||||
row := tx.Stmt(s.prep["countDevice"]).QueryRow()
|
||||
if err = row.Scan(&devs); err != nil {
|
||||
return err
|
||||
}
|
||||
row = tx.Stmt(s.prep["countAddress"]).QueryRow()
|
||||
if err = row.Scan(&addrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Database: %d devices, %d addresses", devs, addrs)
|
||||
return nil
|
||||
}
|
||||
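cleanOldEntries relies on a named error return plus a deferred commit-or-rollback, so every early `return err` automatically rolls the transaction back. As a sketch, that pattern could be factored into a small helper like the hypothetical one below; it is not part of the actual file and assumes it lives in the same package as clean.go, which already imports database/sql.

```go
// withTx runs fn inside a transaction, committing on success and rolling
// back if fn (or Begin) returned an error. Hypothetical helper shown for
// illustration; the real code inlines this pattern in cleanOldEntries and qlSetup.
func withTx(db *sql.DB, fn func(tx *sql.Tx) error) (err error) {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer func() {
		if err == nil {
			err = tx.Commit()
		} else {
			tx.Rollback()
		}
	}()
	return fn(tx)
}
```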
32
cmd/stdiscosrv/db.go
Normal file
@@ -0,0 +1,32 @@
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).

package main

import (
	"database/sql"
	"fmt"
)

type setupFunc func(db *sql.DB) error
type compileFunc func(db *sql.DB) (map[string]*sql.Stmt, error)

var (
	setupFuncs   = make(map[string]setupFunc)
	compileFuncs = make(map[string]compileFunc)
)

func register(name string, setup setupFunc, compile compileFunc) {
	setupFuncs[name] = setup
	compileFuncs[name] = compile
}

func setup(backend string, db *sql.DB) (map[string]*sql.Stmt, error) {
	setup, ok := setupFuncs[backend]
	if !ok {
		return nil, fmt.Errorf("Unsupported backend")
	}
	if err := setup(db); err != nil {
		return nil, err
	}
	return compileFuncs[backend](db)
}
146
cmd/stdiscosrv/main.go
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
|
||||
const (
|
||||
minNegCache = 60 // seconds
|
||||
maxNegCache = 3600 // seconds
|
||||
maxDeviceAge = 7 * 86400 // one week, in seconds
|
||||
)
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`stdiscosrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
}
|
||||
|
||||
var (
|
||||
lruSize = 10240
|
||||
limitAvg = 5
|
||||
limitBurst = 20
|
||||
globalStats stats
|
||||
statsFile string
|
||||
backend = "ql"
|
||||
dsn = getEnvDefault("STDISCOSRV_DB_DSN", "memory://stdiscosrv")
|
||||
certFile = "cert.pem"
|
||||
keyFile = "key.pem"
|
||||
debug = false
|
||||
useHTTP = false
|
||||
)
|
||||
|
||||
func main() {
|
||||
const (
|
||||
cleanIntv = 1 * time.Hour
|
||||
statsIntv = 5 * time.Minute
|
||||
)
|
||||
|
||||
var listen string
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
|
||||
flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average packet rate, per 10 s")
|
||||
flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
|
||||
flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
|
||||
flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
|
||||
flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
|
||||
flag.StringVar(&certFile, "cert", certFile, "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", keyFile, "Key file")
|
||||
flag.BoolVar(&debug, "debug", debug, "Debug")
|
||||
flag.BoolVar(&useHTTP, "http", useHTTP, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.Parse()
|
||||
|
||||
log.Println(LongVersion)
|
||||
|
||||
var cert tls.Certificate
|
||||
var err error
|
||||
if !useHTTP {
|
||||
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
}
|
||||
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
|
||||
|
||||
db, err := sql.Open(backend, dsn)
|
||||
if err != nil {
|
||||
log.Fatalln("sql.Open:", err)
|
||||
}
|
||||
prep, err := setup(backend, db)
|
||||
if err != nil {
|
||||
log.Fatalln("Setup:", err)
|
||||
}
|
||||
|
||||
main := suture.NewSimple("main")
|
||||
|
||||
main.Add(&querysrv{
|
||||
addr: listen,
|
||||
cert: cert,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
|
||||
main.Add(&cleansrv{
|
||||
intv: cleanIntv,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
|
||||
main.Add(&statssrv{
|
||||
intv: statsIntv,
|
||||
file: statsFile,
|
||||
db: db,
|
||||
})
|
||||
|
||||
globalStats.Reset()
|
||||
main.Serve()
|
||||
}
|
||||
|
||||
func getEnvDefault(key, def string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
func next(intv time.Duration) time.Duration {
|
||||
t0 := time.Now()
|
||||
t1 := t0.Add(intv).Truncate(intv)
|
||||
return t1.Sub(t0)
|
||||
}
|
||||
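next() sleeps until the following interval boundary rather than a full interval from now: Truncate rounds the future time down to a multiple of intv, so the cleaner and stats loops wake up on the hour, the five-minute mark, and so on. A small runnable sketch of the effect (the clock values in the comment are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// Same helper as in main.go above.
func next(intv time.Duration) time.Duration {
	t0 := time.Now()
	t1 := t0.Add(intv).Truncate(intv)
	return t1.Sub(t0)
}

func main() {
	// If it is 12:34:56 now, next(time.Hour) is roughly 25m4s: the next
	// whole-hour boundary is 13:00:00, not 13:34:56.
	fmt.Println(next(time.Hour))
}
```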
98
cmd/stdiscosrv/psql.go
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("postgres", postgresSetup, postgresCompile)
|
||||
}
|
||||
|
||||
func postgresSetup(db *sql.DB) error {
|
||||
var err error
|
||||
|
||||
db.SetMaxIdleConns(4)
|
||||
db.SetMaxOpenConns(8)
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Devices (
|
||||
DeviceID CHAR(63) NOT NULL PRIMARY KEY,
|
||||
Seen TIMESTAMP NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var tmp string
|
||||
row := db.QueryRow(`SELECT 'DevicesDeviceIDIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'DevicesSeenIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX DevicesSeenIndex ON Devices (Seen)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
|
||||
DeviceID CHAR(63) NOT NULL,
|
||||
Seen TIMESTAMP NOT NULL,
|
||||
Address VARCHAR(2048) NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'AddressesDeviceIDSeenIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDSeenIndex ON Addresses (DeviceID, Seen)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'AddressesDeviceIDAddressIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
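postgresSetup probes for each index with `SELECT 'name'::regclass` and only issues CREATE INDEX when that lookup fails, presumably because CREATE INDEX IF NOT EXISTS was not available on the targeted PostgreSQL versions. The check-then-create step could be wrapped in a helper like the hypothetical sketch below; it is not part of the actual file and assumes the same package and database/sql import as psql.go. The index name must be a trusted constant, since it is spliced into the SQL text.

```go
// ensureIndex creates the named index unless a relation with that name
// already exists. Hypothetical helper illustrating the pattern used in
// postgresSetup; name must come from a trusted constant.
func ensureIndex(db *sql.DB, name, createStmt string) error {
	var tmp string
	row := db.QueryRow(`SELECT '` + name + `'::regclass`)
	if err := row.Scan(&tmp); err != nil {
		// The relation does not exist yet; create it.
		_, err = db.Exec(createStmt)
		return err
	}
	return nil
}

// Usage, mirroring one of the calls above:
//   ensureIndex(db, "DevicesDeviceIDIndex",
//       "CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)")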
|
||||
func postgresCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
stmts := map[string]string{
|
||||
"cleanAddress": "DELETE FROM Addresses WHERE Seen < now() - '2 hour'::INTERVAL",
|
||||
"cleanDevice": fmt.Sprintf("DELETE FROM Devices WHERE Seen < now() - '%d hour'::INTERVAL", maxDeviceAge/3600),
|
||||
"countAddress": "SELECT count(*) FROM Addresses",
|
||||
"countDevice": "SELECT count(*) FROM Devices",
|
||||
"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
|
||||
"insertDevice": "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
|
||||
"selectAddress": "SELECT Address FROM Addresses WHERE DeviceID=$1 AND Seen > now() - '1 hour'::INTERVAL ORDER BY random() LIMIT 16",
|
||||
"selectDevice": "SELECT Seen FROM Devices WHERE DeviceID=$1",
|
||||
"updateAddress": "UPDATE Addresses SET Seen=now() WHERE DeviceID=$1 AND Address=$2",
|
||||
"updateDevice": "UPDATE Devices SET Seen=now() WHERE DeviceID=$1",
|
||||
}
|
||||
|
||||
res := make(map[string]*sql.Stmt, len(stmts))
|
||||
for key, stmt := range stmts {
|
||||
prep, err := db.Prepare(stmt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[key] = prep
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
81
cmd/stdiscosrv/ql.go
Normal file
@@ -0,0 +1,81 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/cznic/ql"
|
||||
)
|
||||
|
||||
func init() {
|
||||
ql.RegisterDriver()
|
||||
register("ql", qlSetup, qlCompile)
|
||||
}
|
||||
|
||||
func qlSetup(db *sql.DB) (err error) {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
} else {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Devices (
|
||||
DeviceID STRING NOT NULL,
|
||||
Seen TIME NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`CREATE INDEX IF NOT EXISTS DevicesDeviceIDIndex ON Devices (DeviceID)`); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
|
||||
DeviceID STRING NOT NULL,
|
||||
Seen TIME NOT NULL,
|
||||
Address STRING NOT NULL,
|
||||
)`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = tx.Exec(`CREATE INDEX IF NOT EXISTS AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
|
||||
return
|
||||
}
|
||||
|
||||
func qlCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
stmts := map[string]string{
|
||||
"cleanAddress": `DELETE FROM Addresses WHERE Seen < now() - duration("2h")`,
|
||||
"cleanDevice": fmt.Sprintf(`DELETE FROM Devices WHERE Seen < now() - duration("%dh")`, maxDeviceAge/3600),
|
||||
"countAddress": "SELECT count(*) FROM Addresses",
|
||||
"countDevice": "SELECT count(*) FROM Devices",
|
||||
"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
|
||||
"insertDevice": "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
|
||||
"selectAddress": `SELECT Address from Addresses WHERE DeviceID==$1 AND Seen > now() - duration("1h") LIMIT 16`,
|
||||
"selectDevice": "SELECT Seen FROM Devices WHERE DeviceID==$1",
|
||||
"updateAddress": "UPDATE Addresses Seen=now() WHERE DeviceID==$1 AND Address==$2",
|
||||
"updateDevice": "UPDATE Devices Seen=now() WHERE DeviceID==$1",
|
||||
}
|
||||
|
||||
res := make(map[string]*sql.Stmt, len(stmts))
|
||||
for key, stmt := range stmts {
|
||||
prep, err := db.Prepare(stmt)
|
||||
if err != nil {
|
||||
log.Println("Failed to compile", stmt)
|
||||
return nil, err
|
||||
}
|
||||
res[key] = prep
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
488
cmd/stdiscosrv/querysrv.go
Normal file
@@ -0,0 +1,488 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type querysrv struct {
|
||||
addr string
|
||||
db *sql.DB
|
||||
prep map[string]*sql.Stmt
|
||||
limiter *safeCache
|
||||
cert tls.Certificate
|
||||
listener net.Listener
|
||||
}
|
||||
|
||||
type announcement struct {
|
||||
Seen time.Time `json:"seen"`
|
||||
Addresses []string `json:"addresses"`
|
||||
}
|
||||
|
||||
type safeCache struct {
|
||||
*lru.Cache
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func (s *safeCache) Get(key string) (val interface{}, ok bool) {
|
||||
s.mut.Lock()
|
||||
val, ok = s.Cache.Get(key)
|
||||
s.mut.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *safeCache) Add(key string, val interface{}) {
|
||||
s.mut.Lock()
|
||||
s.Cache.Add(key, val)
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
|
||||
func (i requestID) String() string {
|
||||
return fmt.Sprintf("%016x", int64(i))
|
||||
}
|
||||
|
||||
func negCacheFor(lastSeen time.Time) int {
|
||||
since := time.Since(lastSeen).Seconds()
|
||||
if since >= maxDeviceAge {
|
||||
return maxNegCache
|
||||
}
|
||||
if since < 0 {
|
||||
// That's weird
|
||||
return minNegCache
|
||||
}
|
||||
|
||||
// Return a value linearly scaled from minNegCache (at zero seconds ago)
|
||||
// to maxNegCache (at maxDeviceAge seconds ago).
|
||||
r := since / maxDeviceAge
|
||||
return int(minNegCache + r*(maxNegCache-minNegCache))
|
||||
}
|
||||
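negCacheFor scales the negative-cache TTL linearly with how long ago the device was last seen, from minNegCache (60 s) for a device seen just now up to maxNegCache (3600 s) once the age reaches maxDeviceAge (one week). A worked example using the constants from main.go:

```go
package main

import "fmt"

const (
	minNegCache  = 60          // seconds
	maxNegCache  = 3600        // seconds
	maxDeviceAge = 7 * 86400   // one week, in seconds
)

func main() {
	since := 3.5 * 86400.0 // device last seen 3.5 days ago
	r := since / maxDeviceAge
	// 60 + 0.5*(3600-60) = 1830 s, i.e. clients may cache the miss
	// for about half an hour.
	fmt.Println(int(minNegCache + r*(maxNegCache-minNegCache)))
}
```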
|
||||
func (s *querysrv) Serve() {
|
||||
s.limiter = &safeCache{
|
||||
Cache: lru.New(lruSize),
|
||||
}
|
||||
|
||||
if useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = listener
|
||||
} else {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = tlsListener
|
||||
}
|
||||
|
||||
http.HandleFunc("/v2/", s.handler)
|
||||
http.HandleFunc("/ping", handlePing)
|
||||
|
||||
srv := &http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 5 * time.Second,
|
||||
MaxHeaderBytes: 1 << 10,
|
||||
}
|
||||
|
||||
if err := srv.Serve(s.listener); err != nil {
|
||||
log.Println("Serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
var topCtx = context.Background()
|
||||
|
||||
func (s *querysrv) handler(w http.ResponseWriter, req *http.Request) {
|
||||
reqID := requestID(rand.Int63())
|
||||
ctx := context.WithValue(topCtx, "id", reqID)
|
||||
|
||||
if debug {
|
||||
log.Println(reqID, req.Method, req.URL)
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
diff := time.Since(t0)
|
||||
var comment string
|
||||
if diff > time.Second {
|
||||
comment = "(very slow request)"
|
||||
} else if diff > 100*time.Millisecond {
|
||||
comment = "(slow request)"
|
||||
}
|
||||
if comment != "" || debug {
|
||||
log.Println(reqID, req.Method, req.URL, "completed in", diff, comment)
|
||||
}
|
||||
}()
|
||||
|
||||
var remoteIP net.IP
|
||||
if useHTTP {
|
||||
remoteIP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
|
||||
} else {
|
||||
addr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Println("remoteAddr:", err)
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
remoteIP = addr.IP
|
||||
}
|
||||
|
||||
if s.limit(remoteIP) {
|
||||
if debug {
|
||||
log.Println(remoteIP, "is limited")
|
||||
}
|
||||
w.Header().Set("Retry-After", "60")
|
||||
http.Error(w, "Too Many Requests", 429)
|
||||
return
|
||||
}
|
||||
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
s.handleGET(ctx, w, req)
|
||||
case "POST":
|
||||
s.handlePOST(ctx, remoteIP, w, req)
|
||||
default:
|
||||
globalStats.Error()
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *querysrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value("id").(requestID)
|
||||
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param")
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var ann announcement
|
||||
|
||||
ann.Seen, err = s.getDeviceSeen(deviceID)
|
||||
negCache := strconv.Itoa(negCacheFor(ann.Seen))
|
||||
w.Header().Set("Retry-After", negCache)
|
||||
w.Header().Set("Cache-Control", "public, max-age="+negCache)
|
||||
|
||||
if err != nil {
|
||||
// The device is not in the database.
|
||||
globalStats.Query()
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
ann.Addresses, err = s.getAddresses(ctx, deviceID)
|
||||
if err != nil {
|
||||
log.Println(reqID, "getAddresses:", err)
|
||||
globalStats.Error()
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "getAddresses in", time.Since(t0))
|
||||
}
|
||||
|
||||
globalStats.Query()
|
||||
|
||||
if len(ann.Addresses) == 0 {
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
globalStats.Answer()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(ann)
|
||||
}
|
||||
|
||||
func (s *querysrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value("id").(requestID)
|
||||
|
||||
rawCert := certificateBytes(req)
|
||||
if rawCert == nil {
|
||||
if debug {
|
||||
log.Println(reqID, "no certificates")
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
var ann announcement
|
||||
if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "decode:", err)
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
deviceID := protocol.NewDeviceID(rawCert)
|
||||
|
||||
// handleAnnounce returns *two* errors. The first indicates a problem with
|
||||
// something the client posted to us. We should return a 400 Bad Request
|
||||
// and not worry about it. The second indicates that the request was fine,
|
||||
// but something internal messed up. We should log it and respond with a
|
||||
// more apologetic 500 Internal Server Error.
|
||||
userErr, internalErr := s.handleAnnounce(ctx, remoteIP, deviceID, ann.Addresses)
|
||||
if userErr != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "handleAnnounce:", userErr)
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if internalErr != nil {
|
||||
log.Println(reqID, "handleAnnounce:", internalErr)
|
||||
globalStats.Error()
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
globalStats.Announce()
|
||||
|
||||
// TODO: Slowly increase this for stable clients
|
||||
w.Header().Set("Reannounce-After", "1800")
|
||||
|
||||
// We could return the lookup result here, but it's kind of unnecessarily
|
||||
// expensive to go query the database again so we let the client decide to
|
||||
// do a lookup if they really care.
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
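As a sketch of the client side of handleGET above: a lookup is a GET of `/v2/?device=<id>` and the response body is the announcement JSON (`seen`, `addresses`). The URL and device ID below are placeholders; announcements (POST) additionally require a client certificate, or an X-SSL-Cert header from a fronting proxy in `-http` mode, so they are omitted here.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// Mirrors the announcement JSON served by handleGET.
type announcement struct {
	Seen      time.Time `json:"seen"`
	Addresses []string  `json:"addresses"`
}

func main() {
	// Placeholder server and device ID; a real deployment speaks HTTPS.
	resp, err := http.Get("http://localhost:8443/v2/?device=DEVICE-ID-HERE")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// 404 means the device is unknown; Retry-After carries the negative cache TTL.
		log.Fatalln("lookup:", resp.Status, "retry after", resp.Header.Get("Retry-After"), "s")
	}

	var ann announcement
	if err := json.NewDecoder(resp.Body).Decode(&ann); err != nil {
		log.Fatal(err)
	}
	log.Println("last seen", ann.Seen, "addresses", ann.Addresses)
}
```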
|
||||
func (s *querysrv) Stop() {
|
||||
s.listener.Close()
|
||||
}
|
||||
|
||||
func (s *querysrv) handleAnnounce(ctx context.Context, remote net.IP, deviceID protocol.DeviceID, addresses []string) (userErr, internalErr error) {
|
||||
reqID := ctx.Value("id").(requestID)
|
||||
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Since we return from a bunch of different places, we handle
|
||||
// rollback in the defer.
|
||||
if internalErr != nil || userErr != nil {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
for _, annAddr := range addresses {
|
||||
uri, err := url.Parse(annAddr)
|
||||
if err != nil {
|
||||
userErr = err
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
userErr = err
|
||||
return
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
if host == "" || ip.IsUnspecified() {
|
||||
// Do not use IPv6 remote address if requested scheme is tcp4
|
||||
if uri.Scheme == "tcp4" && remote.To4() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv4 remote address if requested scheme is tcp6
|
||||
if uri.Scheme == "tcp6" && remote.To4() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host = remote.String()
|
||||
}
|
||||
|
||||
uri.Host = net.JoinHostPort(host, port)
|
||||
|
||||
if err := s.updateAddress(ctx, tx, deviceID, uri.String()); err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.updateDevice(ctx, tx, deviceID); err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
internalErr = tx.Commit()
|
||||
if debug {
|
||||
log.Println(reqID, "commit in", time.Since(t0))
|
||||
}
|
||||
return
|
||||
}
|
||||
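handleAnnounce substitutes the announcing client's source IP whenever an announced address has no usable host (empty or unspecified), skipping combinations where the URL scheme's address family does not match the remote IP. A small runnable sketch of that rewrite for a single address (the addresses are illustrative):

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	remote := net.ParseIP("203.0.113.7") // announcing client's source address
	uri, _ := url.Parse("tcp://0.0.0.0:22000")

	host, port, _ := net.SplitHostPort(uri.Host)
	if ip := net.ParseIP(host); host == "" || ip.IsUnspecified() {
		// No concrete host announced; fall back to the remote address.
		host = remote.String()
	}
	uri.Host = net.JoinHostPort(host, port)

	fmt.Println(uri.String()) // tcp://203.0.113.7:22000
}
```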
|
||||
func (s *querysrv) limit(remote net.IP) bool {
|
||||
key := remote.String()
|
||||
|
||||
bkt, ok := s.limiter.Get(key)
|
||||
if ok {
|
||||
bkt := bkt.(*ratelimit.Bucket)
|
||||
if bkt.TakeAvailable(1) != 1 {
|
||||
// Rate limit exceeded; ignore packet
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
// One packet per ten seconds average rate, burst ten packets
|
||||
s.limiter.Add(key, ratelimit.NewBucket(10*time.Second/time.Duration(limitAvg), int64(limitBurst)))
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
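The limiter keeps one token bucket per source IP in the LRU cache. With the defaults from main.go (limitAvg = 5, limitBurst = 20), the fill interval is 10s/5 = 2 s, i.e. an average of one request every two seconds with bursts of up to 20. A sketch using the same juju/ratelimit calls as the code above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	limitAvg, limitBurst := 5, 20 // defaults from main.go

	// One token every 10s/limitAvg = 2s, at most limitBurst tokens stored.
	bkt := ratelimit.NewBucket(10*time.Second/time.Duration(limitAvg), int64(limitBurst))

	allowed := 0
	for i := 0; i < 30; i++ {
		if bkt.TakeAvailable(1) == 1 {
			allowed++
		}
	}
	// The bucket starts full, so the first 20 requests pass and the rest
	// are rejected until tokens refill.
	fmt.Println(allowed)
}
```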
|
||||
func (s *querysrv) updateDevice(ctx context.Context, tx *sql.Tx, device protocol.DeviceID) error {
|
||||
reqID := ctx.Value("id").(requestID)
|
||||
t0 := time.Now()
|
||||
res, err := tx.Stmt(s.prep["updateDevice"]).Exec(device.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "updateDevice in", time.Since(t0))
|
||||
}
|
||||
|
||||
if rows, _ := res.RowsAffected(); rows == 0 {
|
||||
t0 = time.Now()
|
||||
_, err := tx.Stmt(s.prep["insertDevice"]).Exec(device.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "insertDevice in", time.Since(t0))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *querysrv) updateAddress(ctx context.Context, tx *sql.Tx, device protocol.DeviceID, uri string) error {
|
||||
res, err := tx.Stmt(s.prep["updateAddress"]).Exec(device.String(), uri)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rows, _ := res.RowsAffected(); rows == 0 {
|
||||
_, err := tx.Stmt(s.prep["insertAddress"]).Exec(device.String(), uri)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *querysrv) getAddresses(ctx context.Context, device protocol.DeviceID) ([]string, error) {
|
||||
rows, err := s.prep["selectAddress"].Query(device.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var res []string
|
||||
for rows.Next() {
|
||||
var addr string
|
||||
|
||||
err := rows.Scan(&addr)
|
||||
if err != nil {
|
||||
log.Println("Scan:", err)
|
||||
continue
|
||||
}
|
||||
res = append(res, addr)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *querysrv) getDeviceSeen(device protocol.DeviceID) (time.Time, error) {
|
||||
row := s.prep["selectDevice"].QueryRow(device.String())
|
||||
var seen time.Time
|
||||
if err := row.Scan(&seen); err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return seen.In(time.UTC), nil
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) []byte {
|
||||
if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
return req.TLS.PeerCertificates[0].Raw
|
||||
}
|
||||
|
||||
if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
|
||||
bs := []byte(hdr)
|
||||
// The certificate is in PEM format but with spaces for newlines. We
|
||||
// need to reinstate the newlines for the PEM decoder. But we need to
|
||||
// leave the spaces in the BEGIN and END lines - the first and last
|
||||
// space - alone.
|
||||
firstSpace := bytes.Index(bs, []byte(" "))
|
||||
lastSpace := bytes.LastIndex(bs, []byte(" "))
|
||||
for i := firstSpace + 1; i < lastSpace; i++ {
|
||||
if bs[i] == ' ' {
|
||||
bs[i] = '\n'
|
||||
}
|
||||
}
|
||||
block, _ := pem.Decode(bs)
|
||||
if block == nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
}
|
||||
return block.Bytes
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
141
cmd/stdiscosrv/stats.go
Normal file
@@ -0,0 +1,141 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type stats struct {
|
||||
// Incremented atomically
|
||||
announces int64
|
||||
queries int64
|
||||
answers int64
|
||||
errors int64
|
||||
}
|
||||
|
||||
func (s *stats) Announce() {
|
||||
atomic.AddInt64(&s.announces, 1)
|
||||
}
|
||||
|
||||
func (s *stats) Query() {
|
||||
atomic.AddInt64(&s.queries, 1)
|
||||
}
|
||||
|
||||
func (s *stats) Answer() {
|
||||
atomic.AddInt64(&s.answers, 1)
|
||||
}
|
||||
|
||||
func (s *stats) Error() {
|
||||
atomic.AddInt64(&s.errors, 1)
|
||||
}
|
||||
|
||||
// Reset returns a copy of the current stats and resets the counters to
|
||||
// zero.
|
||||
func (s *stats) Reset() stats {
|
||||
// Create a copy of the stats using atomic reads
|
||||
copy := stats{
|
||||
announces: atomic.LoadInt64(&s.announces),
|
||||
queries: atomic.LoadInt64(&s.queries),
|
||||
answers: atomic.LoadInt64(&s.answers),
|
||||
errors: atomic.LoadInt64(&s.errors),
|
||||
}
|
||||
|
||||
// Reset the stats by subtracting the values that we copied
|
||||
atomic.AddInt64(&s.announces, -copy.announces)
|
||||
atomic.AddInt64(&s.queries, -copy.queries)
|
||||
atomic.AddInt64(&s.answers, -copy.answers)
|
||||
atomic.AddInt64(&s.errors, -copy.errors)
|
||||
|
||||
return copy
|
||||
}
|
||||
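Reset subtracts the snapshot it just read instead of storing zero, so increments that race in between the atomic loads and the reset are carried over to the next reporting period rather than silently dropped. A small sketch of the difference for a single counter (the numbers are an assumed scenario):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var queries int64 = 10

	// Snapshot for the periodic report.
	snap := atomic.LoadInt64(&queries)

	// A request sneaks in between the load and the reset.
	atomic.AddInt64(&queries, 1)

	// A naive atomic.StoreInt64(&queries, 0) would lose that request.
	// Subtracting the snapshot keeps it for the next period:
	atomic.AddInt64(&queries, -snap)

	fmt.Println(snap, atomic.LoadInt64(&queries)) // 10 1
}
```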
|
||||
type statssrv struct {
|
||||
intv time.Duration
|
||||
file string
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func (s *statssrv) Serve() {
|
||||
lastReset := time.Now()
|
||||
for {
|
||||
time.Sleep(next(s.intv))
|
||||
|
||||
stats := globalStats.Reset()
|
||||
d := time.Since(lastReset).Seconds()
|
||||
lastReset = time.Now()
|
||||
|
||||
log.Printf("Stats: %.02f announces/s, %.02f queries/s, %.02f answers/s, %.02f errors/s",
|
||||
float64(stats.announces)/d, float64(stats.queries)/d, float64(stats.answers)/d, float64(stats.errors)/d)
|
||||
|
||||
if s.file != "" {
|
||||
s.writeToFile(stats, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *statssrv) Stop() {
|
||||
panic("stop unimplemented")
|
||||
}
|
||||
|
||||
func (s *statssrv) writeToFile(stats stats, secs float64) {
|
||||
newLine := []byte("\n")
|
||||
|
||||
var addrs int
|
||||
row := s.db.QueryRow("SELECT COUNT(*) FROM Addresses")
|
||||
if err := row.Scan(&addrs); err != nil {
|
||||
log.Println("stats query:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(s.file, os.O_RDWR|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
bs, err := ioutil.ReadAll(fd)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
lines := bytes.Split(bytes.TrimSpace(bs), newLine)
|
||||
if len(lines) > 12 {
|
||||
lines = lines[len(lines)-12:]
|
||||
}
|
||||
|
||||
latest := fmt.Sprintf("%v: %6d addresses, %8.02f announces/s, %8.02f queries/s, %8.02f answers/s, %8.02f errors/s\n",
|
||||
time.Now().UTC().Format(time.RFC3339), addrs,
|
||||
float64(stats.announces)/secs, float64(stats.queries)/secs, float64(stats.answers)/secs, float64(stats.errors)/secs)
|
||||
lines = append(lines, []byte(latest))
|
||||
|
||||
_, err = fd.Seek(0, 0)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
err = fd.Truncate(0)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = fd.Write(bytes.Join(lines, newLine))
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -40,7 +40,8 @@ func main() {
|
||||
log.Println("Lstat:")
|
||||
log.Printf(" Size: %d bytes", fi.Size())
|
||||
log.Printf(" Mode: 0%o", fi.Mode())
|
||||
log.Printf(" Time: %v (%d)", fi.ModTime(), fi.ModTime().Unix())
|
||||
log.Printf(" Time: %v", fi.ModTime())
|
||||
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
|
||||
log.Println()
|
||||
|
||||
if !fi.Mode().IsDir() && !fi.Mode().IsRegular() {
|
||||
@@ -52,7 +53,8 @@ func main() {
|
||||
log.Println("Stat:")
|
||||
log.Printf(" Size: %d bytes", fi.Size())
|
||||
log.Printf(" Mode: 0%o", fi.Mode())
|
||||
log.Printf(" Time: %v (%d)", fi.ModTime(), fi.ModTime().Unix())
|
||||
log.Printf(" Time: %v", fi.ModTime())
|
||||
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
|
||||
log.Println()
|
||||
}
|
||||
|
||||
|
||||
@@ -49,9 +49,8 @@ func main() {
|
||||
}
|
||||
|
||||
type checkResult struct {
|
||||
server string
|
||||
direct []string
|
||||
relays []discover.Relay
|
||||
server string
|
||||
addresses []string
|
||||
error
|
||||
}
|
||||
|
||||
@@ -67,7 +66,7 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
}()
|
||||
}
|
||||
|
||||
for _ = range servers {
|
||||
for range servers {
|
||||
res := <-resc
|
||||
|
||||
u, _ := url.Parse(res.server)
|
||||
@@ -76,17 +75,14 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
if res.error != nil {
|
||||
fmt.Println(" " + res.error.Error())
|
||||
}
|
||||
for _, addr := range res.direct {
|
||||
for _, addr := range res.addresses {
|
||||
fmt.Println(" address:", addr)
|
||||
}
|
||||
for _, rel := range res.relays {
|
||||
fmt.Printf(" relay: %s (%d ms)\n", rel.URL, rel.Latency)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, nil)
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil)
|
||||
if err != nil {
|
||||
return checkResult{error: err}
|
||||
}
|
||||
@@ -98,8 +94,8 @@ func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
})
|
||||
|
||||
go func() {
|
||||
direct, relays, err := disco.Lookup(deviceID)
|
||||
res <- checkResult{direct: direct, relays: relays, error: err}
|
||||
addresses, err := disco.Lookup(deviceID)
|
||||
res <- checkResult{addresses: addresses, error: err}
|
||||
}()
|
||||
|
||||
return <-res
|
||||
|
||||
124
cmd/stgenfiles/main.go
Normal file
@@ -0,0 +1,124 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
dir := flag.String("dir", "~/files", "Directory to generate into")
|
||||
files := flag.Int("files", 1000, "Number of files to create")
|
||||
maxExp := flag.Int("maxexp", 20, "Max size exponent")
|
||||
src := flag.String("src", "/dev/urandom", "Source of file data")
|
||||
flag.Parse()
|
||||
if err := generateFiles(*dir, *files, *maxExp, *src); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
func generateFiles(dir string, files, maxexp int, srcname string) error {
|
||||
fd, err := os.Open(srcname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < files; i++ {
|
||||
n := randomName()
|
||||
|
||||
if rand.Float64() < 0.05 {
|
||||
// Some files and directories are dotfiles
|
||||
n = "." + n
|
||||
}
|
||||
|
||||
p0 := filepath.Join(dir, string(n[0]), n[0:2])
|
||||
err = os.MkdirAll(p0, 0755)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
p1 := filepath.Join(p0, n)
|
||||
|
||||
s := int64(1 << uint(rand.Intn(maxexp)))
|
||||
a := int64(128 * 1024)
|
||||
if a > s {
|
||||
a = s
|
||||
}
|
||||
s += rand.Int63n(a)
|
||||
|
||||
if err := generateOneFile(fd, p1, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
src := io.LimitReader(&infiniteReader{fd}, int64(s))
|
||||
dst, err := os.Create(p1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(dst, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dst.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
|
||||
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
|
||||
err = os.Chtimes(p1, t, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func randomName() string {
|
||||
var b [16]byte
|
||||
readRand(b[:])
|
||||
return fmt.Sprintf("%x", b[:])
|
||||
}
|
||||
|
||||
func readRand(bs []byte) (int, error) {
|
||||
var r uint32
|
||||
for i := range bs {
|
||||
if i%4 == 0 {
|
||||
r = uint32(rand.Int63())
|
||||
}
|
||||
bs[i] = byte(r >> uint((i%4)*8))
|
||||
}
|
||||
return len(bs), nil
|
||||
}
|
||||
|
||||
type infiniteReader struct {
|
||||
rd io.ReadSeeker
|
||||
}
|
||||
|
||||
func (i *infiniteReader) Read(bs []byte) (int, error) {
|
||||
n, err := i.rd.Read(bs)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
i.rd.Seek(0, 0)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@@ -10,51 +10,71 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
func dump(ldb *leveldb.DB) {
|
||||
func dump(ldb *db.Instance) {
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.UnmarshalXDR(it.Value())
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
fmt.Printf("[mtime]\n %x\n %x\n", it.Key(), it.Value())
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
val := it.Value()
|
||||
var real, virt time.Time
|
||||
real.UnmarshalBinary(val[:len(val)/2])
|
||||
virt.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
val := it.Value()
|
||||
if len(val) == 0 {
|
||||
fmt.Printf("[deviceidx] K:%d V:<nil>\n", key)
|
||||
} else {
|
||||
dev := protocol.DeviceIDFromBytes(val)
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, dev)
|
||||
}
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
@@ -8,14 +8,12 @@ package main
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
// An IntHeap is a min-heap of ints.
|
||||
type SizedElement struct {
|
||||
key string
|
||||
size int
|
||||
@@ -39,33 +37,31 @@ func (h *ElementHeap) Pop() interface{} {
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb *leveldb.DB) {
|
||||
func dumpsize(ldb *db.Instance) {
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
var ele SizedElement
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
ele.key = fmt.Sprintf("DEVICE:%s:%s:%s", dev, folder, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
ele.key = fmt.Sprintf("DEVICE:%d:%d:%s", folder, device, name)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
ele.key = fmt.Sprintf("GLOBAL:%s:%s", folder, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
ele.key = fmt.Sprintf("GLOBAL:%d:%s", folder, name)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
ele.key = fmt.Sprintf("BLOCK:%s:%x:%s", folder, hash, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
ele.key = fmt.Sprintf("BLOCK:%d:%x:%s", folder, hash, name)
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
ele.key = fmt.Sprintf("DEVICESTATS:%s", key[1:])
|
||||
@@ -76,6 +72,14 @@ func dumpsize(ldb *leveldb.DB) {
|
||||
case db.KeyTypeVirtualMtime:
|
||||
ele.key = fmt.Sprintf("MTIME:%s", key[1:])
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
id := binary.BigEndian.Uint32(key[1:])
|
||||
ele.key = fmt.Sprintf("FOLDERIDX:%d", id)
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
id := binary.BigEndian.Uint32(key[1:])
|
||||
ele.key = fmt.Sprintf("DEVICEIDX:%d", id)
|
||||
|
||||
default:
|
||||
ele.key = fmt.Sprintf("UNKNOWN:%x", key)
|
||||
}
|
||||
|
||||
@@ -13,8 +13,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -28,16 +27,12 @@ func main() {
|
||||
|
||||
path := flag.Arg(0)
|
||||
if path == "" {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.11.0.db")
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
fmt.Println("Path:", path)
|
||||
|
||||
ldb, err := leveldb.OpenFile(path, &opt.Options{
|
||||
ErrorIfMissing: true,
|
||||
Strict: opt.StrictAll,
|
||||
OpenFilesCacheCapacity: 100,
|
||||
})
|
||||
ldb, err := db.Open(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
22
cmd/strelaypoolsrv/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 The Syncthing Project
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
15
cmd/strelaypoolsrv/README.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# relaypoolsrv
|
||||
|
||||
[](http://build.syncthing.net/job/relaypoolsrv/lastBuild/)
|
||||
|
||||
This is the relay pool server for the `syncthing` project, which allows community-hosted [relaysrv](https://github.com/syncthing/relaysrv) instances to join the public pool.
|
||||
|
||||
Servers that join the pool are then advertised to users of `syncthing` as potential connection points for those who are unable to connect directly due to NAT or firewall issues.
|
||||
|
||||
There is very little reason to run this yourself, as `relaypoolsrv` is only used for announcement and lookup of public relay servers. If you are looking to set up a private or public relay, please check the documentation for [relaysrv](https://github.com/syncthing/relaysrv), which also explains how to join the default public pool.
|
||||
|
||||
If you still want to run it, you can run `go get github.com/syncthing/relaypoolsrv` to fetch it, or download the
|
||||
[latest build](http://build.syncthing.net/job/relaypoolsrv/lastSuccessfulBuild/artifact/)
|
||||
from the build server.
|
||||
|
||||
See `relaypoolsrv -help` for configuration options.
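
For illustration, here is a minimal Go sketch of querying the pool's `/endpoint` and decoding the relay list. It assumes the JSON shape the pool serves (`{"relays": [{"url": ..., "location": {...}}]}`); the endpoint URL below is the default public pool and should be adjusted for your own instance.

```go
// poolquery is a small sketch of a client for the relay pool's /endpoint.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type poolRelay struct {
	URL      string `json:"url"`
	Location struct {
		Latitude  float64 `json:"latitude"`
		Longitude float64 `json:"longitude"`
	} `json:"location"`
}

func main() {
	// Default public pool; replace with your own relaypoolsrv address if needed.
	resp, err := http.Get("https://relays.syncthing.net/endpoint")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var body struct {
		Relays []poolRelay `json:"relays"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		log.Fatal(err)
	}
	for _, r := range body.Relays {
		fmt.Printf("%s (%.2f, %.2f)\n", r.URL, r.Location.Latitude, r.Location.Longitude)
	}
}
```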
|
||||
1
cmd/strelaypoolsrv/auto/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
gui.go
|
||||
395
cmd/strelaypoolsrv/gui/index.html
Normal file
@@ -0,0 +1,395 @@
|
||||
<!DOCTYPE html>
|
||||
|
||||
<html lang="en" ng-app="syncthing" ng-controller="relayDataController">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="description" content="">
|
||||
<meta name="author" content="">
|
||||
|
||||
<title>Relay stats</title>
|
||||
<link href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
|
||||
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/font-awesome/4.6.1/css/font-awesome.min.css">
|
||||
|
||||
<style>
|
||||
#map {
|
||||
height: 600px;
|
||||
}
|
||||
.ng-cloak {
|
||||
display: none;
|
||||
}
|
||||
table {
|
||||
font-size: 11px !important;
|
||||
width: 100%;
|
||||
border: 1px;
|
||||
|
||||
}
|
||||
td {
|
||||
padding: 0px !important;
|
||||
}
|
||||
tfoot td {
|
||||
font-weight: bold;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body class="ng-cloak">
|
||||
<div class="container">
|
||||
<h1>Relay Pool Data</h1>
|
||||
<div ng-if="relays === undefined" class="text-center">
|
||||
<img src="//cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif"/>
|
||||
<p>Please wait while we gather data</p>
|
||||
</div>
|
||||
<div>
|
||||
<div ng-show="relays !== undefined" class="ng-hide">
|
||||
<p>
|
||||
Currently {{ relays.length }} relays online ({{ totals.goMaxProcs }} cores in total).
|
||||
</p>
|
||||
</div>
|
||||
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
|
||||
<p>The circle size represents how many bytes the relay transferred relative to other relays</p>
|
||||
</div>
|
||||
<div>
|
||||
<table class="table table-striped table-condensed table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th rowspan="2">Address</td>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numActiveSessions || -1'; sortReverse = !sortReverse">
|
||||
Sessions
|
||||
<span ng-show="sortType == 'status.numActiveSessions || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numActiveSessions || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numConnections || -1'; sortReverse = !sortReverse">
|
||||
Connections
|
||||
<span ng-show="sortType == 'status.numConnections || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numConnections || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.bytesProxied || -1'; sortReverse = !sortReverse">
|
||||
Data relayed
|
||||
<span ng-show="sortType == 'status.bytesProxied || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.bytesProxied || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th colspan="6" class="text-center">Transfer rate in the last period</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.uptimeSeconds || -1'; sortReverse = !sortReverse">
|
||||
Uptime hours
|
||||
<span ng-show="sortType == 'status.uptimeSeconds || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.uptimeSeconds || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
|
||||
Provided by
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[0] || -1'; sortReverse = !sortReverse">
|
||||
10s
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[1] || -1'; sortReverse = !sortReverse">
|
||||
1m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[2] || -1'; sortReverse = !sortReverse">
|
||||
5m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[3] || -1'; sortReverse = !sortReverse">
|
||||
15m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[4] || -1'; sortReverse = !sortReverse">
|
||||
30m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[5] || -1'; sortReverse = !sortReverse">
|
||||
60m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5] || -1' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5] || -1' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr ng-repeat="relay in relays | orderBy:sortType:sortReverse ">
|
||||
<td>{{ relay.address }}</td>
|
||||
<td ng-if="relay.status === undefined" colspan="11" class="text-center">Looking up...</td>
|
||||
<td ng-if-start="relay.status !== undefined">{{ relay.status.numActiveSessions }}</td>
|
||||
<td>{{ relay.status.numConnections }}</td>
|
||||
<td>{{ relay.status.bytesProxied | bytes }}</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td ng-if="relay.status.uptimeSeconds != undefined">{{ relay.status.uptimeSeconds/60/60 | number:0 }}</td>
|
||||
<td ng-if="relay.status.uptimeSeconds == undefined"></td>
|
||||
<td title="{{ relay.status.options['provided-by'] || '' }}" ng-if-end>
|
||||
{{ relay.status.options['provided-by'] || '' | limitTo:50 }}
|
||||
<span ng-if="(relay.status.options['provided-by'] || '').length > 50">…
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
<tfoot>
|
||||
<tr>
|
||||
<td>Totals</td>
|
||||
<td>{{ totals.numActiveSessions }}</td>
|
||||
<td>{{ totals.numConnections }}</td>
|
||||
<td>{{ totals.bytesProxied | bytes }}</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.uptimeSeconds/60/60 | number:0 }} hours</td>
|
||||
<td>{{ relays.length }} relays</td>
|
||||
</tr>
|
||||
</tfoot>
|
||||
</table>
|
||||
</div>
|
||||
<hr>
|
||||
<p>
|
||||
This product includes GeoLite2 data created by MaxMind, available from
|
||||
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
|
||||
<script src="//code.jquery.com/jquery-2.1.4.min.js"></script>
|
||||
<script src="//cdnjs.cloudflare.com/ajax/libs/angular.js/1.4.7/angular.min.js"></script>
|
||||
<script src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<script src="//maps.googleapis.com/maps/api/js"></script>
|
||||
</body>
|
||||
|
||||
<script>
|
||||
angular.module('syncthing', [
|
||||
])
|
||||
.config(function($httpProvider) {
|
||||
$httpProvider.defaults.timeout = 5000;
|
||||
})
|
||||
.filter('bytes', function() {
|
||||
return function(bytes, precision) {
|
||||
if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
|
||||
if (typeof precision === 'undefined') precision = 1;
|
||||
|
||||
var units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
|
||||
number = Math.floor(Math.log(bytes) / Math.log(1024));
|
||||
|
||||
var value = (bytes / Math.pow(1000, Math.floor(number)));
|
||||
if (!isFinite(value)) {
|
||||
value = 0;
|
||||
precision = 0;
|
||||
}
|
||||
if (!isFinite(number)) {
|
||||
units = 'bytes';
|
||||
} else {
|
||||
units = units[number];
|
||||
}
|
||||
return value.toFixed(precision) + ' ' + units;
|
||||
}
|
||||
})
|
||||
.controller('relayDataController', ['$scope', '$rootScope', '$http', '$q', '$compile', '$timeout', function($scope, $rootScope, $http, $q, $compile, $timeout) {
|
||||
$scope.totals = {
|
||||
bytesProxied: 0,
|
||||
goMaxProcs: 0,
|
||||
kbps10s1m5m15m30m60m: [0, 0, 0, 0, 0, 0],
|
||||
numActiveSessions: 0,
|
||||
numConnections: 0,
|
||||
numPendingSessionKeys: 0,
|
||||
numProxies: 0,
|
||||
uptimeSeconds: 0,
|
||||
};
|
||||
$scope.map = new google.maps.Map(document.getElementById('map'), {
|
||||
zoom: 1,
|
||||
mapTypeId: google.maps.MapTypeId.ROADMAP
|
||||
});
|
||||
$scope.mapBounds = new google.maps.LatLngBounds();
|
||||
$scope.tooltipTemplate = $('#infoTemplate').html();
|
||||
$scope.usedLocations = {};
|
||||
$scope.sortType = 'status.numActiveSessions || -1';
|
||||
$scope.sortReverse = true;
|
||||
|
||||
$http.get("/endpoint").then(function(response) {
|
||||
$scope.relays = response.data.relays;
|
||||
var promises = [];
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
|
||||
relay.uri = constructURI(relay.url);
|
||||
relay.address = relay.url.split('/')[2];
|
||||
|
||||
addMarkerToMap(relay);
|
||||
|
||||
promises.push(getRelayStatus(relay));
|
||||
});
|
||||
|
||||
// Can only add circles once we know the totals for transfers, which means
|
||||
// we need to resolve all statuses.
|
||||
$q.all(promises).then(function() {
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
if (relay.status) {
|
||||
addCircleToMap(relay);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
$scope.map.fitBounds($scope.mapBounds);
|
||||
if ($scope.relays.length == 1) {
|
||||
$scope.map.setZoom(13);
|
||||
}
|
||||
});
|
||||
|
||||
function addMarkerToMap(relay) {
|
||||
var loc = relay.location.latitude + "," + relay.location.longitude;
|
||||
|
||||
// Deal with overlapping markers
|
||||
while (loc in $scope.usedLocations) {
|
||||
var locParts = loc.split(',');
|
||||
locParts = [parseFloat(locParts[0]), parseFloat(locParts[1])];
|
||||
locParts[Math.round(Math.random())] += 0.5 * (Math.random() >= 0.5 ? 1 : -1);
|
||||
loc = locParts.join(',');
|
||||
}
|
||||
|
||||
$scope.usedLocations[loc] = true;
|
||||
|
||||
var locParts = loc.split(',');
|
||||
|
||||
relay.marker = new google.maps.Marker({
|
||||
map: $scope.map,
|
||||
position: new google.maps.LatLng(locParts[0], locParts[1]),
|
||||
title: relay.url,
|
||||
});
|
||||
|
||||
var scope = $rootScope.$new(true);
|
||||
scope.relay = relay;
|
||||
|
||||
relay.marker.info = new google.maps.InfoWindow({
|
||||
content: $compile($scope.tooltipTemplate)(scope)[0],
|
||||
});
|
||||
|
||||
relay.marker.addListener('mouseover', function() {
|
||||
relay.marker.info.open($scope.map, relay.marker);
|
||||
});
|
||||
|
||||
relay.marker.addListener('mouseout', function() {
|
||||
relay.marker.info.close();
|
||||
});
|
||||
|
||||
$scope.mapBounds.extend(relay.marker.position);
|
||||
}
|
||||
|
||||
function addCircleToMap(relay) {
|
||||
relay.marker.circle = new google.maps.Circle({
|
||||
strokeColor: '#FF0000',
|
||||
strokeOpacity: 0.8,
|
||||
strokeWeight: 2,
|
||||
fillColor: '#FF0000',
|
||||
fillOpacity: 0.35,
|
||||
map: $scope.map,
|
||||
center: relay.marker.position,
|
||||
radius: ((relay.status.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000
|
||||
});
|
||||
}
|
||||
|
||||
function getRelayStatus(relay) {
|
||||
// Normal timeout doesn't deal with relays which accept the TCP connection
|
||||
// but don't respond (some firewalls do that), so deal with it this way.
|
||||
var timeoutRequest = $q.defer();
|
||||
var resolveStatus = $q.defer();
|
||||
|
||||
$http.get("http://" + relay.uri.hostname + (relay.uri.args.statusAddr || ":22070") + "/status", { timeout: timeoutRequest.promise }).then(function (response) {
|
||||
relay.status = response.data;
|
||||
resolveStatus.resolve();
|
||||
angular.forEach($scope.totals, function(value, key) {
|
||||
if (typeof $scope.totals[key] == 'number') {
|
||||
$scope.totals[key] += response.data[key];
|
||||
} else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
|
||||
angular.forEach($scope.totals[key], function(value, index) {
|
||||
$scope.totals[key][index] += response.data[key][index];
|
||||
});
|
||||
}
|
||||
});
|
||||
}, function() {
|
||||
relay.status = null;
|
||||
resolveStatus.resolve();
|
||||
});
|
||||
|
||||
$timeout(function() {
|
||||
timeoutRequest.resolve();
|
||||
}, 5000);
|
||||
|
||||
return resolveStatus.promise;
|
||||
}
|
||||
|
||||
function constructURI(url) {
|
||||
var uri = document.createElement('a');
|
||||
|
||||
// Hack: swap the scheme so the anchor element parses the relay URL
|
||||
uri.href = url.replace('relay://', 'http://');
|
||||
|
||||
// Convert query string to object
|
||||
uri.args = {};
|
||||
angular.forEach(uri.search.replace(/^\?/, '').split('&'), function(query) {
|
||||
var split = query.split('=');
|
||||
uri.args[split[0]] = split[1];
|
||||
});
|
||||
|
||||
return uri;
|
||||
}
|
||||
}]);
|
||||
</script>
|
||||
|
||||
<script type="text/template" id="infoTemplate">
|
||||
<div>
|
||||
<p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.status.options['provided-by']">provided by <u>{{ relay.status.options['provided-by'] }}</u></span></p>
|
||||
<div ng-if="relay.status">
|
||||
<span ng-if="relay.status.startTime">Start time: {{ relay.status.startTime | date:"medium" }}</br></span>
|
||||
<span ng-if="relay.status.bytesProxied != undefined">Proxied: {{ relay.status.bytesProxied | bytes }}</br></span>
|
||||
<span ng-if="relay.status.numActiveSessions != undefined">Sessions: {{ relay.status.numActiveSessions }}</br></span>
|
||||
<span ng-if="relay.status.numConnections != undefined">Clients: {{ relay.status.numConnections }}</br></span>
|
||||
<span ng-if="relay.status.options.pools">Pools: {{ relay.status.options.pools.join(', ') }}</br></span>
|
||||
<span ng-if="relay.status.options['global-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['global-rate'] > 0">Global rate limit: {{ relay.status.options['global-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['global-rate'] == 0">Global rate limit: unlimited</span>
|
||||
</br>
|
||||
</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['per-session-rate'] > 0">Session rate limit: {{ relay.status.options['per-session-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
|
||||
</br>
|
||||
</span>
|
||||
</div>
|
||||
<div ng-if="!relay.status">
|
||||
Data unavailable.
|
||||
</div>
|
||||
</div>
|
||||
</script>
|
||||
</html>
|
||||
538
cmd/strelaypoolsrv/main.go
Normal file
@@ -0,0 +1,538 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
//go:generate go run genassets.go gui auto/gui.go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/juju/ratelimit"
|
||||
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
)
|
||||
|
||||
type location struct {
|
||||
Latitude float64 `json:"latitude"`
|
||||
Longitude float64 `json:"longitude"`
|
||||
}
|
||||
|
||||
type relay struct {
|
||||
URL string `json:"url"`
|
||||
Location location `json:"location"`
|
||||
uri *url.URL
|
||||
}
|
||||
|
||||
func (r relay) String() string {
|
||||
return r.URL
|
||||
}
|
||||
|
||||
type request struct {
|
||||
relay relay
|
||||
uri *url.URL
|
||||
result chan result
|
||||
}
|
||||
|
||||
type result struct {
|
||||
err error
|
||||
eviction time.Duration
|
||||
}
|
||||
|
||||
var (
|
||||
testCert tls.Certificate
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst int64 = 10
|
||||
getLimitAvg = 1
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst int64 = 2
|
||||
postLimitAvg = 1
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
|
||||
getMut = sync.NewRWMutex()
|
||||
getLRUCache *lru.Cache
|
||||
|
||||
postMut = sync.NewRWMutex()
|
||||
postLRUCache *lru.Cache
|
||||
|
||||
requests = make(chan request, 10)
|
||||
|
||||
mut = sync.NewRWMutex()
|
||||
knownRelays = make([]relay, 0)
|
||||
permanentRelays = make([]relay, 0)
|
||||
evictionTimers = make(map[string]*time.Timer)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", 2, "Allowed average get request rate, per 10 s")
|
||||
flag.Int64Var(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", 2, "Allowed average post request rate, per minute")
|
||||
flag.Int64Var(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
getLimit = 10 * time.Second / time.Duration(getLimitAvg)
|
||||
postLimit = time.Minute / time.Duration(postLimitAvg)
|
||||
|
||||
getLRUCache = lru.New(getLRUSize)
|
||||
postLRUCache = lru.New(postLRUSize)
|
||||
|
||||
var listener net.Listener
|
||||
var err error
|
||||
|
||||
if permRelaysFile != "" {
|
||||
loadPermanentRelays(permRelaysFile)
|
||||
}
|
||||
|
||||
testCert = createTestCertificate()
|
||||
|
||||
go requestProcessor()
|
||||
|
||||
if dir != "" {
|
||||
if debug {
|
||||
log.Println("Starting TLS listener on", listen)
|
||||
}
|
||||
certFile, keyFile := filepath.Join(dir, "http-cert.pem"), filepath.Join(dir, "http-key.pem")
|
||||
var cert tls.Certificate
|
||||
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load HTTP X509 key pair:", err)
|
||||
}
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
MinVersion: tls.VersionTLS10, // No SSLv3
|
||||
CipherSuites: []uint16{
|
||||
// No RC4
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
listener, err = tls.Listen(proto, listen, tlsCfg)
|
||||
} else {
|
||||
if debug {
|
||||
log.Println("Starting plain listener on", listen)
|
||||
}
|
||||
listener, err = net.Listen(proto, listen)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatalln("listen:", err)
|
||||
}
|
||||
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.HandleFunc("/endpoint", handleRequest)
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
err = srv.Serve(listener)
|
||||
if err != nil {
|
||||
log.Fatalln("serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
assets := auto.Assets()
|
||||
path := r.URL.Path[1:]
|
||||
if path == "" {
|
||||
path = "index.html"
|
||||
}
|
||||
|
||||
bs, ok := assets[path]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
mtype := mimeTypeForFile(path)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// Decompress the asset when the browser did not send an Accept-Encoding: gzip header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if ipHeader != "" {
|
||||
r.RemoteAddr = r.Header.Get(ipHeader)
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, int64(getLimitBurst)) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, int64(postLimitBurst)) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unhandled HTTP method", r.Method)
|
||||
}
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetRequest(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
mut.RLock()
|
||||
relays := append(permanentRelays, knownRelays...)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
for i := range relays {
|
||||
j := rand.Intn(i + 1)
|
||||
relays[i], relays[j] = relays[j], relays[i]
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(map[string][]relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
var newRelay relay
|
||||
err := json.NewDecoder(r.Body).Decode(&newRelay)
|
||||
r.Body.Close()
|
||||
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to parse payload")
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
uri, err := url.Parse(newRelay.URL)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to parse URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
// Get the IP address of the client
|
||||
rhost, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split remote address", r.RemoteAddr)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
// If the relay URL did not include a host, fall back to the announcing client's IP address.
|
||||
if host == "" {
|
||||
uri.Host = net.JoinHostPort(rhost, port)
|
||||
newRelay.URL = uri.String()
|
||||
} else if host != rhost {
|
||||
if debug {
|
||||
log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
|
||||
}
|
||||
http.Error(w, "IP address does not match client IP", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
newRelay.uri = uri
|
||||
newRelay.Location = getLocation(uri.Host)
|
||||
|
||||
for _, current := range permanentRelays {
|
||||
if current.uri.Host == newRelay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
|
||||
}
|
||||
http.Error(w, "Invalid request", 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
reschan := make(chan result)
|
||||
|
||||
select {
|
||||
case requests <- request{newRelay, uri, reschan}:
|
||||
result := <-reschan
|
||||
if result.err != nil {
|
||||
http.Error(w, result.err.Error(), 500)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string]time.Duration{
|
||||
"evictionIn": result.eviction,
|
||||
})
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Dropping request")
|
||||
}
|
||||
w.WriteHeader(429)
|
||||
}
|
||||
}
|
||||
|
||||
func requestProcessor() {
|
||||
for request := range requests {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
if !client.TestRelay(request.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3) {
|
||||
if debug {
|
||||
log.Println("Test for relay", request.relay, "failed")
|
||||
}
|
||||
request.result <- result{fmt.Errorf("test failed"), 0}
|
||||
continue
|
||||
}
|
||||
|
||||
mut.Lock()
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
if ok {
|
||||
if debug {
|
||||
log.Println("Stopping existing timer for", request.relay)
|
||||
}
|
||||
timer.Stop()
|
||||
}
|
||||
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == request.relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Relay", request.relay, "already exists")
|
||||
}
|
||||
|
||||
// Evict the old entry anyway, as configuration might have changed.
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
|
||||
goto found
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("Adding new relay", request.relay)
|
||||
}
|
||||
|
||||
found:
|
||||
|
||||
knownRelays = append(knownRelays, request.relay)
|
||||
|
||||
evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))
|
||||
mut.Unlock()
|
||||
request.result <- result{nil, evictionTime}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func evict(relay relay) func() {
|
||||
return func() {
|
||||
mut.Lock()
|
||||
defer mut.Unlock()
|
||||
if debug {
|
||||
log.Println("Evicting", relay)
|
||||
}
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Evicted", relay)
|
||||
}
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
}
|
||||
}
|
||||
delete(evictionTimers, relay.uri.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, rate time.Duration, burst int64) bool {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
lock.RLock()
|
||||
bkt, ok := cache.Get(host)
|
||||
lock.RUnlock()
|
||||
if ok {
|
||||
bkt := bkt.(*ratelimit.Bucket)
|
||||
if bkt.TakeAvailable(1) != 1 {
|
||||
// Rate limit
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
lock.Lock()
|
||||
cache.Add(host, ratelimit.NewBucket(rate, burst))
|
||||
lock.Unlock()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func loadPermanentRelays(file string) {
|
||||
content, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(string(content), "\n") {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
uri, err := url.Parse(line)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Skipping permanent relay", line, "due to parse error", err)
|
||||
}
|
||||
continue
|
||||
|
||||
}
|
||||
|
||||
permanentRelays = append(permanentRelays, relay{
|
||||
URL: line,
|
||||
Location: getLocation(uri.Host),
|
||||
uri: uri,
|
||||
})
|
||||
if debug {
|
||||
log.Println("Adding permanent relay", line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createTestCertificate() tls.Certificate {
|
||||
tmpDir, err := ioutil.TempDir("", "relaypoolsrv")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
|
||||
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to create test X509 key pair:", err)
|
||||
}
|
||||
|
||||
return cert
|
||||
}
|
||||
|
||||
func getLocation(host string) location {
|
||||
db, err := geoip2.Open(geoipPath)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
addr, err := net.ResolveTCPAddr("tcp", host)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
|
||||
city, err := db.City(addr.IP)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
|
||||
return location{
|
||||
Latitude: city.Location.Latitude,
|
||||
Longitude: city.Location.Longitude,
|
||||
}
|
||||
}
|
||||
22
cmd/strelaysrv/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 The Syncthing Project
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
141
cmd/strelaysrv/README.md
Normal file
@@ -0,0 +1,141 @@
|
||||
strelaysrv
|
||||
==========
|
||||
|
||||
[](http://build.syncthing.net/job/strelaysrv/lastBuild/)
|
||||
|
||||
This is the relay server for the `syncthing` project.
|
||||
|
||||
To get it, run `go get github.com/syncthing/strelaysrv` or download the
|
||||
[latest build](http://build.syncthing.net/job/strelaysrv/lastSuccessfulBuild/artifact/)
|
||||
from the build server.
|
||||
|
||||
:exclamation:Warnings:exclamation: - Read or regret
|
||||
-----
|
||||
|
||||
By default, all relay servers will join the default public relay pool, which means that the relay server will be available for public use and **will consume your bandwidth** helping others to connect.
|
||||
|
||||
If you wish to disable this behaviour, please specify the `-pools=""` argument.
|
||||
|
||||
Please note that `strelaysrv` is only usable by `syncthing` **version v0.12 and onwards**.
|
||||
|
||||
To run `strelaysrv` you need to have port 22067 available to the internet, which means you might need to allow it through your firewall if you **have a public IP, or set up port forwarding** (22067 to 22067) if you are behind a router.
|
||||
|
||||
Furthermore, **by default strelaysrv will also expose a /status HTTP endpoint on port 22070**, which is used by the pool servers to peek at metrics of the strelaysrv, such as the current transfer rates, how many clients are connected, and so on. If you wish this information to be available, you should similarly allow it through your firewall, or port-forward it (22070 to 22070) on your NAT device.
|
||||
|
||||
This is **not mandatory** for the strelaysrv to function, and is used only to gather metrics and present them in the overview page of the pool server, displaying stats about the specific relay.
|
||||
|
||||
At the time of writing, the endpoint output looks as follows:
|
||||
|
||||
```
|
||||
{
|
||||
"bytesProxied": 0,
|
||||
"goArch": "amd64",
|
||||
"goMaxProcs": 1,
|
||||
"goNumRoutine": 13,
|
||||
"goOS": "linux",
|
||||
"goVersion": "go1.6",
|
||||
"kbps10s1m5m15m30m60m": [
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"numActiveSessions": 0,
|
||||
"numConnections": 0,
|
||||
"numPendingSessionKeys": 2,
|
||||
"numProxies": 0,
|
||||
"options": {
|
||||
"global-rate": 0,
|
||||
"message-timeout": 60,
|
||||
"network-timeout": 120,
|
||||
"per-session-rate": 0,
|
||||
"ping-interval": 60,
|
||||
"pools": [
|
||||
"https://relays.syncthing.net/endpoint"
|
||||
],
|
||||
"provided-by": ""
|
||||
},
|
||||
"startTime": "2016-03-06T12:53:07.090847749-05:00",
|
||||
"uptimeSeconds": 17
|
||||
}
|
||||
```
|
||||
|
||||
If you wish to disable the /status endpoint, provide `-status-srv=""` as one of the arguments when starting the strelaysrv.
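
As a quick way to consume this data, here is a minimal Go sketch that fetches the /status endpoint and prints a few of the fields shown above. The relay host is a placeholder; substitute your own relay's address.

```go
// statuscheck polls a relay's /status endpoint and prints a few metrics.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder address; use your relay's host and its -status-srv port.
	resp, err := http.Get("http://relay.example.com:22070/status")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var status struct {
		BytesProxied      int64   `json:"bytesProxied"`
		NumActiveSessions int     `json:"numActiveSessions"`
		NumConnections    int     `json:"numConnections"`
		UptimeSeconds     float64 `json:"uptimeSeconds"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("proxied %d bytes, %d sessions, %d connections, up %.0f s\n",
		status.BytesProxied, status.NumActiveSessions, status.NumConnections, status.UptimeSeconds)
}
```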
|
||||
|
||||
Running for public use
|
||||
----
|
||||
Make sure you have a public IP with port 22067 open, or set up port forwarding (22067 to 22067) if you are behind a router.
|
||||
|
||||
Run the `strelaysrv` with no arguments (or `-debug` if you want more output), and that should be enough for the server to join the public relay pool.
|
||||
You should see a message saying:
|
||||
```
|
||||
2015/09/21 22:45:46 pool.go:60: Joined https://relays.syncthing.net/endpoint rejoining in 48m0s
|
||||
```
|
||||
|
||||
See `strelaysrv -help` for other options, such as rate limits, timeout intervals, etc.
|
||||
|
||||
Running for private use
|
||||
-----
|
||||
|
||||
Once you've started `strelaysrv`, it will generate a key pair and print a URI:
|
||||
```bash
|
||||
relay://:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
|
||||
```
|
||||
|
||||
This URI contains the partial address of the relay server, as well as its options, which in the future may be taken into account when choosing the most suitable relay out of those available.
|
||||
|
||||
Because the `-listen` option was not used, `strelaysrv` does not know its external IP, so you should replace the host part of the URI with the public IP address on which `strelaysrv` will be available:
|
||||
|
||||
```bash
|
||||
relay://123.123.123.123:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
|
||||
```
|
||||
|
||||
If you do not care about certificate pinning (improved security) or do not care about passing verbose settings to the clients, you can shorten the URL to just the host part:
|
||||
|
||||
```bash
|
||||
relay://123.123.123.123:22067
|
||||
```
|
||||
|
||||
This URI can then be used in `syncthing` as one of the relay servers.
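
If you want to inspect the options embedded in such a URI programmatically, the standard `net/url` package handles the `relay://` scheme; a small sketch using the example URI from above:

```go
// parserelay reads the options embedded in a relay URI.
package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	// Example URI from this README.
	raw := "relay://123.123.123.123:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070"
	uri, err := url.Parse(raw)
	if err != nil {
		log.Fatal(err)
	}
	q := uri.Query()
	fmt.Println("host:           ", uri.Host)
	fmt.Println("device ID:      ", q.Get("id"))
	fmt.Println("ping interval:  ", q.Get("pingInterval"))
	fmt.Println("network timeout:", q.Get("networkTimeout"))
	fmt.Println("status address: ", q.Get("statusAddr"))
}
```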
|
||||
|
||||
See `strelaysrv -help` for other options, such as rate limits, timeout intervals, etc.
|
||||
|
||||
Other items available in this repo
|
||||
----
|
||||
##### testutil
|
||||
A test utility which can be used to test connectivity of a relay server.
|
||||
You need to generate two x509 key pairs (key.pem and cert.pem), one for the client, another one for the server, in separate directories.
|
||||
Afterwards, start the client:
|
||||
```bash
|
||||
./testutil -relay="relay://uri.of.relay" -keys=certs/client/ -join
|
||||
```
|
||||
|
||||
This prints out the client ID:
|
||||
```
|
||||
2015/09/21 23:00:52 main.go:42: ID: BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
|
||||
```
|
||||
|
||||
In the other terminal run the following:
|
||||
|
||||
```bash
|
||||
./testutil -relay="relay://uri.of.relay" -keys=certs/server/ -connect=BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
|
||||
```
|
||||
|
||||
This should give you an interactive prompt, where you can type things in one terminal and have them relayed to the other terminal.
|
||||
|
||||
Relay related libraries used by this repo
|
||||
----
|
||||
##### Relay protocol definition.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/protocol)
|
||||
|
||||
|
||||
##### Relay client
|
||||
|
||||
Only used by the testutil.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/client)
|
||||
|
||||
|
||||
17
cmd/strelaysrv/etc/linux-systemd/strelaysrv.service
Normal file
@@ -0,0 +1,17 @@
|
||||
[Unit]
|
||||
Description=Syncthing relay server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=strelaysrv
|
||||
Group=strelaysrv
|
||||
ExecStart=/usr/bin/strelaysrv
|
||||
WorkingDirectory=/var/lib/strelaysrv
|
||||
|
||||
PrivateTmp=true
|
||||
ProtectSystem=full
|
||||
ProtectHome=true
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
351
cmd/strelaysrv/listener.go
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
outboxesMut = sync.RWMutex{}
|
||||
outboxes = make(map[syncthingprotocol.DeviceID]chan interface{})
|
||||
numConnections int64
|
||||
)
|
||||
|
||||
func listener(proto, addr string, config *tls.Config) {
|
||||
tcpListener, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
listener := tlsutil.DowngradingListener{
|
||||
Listener: tcpListener,
|
||||
}
|
||||
|
||||
for {
|
||||
conn, isTLS, err := listener.AcceptNoWrapTLS()
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Listener failed to accept connection from", conn.RemoteAddr(), ". Possibly a TCP Ping.")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
setTCPOptions(conn)
|
||||
|
||||
if debug {
|
||||
log.Println("Listener accepted connection from", conn.RemoteAddr(), "tls", isTLS)
|
||||
}
|
||||
|
||||
if isTLS {
|
||||
go protocolConnectionHandler(conn, config)
|
||||
} else {
|
||||
go sessionConnectionHandler(conn)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
conn := tls.Server(tcpConn, config)
|
||||
err := conn.Handshake()
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Protocol connection TLS handshake:", conn.RemoteAddr(), err)
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
state := conn.ConnectionState()
|
||||
if (!state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol != protocol.ProtocolName) && debug {
|
||||
log.Println("Protocol negotiation error")
|
||||
}
|
||||
|
||||
certs := state.PeerCertificates
|
||||
if len(certs) != 1 {
|
||||
if debug {
|
||||
log.Println("Certificate list error")
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(certs[0].Raw)
|
||||
|
||||
messages := make(chan interface{})
|
||||
errors := make(chan error, 1)
|
||||
outbox := make(chan interface{})
|
||||
|
||||
// Read messages from the connection and send them on the messages
|
||||
// channel. When there is an error, send it on the error channel and
|
||||
// return. Applies also when the connection gets closed, so the pattern
|
||||
// below is to close the connection on error, then wait for the error
|
||||
// signal from messageReader to exit.
|
||||
go messageReader(conn, messages, errors)
|
||||
|
||||
pingTicker := time.NewTicker(pingInterval)
|
||||
timeoutTicker := time.NewTimer(networkTimeout)
|
||||
joined := false
|
||||
|
||||
for {
|
||||
select {
|
||||
case message := <-messages:
|
||||
timeoutTicker.Reset(networkTimeout)
|
||||
if debug {
|
||||
log.Printf("Message %T from %s", message, id)
|
||||
}
|
||||
|
||||
switch msg := message.(type) {
|
||||
case protocol.JoinRelayRequest:
|
||||
if atomic.LoadInt32(&overLimit) > 0 {
|
||||
protocol.WriteMessage(conn, protocol.RelayFull{})
|
||||
if debug {
|
||||
log.Println("Refusing join request from", id, "due to being over limits")
|
||||
}
|
||||
conn.Close()
|
||||
limitCheckTimer.Reset(time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
outboxesMut.RLock()
|
||||
_, ok := outboxes[id]
|
||||
outboxesMut.RUnlock()
|
||||
if ok {
|
||||
protocol.WriteMessage(conn, protocol.ResponseAlreadyConnected)
|
||||
if debug {
|
||||
log.Println("Already have a peer with the same ID", id, conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
outboxesMut.Lock()
|
||||
outboxes[id] = outbox
|
||||
outboxesMut.Unlock()
|
||||
joined = true
|
||||
|
||||
protocol.WriteMessage(conn, protocol.ResponseSuccess)
|
||||
|
||||
case protocol.ConnectRequest:
|
||||
requestedPeer := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
outboxesMut.RLock()
|
||||
peerOutbox, ok := outboxes[requestedPeer]
|
||||
outboxesMut.RUnlock()
|
||||
if !ok {
|
||||
if debug {
|
||||
log.Println(id, "is looking for", requestedPeer, "which does not exist")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
// requestedPeer is the server, id is the client
|
||||
ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
|
||||
|
||||
go ses.Serve()
|
||||
|
||||
clientInvitation := ses.GetClientInvitationMessage()
|
||||
serverInvitation := ses.GetServerInvitationMessage()
|
||||
|
||||
if err := protocol.WriteMessage(conn, clientInvitation); err != nil {
|
||||
if debug {
|
||||
log.Printf("Error sending invitation from %s to client: %s", id, err)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case peerOutbox <- serverInvitation:
|
||||
if debug {
|
||||
log.Println("Sent invitation from", id, "to", requestedPeer)
|
||||
}
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Could not send invitation from", id, "to", requestedPeer, "as peer disconnected")
|
||||
}
|
||||
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
case protocol.Ping:
|
||||
if err := protocol.WriteMessage(conn, protocol.Pong{}); err != nil {
|
||||
if debug {
|
||||
log.Println("Error writing pong:", err)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
case protocol.Pong:
|
||||
// Nothing
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Printf("Unknown message %s: %T", id, message)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseUnexpectedMessage)
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
case err := <-errors:
|
||||
if debug {
|
||||
log.Printf("Closing connection %s: %s", id, err)
|
||||
}
|
||||
close(outbox)
|
||||
|
||||
// Potentially closing a second time.
|
||||
conn.Close()
|
||||
|
||||
if joined {
|
||||
// Only delete the outbox if the client is joined, as it might be
|
||||
// a lookup request coming from the same client.
|
||||
outboxesMut.Lock()
|
||||
delete(outboxes, id)
|
||||
outboxesMut.Unlock()
|
||||
// Also, kill all sessions related to this node, as it probably
|
||||
// went offline. This helps the other end realize faster that the
|
||||
// client is no longer there. This also helps resolve
|
||||
// 'already connected' errors when one of the sides is
|
||||
// restarting, and connecting to the other peer before the other
|
||||
// peer even realised that the node has gone away.
|
||||
dropSessions(id)
|
||||
}
|
||||
return
|
||||
|
||||
case <-pingTicker.C:
|
||||
if !joined {
|
||||
if debug {
|
||||
log.Println(id, "didn't join within", pingInterval)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := protocol.WriteMessage(conn, protocol.Ping{}); err != nil {
|
||||
if debug {
|
||||
log.Println(id, err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&overLimit) > 0 && !hasSessions(id) {
|
||||
if debug {
|
||||
log.Println("Dropping", id, "as it has no sessions and we are over our limits")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.RelayFull{})
|
||||
conn.Close()
|
||||
|
||||
limitCheckTimer.Reset(time.Second)
|
||||
}
|
||||
|
||||
case <-timeoutTicker.C:
|
||||
// We should receive an error from the reader loop, which will cause
|
||||
// us to quit this loop.
|
||||
if debug {
|
||||
log.Printf("%s timed out", id)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
case msg := <-outbox:
|
||||
if msg == nil {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
if debug {
|
||||
log.Printf("Sending message %T to %s", msg, id)
|
||||
}
|
||||
if err := protocol.WriteMessage(conn, msg); err != nil {
|
||||
if debug {
|
||||
log.Println(id, err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sessionConnectionHandler(conn net.Conn) {
|
||||
if err := conn.SetDeadline(time.Now().Add(messageTimeout)); err != nil {
|
||||
if debug {
|
||||
log.Println("Weird error setting deadline:", err, "on", conn.RemoteAddr())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
message, err := protocol.ReadMessage(conn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := message.(type) {
|
||||
case protocol.JoinSessionRequest:
|
||||
ses := findSession(string(msg.Key))
|
||||
if debug {
|
||||
log.Println(conn.RemoteAddr(), "session lookup", ses, hex.EncodeToString(msg.Key)[:5])
|
||||
}
|
||||
|
||||
if ses == nil {
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if !ses.AddConnection(conn) {
|
||||
if debug {
|
||||
log.Println("Failed to add", conn.RemoteAddr(), "to session", ses)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseAlreadyConnected)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if err := protocol.WriteMessage(conn, protocol.ResponseSuccess); err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to send session join response to ", conn.RemoteAddr(), "for", ses)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := conn.SetDeadline(time.Time{}); err != nil {
|
||||
if debug {
|
||||
log.Println("Weird error setting deadline:", err, "on", conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unexpected message from", conn.RemoteAddr(), message)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseUnexpectedMessage)
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) {
|
||||
atomic.AddInt64(&numConnections, 1)
|
||||
defer atomic.AddInt64(&numConnections, -1)
|
||||
|
||||
for {
|
||||
msg, err := protocol.ReadMessage(conn)
|
||||
if err != nil {
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
messages <- msg
|
||||
}
|
||||
}
|
||||
286
cmd/strelaysrv/main.go
Normal file
@@ -0,0 +1,286 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/nat"
|
||||
_ "github.com/syncthing/syncthing/lib/pmp"
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`strelaysrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
}
|
||||
|
||||
var (
|
||||
listen string
|
||||
debug bool
|
||||
proto string
|
||||
|
||||
sessionAddress []byte
|
||||
sessionPort uint16
|
||||
|
||||
networkTimeout = 2 * time.Minute
|
||||
pingInterval = time.Minute
|
||||
messageTimeout = time.Minute
|
||||
|
||||
limitCheckTimer *time.Timer
|
||||
|
||||
sessionLimitBps int
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
descriptorLimit int64
|
||||
sessionLimiter *ratelimit.Bucket
|
||||
globalLimiter *ratelimit.Bucket
|
||||
|
||||
statusAddr string
|
||||
poolAddrs string
|
||||
pools []string
|
||||
providedBy string
|
||||
defaultPoolAddrs = "https://relays.syncthing.net/endpoint"
|
||||
|
||||
natEnabled bool
|
||||
natLease int
|
||||
natRenewal int
|
||||
natTimeout int
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.Lshortfile | log.LstdFlags)
|
||||
|
||||
var dir, extAddress, proto string
|
||||
|
||||
flag.StringVar(&listen, "listen", ":22067", "Protocol listen address")
|
||||
flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
|
||||
flag.DurationVar(&networkTimeout, "network-timeout", networkTimeout, "Timeout for network operations between the client and the relay.\n\tIf no data is received between the client and the relay in this period of time, the connection is terminated.\n\tFurthermore, if no data is sent between either clients being relayed within this period of time, the session is also terminated.")
|
||||
flag.DurationVar(&pingInterval, "ping-interval", pingInterval, "How often pings are sent")
|
||||
flag.DurationVar(&messageTimeout, "message-timeout", messageTimeout, "Maximum amount of time we wait for relevant messages to arrive")
|
||||
flag.IntVar(&sessionLimitBps, "per-session-rate", sessionLimitBps, "Per session rate limit, in bytes/s")
|
||||
flag.IntVar(&globalLimitBps, "global-rate", globalLimitBps, "Global rate limit, in bytes/s")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
|
||||
flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
|
||||
flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
|
||||
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
flag.BoolVar(&natEnabled, "nat", false, "Use UPnP/NAT-PMP to acquire external port mapping")
|
||||
flag.IntVar(&natLease, "nat-lease", 60, "NAT lease length in minutes")
|
||||
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
|
||||
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
|
||||
flag.Parse()
|
||||
|
||||
if extAddress == "" {
|
||||
extAddress = listen
|
||||
}
|
||||
|
||||
if len(providedBy) > 30 {
|
||||
log.Fatal("Provided-by cannot be longer than 30 characters")
|
||||
}
|
||||
|
||||
addr, err := net.ResolveTCPAddr(proto, extAddress)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Println(LongVersion)
|
||||
|
||||
maxDescriptors, err := osutil.MaximizeOpenFileLimit()
|
||||
if maxDescriptors > 0 {
|
||||
// Assume that 20% of FDs are leaked/unaccounted for.
|
||||
descriptorLimit = int64(maxDescriptors*80) / 100
|
||||
log.Println("Connection limit", descriptorLimit)
|
||||
|
||||
go monitorLimits()
|
||||
} else if err != nil && runtime.GOOS != "windows" {
|
||||
log.Println("Assuming no connection limit, due to error retrieving rlimits:", err)
|
||||
}
|
||||
|
||||
sessionAddress = addr.IP[:]
|
||||
sessionPort = uint16(addr.Port)
|
||||
|
||||
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
}
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
NextProtos: []string{protocol.ProtocolName},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
InsecureSkipVerify: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
|
||||
if debug {
|
||||
log.Println("ID:", id)
|
||||
}
|
||||
|
||||
wrapper := config.Wrap("config", config.New(id))
|
||||
wrapper.SetOptions(config.OptionsConfiguration{
|
||||
NATLeaseM: natLease,
|
||||
NATRenewalM: natRenewal,
|
||||
NATTimeoutS: natTimeout,
|
||||
})
|
||||
natSvc := nat.NewService(id, wrapper)
|
||||
mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}
|
||||
|
||||
if natEnabled {
|
||||
go natSvc.Serve()
|
||||
found := make(chan struct{})
|
||||
mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
|
||||
select {
|
||||
case found <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
|
||||
// Need to wait a few extra seconds, since the NAT library waits exactly natTimeout seconds on all interfaces.
|
||||
timeout := time.Duration(natTimeout+2) * time.Second
|
||||
log.Printf("Waiting %s to acquire NAT mapping", timeout)
|
||||
|
||||
select {
|
||||
case <-found:
|
||||
log.Printf("Found NAT mapping: %s", mapping.ExternalAddresses())
|
||||
case <-time.After(timeout):
|
||||
log.Println("Timeout out waiting for NAT mapping.")
|
||||
}
|
||||
}
|
||||
|
||||
if sessionLimitBps > 0 {
|
||||
sessionLimiter = ratelimit.NewBucketWithRate(float64(sessionLimitBps), int64(2*sessionLimitBps))
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
globalLimiter = ratelimit.NewBucketWithRate(float64(globalLimitBps), int64(2*globalLimitBps))
|
||||
}
|
||||
|
||||
if statusAddr != "" {
|
||||
go statusService(statusAddr)
|
||||
}
|
||||
|
||||
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to construct URI", err)
|
||||
}
|
||||
|
||||
log.Println("URI:", uri.String())
|
||||
|
||||
if poolAddrs == defaultPoolAddrs {
|
||||
log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
||||
log.Println("!! Joining default relay pools, this relay will be available for public use. !!")
|
||||
log.Println(`!! Use the -pools="" command line option to make the relay private. !!`)
|
||||
log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
||||
}
|
||||
|
||||
pools = strings.Split(poolAddrs, ",")
|
||||
for _, pool := range pools {
|
||||
pool = strings.TrimSpace(pool)
|
||||
if len(pool) > 0 {
|
||||
go poolHandler(pool, uri, mapping)
|
||||
}
|
||||
}
|
||||
|
||||
go listener(proto, listen, tlsCfg)
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigs
|
||||
|
||||
// Gracefully close all connections, hoping that clients will be faster
|
||||
// to realize that the relay is now gone.
|
||||
|
||||
sessionMut.RLock()
|
||||
for _, session := range activeSessions {
|
||||
session.CloseConns()
|
||||
}
|
||||
|
||||
for _, session := range pendingSessions {
|
||||
session.CloseConns()
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
|
||||
outboxesMut.RLock()
|
||||
for _, outbox := range outboxes {
|
||||
close(outbox)
|
||||
}
|
||||
outboxesMut.RUnlock()
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
func monitorLimits() {
|
||||
limitCheckTimer = time.NewTimer(time.Minute)
|
||||
for range limitCheckTimer.C {
|
||||
if atomic.LoadInt64(&numConnections)+atomic.LoadInt64(&numProxies) > descriptorLimit {
|
||||
atomic.StoreInt32(&overLimit, 1)
|
||||
log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.")
|
||||
} else if atomic.CompareAndSwapInt32(&overLimit, 1, 0) {
|
||||
log.Println("Dropped below our connection limits. Accepting new connections.")
|
||||
}
|
||||
limitCheckTimer.Reset(time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
type mapping struct {
|
||||
*nat.Mapping
|
||||
}
|
||||
|
||||
func (m *mapping) Address() nat.Address {
|
||||
ext := m.ExternalAddresses()
|
||||
if len(ext) > 0 {
|
||||
return ext[0]
|
||||
}
|
||||
return m.Mapping.Address()
|
||||
}
|
||||
66
cmd/strelaysrv/pool.go
Normal file
@@ -0,0 +1,66 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors.

package main

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"time"
)

func poolHandler(pool string, uri *url.URL, mapping mapping) {
	if debug {
		log.Println("Joining", pool)
	}
	for {
		uriCopy := *uri
		uriCopy.Host = mapping.Address().String()

		var b bytes.Buffer
		json.NewEncoder(&b).Encode(struct {
			URL string `json:"url"`
		}{
			uriCopy.String(),
		})

		resp, err := http.Post(pool, "application/json", &b)
		if err != nil {
			log.Println("Error joining pool", pool, err)
		} else if resp.StatusCode == 500 {
			bs, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				log.Println("Failed to join", pool, "due to an internal server error. Could not read response:", err)
			} else {
				log.Println("Failed to join", pool, "due to an internal server error:", string(bs))
			}
			resp.Body.Close()
		} else if resp.StatusCode == 429 {
			log.Println(pool, "under load, will retry in a minute")
			time.Sleep(time.Minute)
			continue
		} else if resp.StatusCode == 401 {
			log.Println(pool, "failed to join due to IP address not matching external address. Aborting")
			return
		} else if resp.StatusCode == 200 {
			var x struct {
				EvictionIn time.Duration `json:"evictionIn"`
			}
			err := json.NewDecoder(resp.Body).Decode(&x)
			if err == nil {
				rejoin := x.EvictionIn - (x.EvictionIn / 5)
				log.Println("Joined", pool, "rejoining in", rejoin)
				time.Sleep(rejoin)
				continue
			} else {
				log.Println("Failed to deserialize response", err)
			}
		} else {
			log.Println(pool, "unknown response type from server", resp.StatusCode)
		}
		time.Sleep(time.Hour)
	}
}
|
||||
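poolHandler above only needs a very small contract from the pool server: a POST of `{"url": ...}` answered with a 200 and an `evictionIn` value. For local testing, a hypothetical stand-in endpoint could look like the sketch below (the listen address and path are arbitrary choices, not part of the real pool service); note that the relay decodes `evictionIn` into a `time.Duration`, so the number is interpreted as nanoseconds.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/endpoint", func(w http.ResponseWriter, r *http.Request) {
		var req struct {
			URL string `json:"url"`
		}
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Println("Relay announced:", req.URL)

		// The relay decodes this field into a time.Duration, so the number
		// is taken as nanoseconds; 45 minutes here.
		json.NewEncoder(w).Encode(map[string]int64{
			"evictionIn": int64(45 * time.Minute),
		})
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Pointing a relay at this stub with `-pools="http://localhost:8080/endpoint"` should log a successful join and schedule a rejoin after 4/5 of the advertised eviction time, i.e. about 36 minutes here.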
326
cmd/strelaysrv/session.go
Normal file
@@ -0,0 +1,326 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
sessionMut = sync.RWMutex{}
|
||||
activeSessions = make([]*session, 0)
|
||||
pendingSessions = make(map[string]*session, 0)
|
||||
numProxies int64
|
||||
bytesProxied int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *ratelimit.Bucket) *session {
|
||||
serverkey := make([]byte, 32)
|
||||
_, err := rand.Read(serverkey)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
clientkey := make([]byte, 32)
|
||||
_, err = rand.Read(clientkey)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ses := &session{
|
||||
serverkey: serverkey,
|
||||
serverid: serverid,
|
||||
clientkey: clientkey,
|
||||
clientid: clientid,
|
||||
rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
|
||||
connsChan: make(chan net.Conn),
|
||||
conns: make([]net.Conn, 0, 2),
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("New session", ses)
|
||||
}
|
||||
|
||||
sessionMut.Lock()
|
||||
pendingSessions[string(ses.serverkey)] = ses
|
||||
pendingSessions[string(ses.clientkey)] = ses
|
||||
sessionMut.Unlock()
|
||||
|
||||
return ses
|
||||
}
|
||||
|
||||
func findSession(key string) *session {
|
||||
sessionMut.Lock()
|
||||
defer sessionMut.Unlock()
|
||||
ses, ok := pendingSessions[key]
|
||||
if !ok {
|
||||
return nil
|
||||
|
||||
}
|
||||
delete(pendingSessions, key)
|
||||
return ses
|
||||
}
|
||||
|
||||
func dropSessions(id syncthingprotocol.DeviceID) {
|
||||
sessionMut.RLock()
|
||||
for _, session := range activeSessions {
|
||||
if session.HasParticipant(id) {
|
||||
if debug {
|
||||
log.Println("Dropping session", session, "involving", id)
|
||||
}
|
||||
session.CloseConns()
|
||||
}
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
}
|
||||
|
||||
func hasSessions(id syncthingprotocol.DeviceID) bool {
|
||||
sessionMut.RLock()
|
||||
has := false
|
||||
for _, session := range activeSessions {
|
||||
if session.HasParticipant(id) {
|
||||
has = true
|
||||
break
|
||||
}
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
return has
|
||||
}
|
||||
|
||||
type session struct {
|
||||
mut sync.Mutex
|
||||
|
||||
serverkey []byte
|
||||
serverid syncthingprotocol.DeviceID
|
||||
|
||||
clientkey []byte
|
||||
clientid syncthingprotocol.DeviceID
|
||||
|
||||
rateLimit func(bytes int64)
|
||||
|
||||
connsChan chan net.Conn
|
||||
conns []net.Conn
|
||||
}
|
||||
|
||||
func (s *session) AddConnection(conn net.Conn) bool {
|
||||
if debug {
|
||||
log.Println("New connection for", s, "from", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
select {
|
||||
case s.connsChan <- conn:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *session) Serve() {
|
||||
timedout := time.After(messageTimeout)
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "serving")
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case conn := <-s.connsChan:
|
||||
s.mut.Lock()
|
||||
s.conns = append(s.conns, conn)
|
||||
s.mut.Unlock()
|
||||
// We're the only ones mutating s.conns, hence we are free to read it.
|
||||
if len(s.conns) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
close(s.connsChan)
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "starting between", s.conns[0].RemoteAddr(), "and", s.conns[1].RemoteAddr())
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
var err0 error
|
||||
go func() {
|
||||
err0 = s.proxy(s.conns[0], s.conns[1])
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
var err1 error
|
||||
go func() {
|
||||
err1 = s.proxy(s.conns[1], s.conns[0])
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
sessionMut.Lock()
|
||||
activeSessions = append(activeSessions, s)
|
||||
sessionMut.Unlock()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "ended, outcomes:", err0, "and", err1)
|
||||
}
|
||||
goto done
|
||||
|
||||
case <-timedout:
|
||||
if debug {
|
||||
log.Println("Session", s, "timed out")
|
||||
}
|
||||
goto done
|
||||
}
|
||||
}
|
||||
done:
|
||||
// We can end up here in 3 cases:
|
||||
// 1. Timeout joining, in which case there are potentially entries in pendingSessions
|
||||
// 2. General session end/timeout, in which case there are entries in activeSessions
|
||||
// 3. Protocol handler calls dropSessions as one of its clients disconnects.
|
||||
|
||||
sessionMut.Lock()
|
||||
delete(pendingSessions, string(s.serverkey))
|
||||
delete(pendingSessions, string(s.clientkey))
|
||||
|
||||
for i, session := range activeSessions {
|
||||
if session == s {
|
||||
l := len(activeSessions) - 1
|
||||
activeSessions[i] = activeSessions[l]
|
||||
activeSessions[l] = nil
|
||||
activeSessions = activeSessions[:l]
|
||||
}
|
||||
}
|
||||
sessionMut.Unlock()
|
||||
|
||||
// If we are here because of case 2 or 3, we are potentially closing some or
|
||||
// all connections a second time.
|
||||
s.CloseConns()
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "stopping")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.serverid[:],
|
||||
Key: []byte(s.clientkey),
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) GetServerInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.clientid[:],
|
||||
Key: []byte(s.serverkey),
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) HasParticipant(id syncthingprotocol.DeviceID) bool {
|
||||
return s.clientid == id || s.serverid == id
|
||||
}
|
||||
|
||||
func (s *session) CloseConns() {
|
||||
s.mut.Lock()
|
||||
for _, conn := range s.conns {
|
||||
conn.Close()
|
||||
}
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
func (s *session) proxy(c1, c2 net.Conn) error {
|
||||
if debug {
|
||||
log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr())
|
||||
}
|
||||
|
||||
atomic.AddInt64(&numProxies, 1)
|
||||
defer atomic.AddInt64(&numProxies, -1)
|
||||
|
||||
buf := make([]byte, 65536)
|
||||
for {
|
||||
c1.SetReadDeadline(time.Now().Add(networkTimeout))
|
||||
n, err := c1.Read(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.AddInt64(&bytesProxied, int64(n))
|
||||
|
||||
if debug {
|
||||
log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr())
|
||||
}
|
||||
|
||||
if s.rateLimit != nil {
|
||||
s.rateLimit(int64(n))
|
||||
}
|
||||
|
||||
c2.SetWriteDeadline(time.Now().Add(networkTimeout))
|
||||
_, err = c2.Write(buf[:n])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) String() string {
|
||||
return fmt.Sprintf("<%s/%s>", hex.EncodeToString(s.clientkey)[:5], hex.EncodeToString(s.serverkey)[:5])
|
||||
}
|
||||
|
||||
func makeRateLimitFunc(sessionRateLimit, globalRateLimit *ratelimit.Bucket) func(int64) {
|
||||
// This may be a case of super duper premature optimization... We build an
|
||||
// optimized function to do the rate limiting here based on what we need
|
||||
// to do and then use it in the loop.
|
||||
|
||||
if sessionRateLimit == nil && globalRateLimit == nil {
|
||||
// No limiting needed. We could equally well return a func(int64){} and
|
||||
// not do a nil check where we use it, but I think the nil check there
|
||||
// makes it clear that there will be no limiting if none is
|
||||
// configured...
|
||||
return nil
|
||||
}
|
||||
|
||||
if sessionRateLimit == nil {
|
||||
// We only have a global limiter
|
||||
return func(bytes int64) {
|
||||
globalRateLimit.Wait(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
if globalRateLimit == nil {
|
||||
// We only have a session limiter
|
||||
return func(bytes int64) {
|
||||
sessionRateLimit.Wait(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
// We have both. Queue the bytes on both the global and session specific
|
||||
// rate limiters. Wait for both in parallel, so that the actual send
|
||||
// happens when both conditions are satisfied. In practice this just means
|
||||
// wait the longer of the two times.
|
||||
return func(bytes int64) {
|
||||
t0 := sessionRateLimit.Take(bytes)
|
||||
t1 := globalRateLimit.Take(bytes)
|
||||
if t0 > t1 {
|
||||
time.Sleep(t0)
|
||||
} else {
|
||||
time.Sleep(t1)
|
||||
}
|
||||
}
|
||||
}
|
||||
111
cmd/strelaysrv/status.go
Normal file
@@ -0,0 +1,111 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var rc *rateCalculator
|
||||
|
||||
func statusService(addr string) {
|
||||
rc = newRateCalculator(360, 10*time.Second, &bytesProxied)
|
||||
|
||||
http.HandleFunc("/status", getStatus)
|
||||
if err := http.ListenAndServe(addr, nil); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
status := make(map[string]interface{})
|
||||
|
||||
sessionMut.Lock()
|
||||
// This can potentially be double the number of pending sessions, as each session has two keys, one for each side.
|
||||
status["startTime"] = rc.startTime
|
||||
status["uptimeSeconds"] = time.Since(rc.startTime) / time.Second
|
||||
status["numPendingSessionKeys"] = len(pendingSessions)
|
||||
status["numActiveSessions"] = len(activeSessions)
|
||||
sessionMut.Unlock()
|
||||
status["numConnections"] = atomic.LoadInt64(&numConnections)
|
||||
status["numProxies"] = atomic.LoadInt64(&numProxies)
|
||||
status["bytesProxied"] = atomic.LoadInt64(&bytesProxied)
|
||||
status["goVersion"] = runtime.Version()
|
||||
status["goOS"] = runtime.GOOS
|
||||
status["goArch"] = runtime.GOARCH
|
||||
status["goMaxProcs"] = runtime.GOMAXPROCS(-1)
|
||||
status["goNumRoutine"] = runtime.NumGoroutine()
|
||||
status["kbps10s1m5m15m30m60m"] = []int64{
|
||||
rc.rate(10/10) * 8 / 1000,
|
||||
rc.rate(60/10) * 8 / 1000,
|
||||
rc.rate(5*60/10) * 8 / 1000,
|
||||
rc.rate(15*60/10) * 8 / 1000,
|
||||
rc.rate(30*60/10) * 8 / 1000,
|
||||
rc.rate(60*60/10) * 8 / 1000,
|
||||
}
|
||||
status["options"] = map[string]interface{}{
|
||||
"network-timeout": networkTimeout / time.Second,
|
||||
"ping-interval": pingInterval / time.Second,
|
||||
"message-timeout": messageTimeout / time.Second,
|
||||
"per-session-rate": sessionLimitBps,
|
||||
"global-rate": globalLimitBps,
|
||||
"pools": pools,
|
||||
"provided-by": providedBy,
|
||||
}
|
||||
|
||||
bs, err := json.MarshalIndent(status, "", " ")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
type rateCalculator struct {
|
||||
rates []int64
|
||||
prev int64
|
||||
counter *int64
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {
|
||||
r := &rateCalculator{
|
||||
rates: make([]int64, keepIntervals),
|
||||
counter: counter,
|
||||
startTime: time.Now(),
|
||||
}
|
||||
|
||||
go r.updateRates(interval)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *rateCalculator) updateRates(interval time.Duration) {
|
||||
for {
|
||||
now := time.Now()
|
||||
next := now.Truncate(interval).Add(interval)
|
||||
time.Sleep(next.Sub(now))
|
||||
|
||||
cur := atomic.LoadInt64(r.counter)
|
||||
rate := int64(float64(cur-r.prev) / interval.Seconds())
|
||||
copy(r.rates[1:], r.rates)
|
||||
r.rates[0] = rate
|
||||
r.prev = cur
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rateCalculator) rate(periods int) int64 {
|
||||
var tot int64
|
||||
for i := 0; i < periods; i++ {
|
||||
tot += r.rates[i]
|
||||
}
|
||||
return tot / int64(periods)
|
||||
}
|
||||
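The status service above serves a single JSON document. As a rough illustration, a client like the following could poll it and pick out a few of the counters that getStatus fills in; the localhost address assumes a relay running locally with the default -status-srv setting, and the field selection is only an example.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Default -status-srv address; adjust if the relay listens elsewhere.
	resp, err := http.Get("http://localhost:22070/status")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Only a few of the fields getStatus exposes are decoded here.
	var status struct {
		NumActiveSessions int64   `json:"numActiveSessions"`
		NumConnections    int64   `json:"numConnections"`
		BytesProxied      int64   `json:"bytesProxied"`
		Rates             []int64 `json:"kbps10s1m5m15m30m60m"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("active sessions: %d, connections: %d, bytes proxied: %d\n",
		status.NumActiveSessions, status.NumConnections, status.BytesProxied)
	if len(status.Rates) > 0 {
		// getStatus already converts to kbit/s; index 0 covers the last 10 seconds.
		fmt.Printf("rate over the last 10s: %d kbit/s\n", status.Rates[0])
	}
}
```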
152
cmd/strelaysrv/testutil/main.go
Normal file
@@ -0,0 +1,152 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
|
||||
var connect, relay, dir string
|
||||
var join, test bool
|
||||
|
||||
flag.StringVar(&connect, "connect", "", "Device ID to which to connect to")
|
||||
flag.BoolVar(&join, "join", false, "Join relay")
|
||||
flag.BoolVar(&test, "test", false, "Generic relay test")
|
||||
flag.StringVar(&relay, "relay", "relay://127.0.0.1:22067", "Relay address")
|
||||
flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load X509 key pair:", err)
|
||||
}
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("ID:", id)
|
||||
|
||||
uri, err := url.Parse(relay)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stdin := make(chan string)
|
||||
|
||||
go stdinReader(stdin)
|
||||
|
||||
if join {
|
||||
log.Println("Creating client")
|
||||
relay, err := client.NewClient(uri, []tls.Certificate{cert}, nil, 10*time.Second)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println("Created client")
|
||||
|
||||
go relay.Serve()
|
||||
|
||||
recv := make(chan protocol.SessionInvitation)
|
||||
|
||||
go func() {
|
||||
log.Println("Starting invitation receiver")
|
||||
for invite := range relay.Invitations() {
|
||||
select {
|
||||
case recv <- invite:
|
||||
log.Println("Received invitation", invite)
|
||||
default:
|
||||
log.Println("Discarding invitation", invite)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
conn, err := client.JoinSession(<-recv)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to join", err)
|
||||
}
|
||||
log.Println("Joined", conn.RemoteAddr(), conn.LocalAddr())
|
||||
connectToStdio(stdin, conn)
|
||||
log.Println("Finished", conn.RemoteAddr(), conn.LocalAddr())
|
||||
}
|
||||
} else if connect != "" {
|
||||
id, err := syncthingprotocol.DeviceIDFromString(connect)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
invite, err := client.GetInvitationFromRelay(uri, id, []tls.Certificate{cert}, 10*time.Second)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Println("Received invitation", invite)
|
||||
conn, err := client.JoinSession(invite)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to join", err)
|
||||
}
|
||||
log.Println("Joined", conn.RemoteAddr(), conn.LocalAddr())
|
||||
connectToStdio(stdin, conn)
|
||||
log.Println("Finished", conn.RemoteAddr(), conn.LocalAddr())
|
||||
} else if test {
|
||||
if client.TestRelay(uri, []tls.Certificate{cert}, time.Second, 2*time.Second, 4) {
|
||||
log.Println("OK")
|
||||
} else {
|
||||
log.Println("FAIL")
|
||||
}
|
||||
} else {
|
||||
log.Fatal("Requires either join or connect")
|
||||
}
|
||||
}
|
||||
|
||||
func stdinReader(c chan<- string) {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
c <- scanner.Text()
|
||||
c <- "\n"
|
||||
}
|
||||
}
|
||||
|
||||
func connectToStdio(stdin <-chan string, conn net.Conn) {
|
||||
go func() {
|
||||
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
conn.SetReadDeadline(time.Now().Add(time.Millisecond))
|
||||
n, err := conn.Read(buf[0:])
|
||||
if err != nil {
|
||||
nerr, ok := err.(net.Error)
|
||||
if !ok || !nerr.Timeout() {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
os.Stdout.Write(buf[:n])
|
||||
|
||||
select {
|
||||
case msg := <-stdin:
|
||||
_, err := conn.Write([]byte(msg))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
28
cmd/strelaysrv/utils.go
Normal file
@@ -0,0 +1,28 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors.

package main

import (
	"errors"
	"net"
)

func setTCPOptions(conn net.Conn) error {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return errors.New("Not a TCP connection")
	}
	if err := tcpConn.SetLinger(0); err != nil {
		return err
	}
	if err := tcpConn.SetNoDelay(true); err != nil {
		return err
	}
	if err := tcpConn.SetKeepAlivePeriod(networkTimeout); err != nil {
		return err
	}
	if err := tcpConn.SetKeepAlive(true); err != nil {
		return err
	}
	return nil
}
|
||||
@@ -8,6 +8,7 @@ package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
@@ -31,7 +32,7 @@ Where command is one of:
|
||||
gen
|
||||
- generate a new key pair
|
||||
|
||||
sign <privkeyfile> <datafile>
|
||||
sign <privkeyfile> [datafile]
|
||||
- sign a file
|
||||
|
||||
verify <signaturefile> <datafile>
|
||||
@@ -72,13 +73,19 @@ func sign(keyname, dataname string) {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fd, err := os.Open(dataname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
var input io.Reader
|
||||
if dataname == "-" || dataname == "" {
|
||||
input = os.Stdin
|
||||
} else {
|
||||
fd, err := os.Open(dataname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer fd.Close()
|
||||
input = fd
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
sig, err := signature.Sign(privkey, fd)
|
||||
sig, err := signature.Sign(privkey, input)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
212
cmd/stvanity/main.go
Normal file
@@ -0,0 +1,212 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/big"
|
||||
mr "math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type result struct {
|
||||
id protocol.DeviceID
|
||||
priv *ecdsa.PrivateKey
|
||||
derBytes []byte
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
prefix := strings.ToUpper(strings.Replace(flag.Arg(0), "-", "", -1))
|
||||
if len(prefix) > 7 {
|
||||
prefix = prefix[:7] + "-" + prefix[7:]
|
||||
}
|
||||
|
||||
found := make(chan result)
|
||||
stop := make(chan struct{})
|
||||
var count int64
|
||||
|
||||
// Print periodic progress reports.
|
||||
go printProgress(prefix, &count)
|
||||
|
||||
// Run one certificate generator per CPU core.
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
generatePrefixed(prefix, &count, found, stop)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// Save the result, when one has been found.
|
||||
res := <-found
|
||||
close(stop)
|
||||
wg.Wait()
|
||||
|
||||
fmt.Println("Found", res.id)
|
||||
saveCert(res.priv, res.derBytes)
|
||||
fmt.Println("Saved to cert.pem, key.pem")
|
||||
}
|
||||
|
||||
// Try certificates until one is found that has the prefix at the start of
|
||||
// the resulting device ID. Increments count atomically, sends the result to
|
||||
// found, returns when stop is closed.
|
||||
func generatePrefixed(prefix string, count *int64, found chan<- result, stop <-chan struct{}) {
|
||||
notBefore := time.Now()
|
||||
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: new(big.Int).SetInt64(mr.Int63()),
|
||||
Subject: pkix.Name{
|
||||
CommonName: "syncthing",
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
priv, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
id := protocol.NewDeviceID(derBytes)
|
||||
atomic.AddInt64(count, 1)
|
||||
|
||||
if strings.HasPrefix(id.String(), prefix) {
|
||||
select {
|
||||
case found <- result{id, priv, derBytes}:
|
||||
case <-stop:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printProgress(prefix string, count *int64) {
|
||||
started := time.Now()
|
||||
wantBits := 5 * len(prefix)
|
||||
if wantBits > 63 {
|
||||
fmt.Printf("Want %d bits for prefix %q, refusing to boil the ocean.\n", wantBits, prefix)
|
||||
os.Exit(1)
|
||||
}
|
||||
expectedIterations := float64(int(1) << uint(wantBits))
|
||||
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
|
||||
|
||||
for range time.NewTicker(15 * time.Second).C {
|
||||
tried := atomic.LoadInt64(count)
|
||||
elapsed := time.Since(started)
|
||||
rate := float64(tried) / elapsed.Seconds()
|
||||
expected := timeStr(expectedIterations / rate)
|
||||
fmt.Printf("Trying %.0f certs/s, tested %d so far in %v, expect ~%s total time to complete\n", rate, tried, elapsed/time.Second*time.Second, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func saveCert(priv interface{}, derBytes []byte) {
|
||||
certOut, err := os.Create("cert.pem")
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = certOut.Close()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
block, err := pemBlockForKey(priv)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = pem.Encode(keyOut, block)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = keyOut.Close()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func pemBlockForKey(priv interface{}) (*pem.Block, error) {
|
||||
switch k := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
b, err := x509.MarshalECPrivateKey(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown key type")
|
||||
}
|
||||
}
|
||||
|
||||
func timeStr(seconds float64) string {
|
||||
if seconds < 60 {
|
||||
return fmt.Sprintf("%.0fs", seconds)
|
||||
}
|
||||
if seconds < 3600 {
|
||||
return fmt.Sprintf("%.0fm", seconds/60)
|
||||
}
|
||||
if seconds < 86400 {
|
||||
return fmt.Sprintf("%.0fh", seconds/3600)
|
||||
}
|
||||
if seconds < 86400*365 {
|
||||
return fmt.Sprintf("%.0f days", seconds/3600)
|
||||
}
|
||||
return fmt.Sprintf("%.0f years", seconds/86400/365)
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type addressLister struct {
|
||||
upnpService *upnpService
|
||||
cfg *config.Wrapper
|
||||
}
|
||||
|
||||
func newAddressLister(upnpService *upnpService, cfg *config.Wrapper) *addressLister {
|
||||
return &addressLister{
|
||||
upnpService: upnpService,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
// ExternalAddresses returns a list of addresses that are our best guess for
|
||||
// where we are reachable from the outside. As a special case, we may return
|
||||
// one or more addresses with an empty IP address (0.0.0.0 or ::) and just
|
||||
// port number - this means that the outside address of a NAT gateway should
|
||||
// be substituted.
|
||||
func (e *addressLister) ExternalAddresses() []string {
|
||||
return e.addresses(false)
|
||||
}
|
||||
|
||||
// AllAddresses returns a list of addresses that are our best guess for where
|
||||
// we are reachable from the local network. Same conditions as
|
||||
// ExternalAddresses, but private IPv4 addresses are included.
|
||||
func (e *addressLister) AllAddresses() []string {
|
||||
return e.addresses(true)
|
||||
}
|
||||
|
||||
func (e *addressLister) addresses(includePrivateIPV4 bool) []string {
|
||||
var addrs []string
|
||||
|
||||
// Grab our listen addresses from the config. Unspecified ones are passed
|
||||
// on verbatim (to be interpreted by a global discovery server or local
|
||||
// discovery peer). Public addresses are passed on verbatim. Private
|
||||
// addresses are filtered.
|
||||
for _, addrStr := range e.cfg.Options().ListenAddress {
|
||||
addrURL, err := url.Parse(addrStr)
|
||||
if err != nil {
|
||||
l.Infoln("Listen address", addrStr, "is invalid:", err)
|
||||
continue
|
||||
}
|
||||
addr, err := net.ResolveTCPAddr("tcp", addrURL.Host)
|
||||
if err != nil {
|
||||
l.Infoln("Listen address", addrStr, "is invalid:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if addr.IP == nil || addr.IP.IsUnspecified() {
|
||||
// Address like 0.0.0.0:22000 or [::]:22000 or :22000; include as is.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
} else if isPublicIPv4(addr.IP) || isPublicIPv6(addr.IP) {
|
||||
// A public address; include as is.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
} else if includePrivateIPV4 && addr.IP.To4().IsGlobalUnicast() {
|
||||
// A private IPv4 address.
|
||||
addrs = append(addrs, tcpAddr(addr.String()))
|
||||
}
|
||||
}
|
||||
|
||||
// Get an external port mapping from the upnpService, if it has one. If so,
|
||||
// add it as another unspecified address.
|
||||
if e.upnpService != nil {
|
||||
if port := e.upnpService.ExternalPort(); port != 0 {
|
||||
addrs = append(addrs, fmt.Sprintf("tcp://:%d", port))
|
||||
}
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
func isPublicIPv4(ip net.IP) bool {
|
||||
ip = ip.To4()
|
||||
if ip == nil {
|
||||
// Not an IPv4 address (IPv6)
|
||||
return false
|
||||
}
|
||||
|
||||
// IsGlobalUnicast below only checks that it's not link local or
|
||||
// multicast, and we want to exclude private (NAT:ed) addresses as well.
|
||||
rfc1918 := []net.IPNet{
|
||||
{IP: net.IP{10, 0, 0, 0}, Mask: net.IPMask{255, 0, 0, 0}},
|
||||
{IP: net.IP{172, 16, 0, 0}, Mask: net.IPMask{255, 240, 0, 0}},
|
||||
{IP: net.IP{192, 168, 0, 0}, Mask: net.IPMask{255, 255, 0, 0}},
|
||||
}
|
||||
for _, n := range rfc1918 {
|
||||
if n.Contains(ip) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return ip.IsGlobalUnicast()
|
||||
}
|
||||
|
||||
func isPublicIPv6(ip net.IP) bool {
|
||||
if ip.To4() != nil {
|
||||
// Not an IPv6 address (IPv4)
|
||||
// (To16() returns a v6 mapped v4 address so can't be used to check
|
||||
// that it's an actual v6 address)
|
||||
return false
|
||||
}
|
||||
|
||||
return ip.IsGlobalUnicast()
|
||||
}
|
||||
|
||||
func tcpAddr(host string) string {
|
||||
u := url.URL{
|
||||
Scheme: "tcp",
|
||||
Host: host,
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
@@ -7,34 +7,32 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/syncthing/syncthing/lib/auto"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/stats"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
@@ -44,29 +42,25 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
configInSync = true
|
||||
startTime = time.Now()
|
||||
startTime = time.Now()
|
||||
)
|
||||
|
||||
type apiService struct {
|
||||
id protocol.DeviceID
|
||||
cfg configIntf
|
||||
httpsCertFile string
|
||||
httpsKeyFile string
|
||||
assetDir string
|
||||
themes []string
|
||||
model modelIntf
|
||||
eventSub events.BufferedSubscription
|
||||
discoverer discover.CachingMux
|
||||
relayService relay.Service
|
||||
fss *folderSummaryService
|
||||
systemConfigMut sync.Mutex // serializes posts to /rest/system/config
|
||||
stop chan struct{} // signals intentional stop
|
||||
configChanged chan struct{} // signals intentional listener close due to config change
|
||||
started chan struct{} // signals startup complete, for testing only
|
||||
|
||||
listener net.Listener
|
||||
listenerMut sync.Mutex
|
||||
id protocol.DeviceID
|
||||
cfg configIntf
|
||||
httpsCertFile string
|
||||
httpsKeyFile string
|
||||
statics *staticsServer
|
||||
model modelIntf
|
||||
eventSub events.BufferedSubscription
|
||||
discoverer discover.CachingMux
|
||||
connectionsService connectionsIntf
|
||||
fss *folderSummaryService
|
||||
systemConfigMut sync.Mutex // serializes posts to /rest/system/config
|
||||
stop chan struct{} // signals intentional stop
|
||||
configChanged chan struct{} // signals intentional listener close due to config change
|
||||
started chan string // signals startup complete by sending the listener address, for testing only
|
||||
startedOnce bool // the service has started successfully at least once
|
||||
|
||||
guiErrors logger.Recorder
|
||||
systemLog logger.Recorder
|
||||
@@ -74,7 +68,7 @@ type apiService struct {
|
||||
|
||||
type modelIntf interface {
|
||||
GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{}
|
||||
Completion(device protocol.DeviceID, folder string) float64
|
||||
Completion(device protocol.DeviceID, folder string) model.FolderCompletion
Override(folder string)
NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int)
NeedSize(folder string) (nfiles int, bytes int64)
@@ -84,7 +78,7 @@ type modelIntf interface {
CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
ResetFolder(folder string)
Availability(folder, file string) []protocol.DeviceID
Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []model.Availability
GetIgnores(folder string) ([]string, []string, error)
SetIgnores(folder string, content []string) error
PauseDevice(device protocol.DeviceID)
@@ -92,13 +86,13 @@ type modelIntf interface {
DelayScan(folder string, next time.Duration)
ScanFolder(folder string) error
ScanFolders() map[string]error
ScanFolderSubs(folder string, subs []string) error
ScanFolderSubdirs(folder string, subs []string) error
BringToFront(folder, file string)
ConnectedTo(deviceID protocol.DeviceID) bool
GlobalSize(folder string) (nfiles, deleted int, bytes int64)
LocalSize(folder string) (nfiles, deleted int, bytes int64)
CurrentLocalVersion(folder string) (int64, bool)
RemoteLocalVersion(folder string) (int64, bool)
CurrentSequence(folder string) (int64, bool)
RemoteSequence(folder string) (int64, bool)
State(folder string) (string, time.Time, error)
}

@@ -106,54 +100,38 @@ type configIntf interface {
GUI() config.GUIConfiguration
Raw() config.Configuration
Options() config.OptionsConfiguration
Replace(cfg config.Configuration) config.CommitResponse
Replace(cfg config.Configuration) error
Subscribe(c config.Committer)
Folders() map[string]config.FolderConfiguration
Devices() map[protocol.DeviceID]config.DeviceConfiguration
Save() error
ListenAddresses() []string
RequiresRestart() bool
}

func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, relayService relay.Service, errors, systemLog logger.Recorder) (*apiService, error) {
type connectionsIntf interface {
Status() map[string]interface{}
}

func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) *apiService {
service := &apiService{
id: id,
cfg: cfg,
httpsCertFile: httpsCertFile,
httpsKeyFile: httpsKeyFile,
assetDir: assetDir,
model: m,
eventSub: eventSub,
discoverer: discoverer,
relayService: relayService,
systemConfigMut: sync.NewMutex(),
stop: make(chan struct{}),
configChanged: make(chan struct{}),
listenerMut: sync.NewMutex(),
guiErrors: errors,
systemLog: systemLog,
id: id,
cfg: cfg,
httpsCertFile: httpsCertFile,
httpsKeyFile: httpsKeyFile,
statics: newStaticsServer(cfg.GUI().Theme, assetDir),
model: m,
eventSub: eventSub,
discoverer: discoverer,
connectionsService: connectionsService,
systemConfigMut: sync.NewMutex(),
stop: make(chan struct{}),
configChanged: make(chan struct{}),
guiErrors: errors,
systemLog: systemLog,
}

seen := make(map[string]struct{})
// Load themes from compiled in assets.
for file := range auto.Assets() {
theme := strings.Split(file, "/")[0]
if _, ok := seen[theme]; !ok {
seen[theme] = struct{}{}
service.themes = append(service.themes, theme)
}
}
if assetDir != "" {
// Load any extra themes from the asset override dir.
for _, dir := range dirNames(assetDir) {
if _, ok := seen[dir]; !ok {
seen[dir] = struct{}{}
service.themes = append(service.themes, dir)
}
}
}

var err error
service.listener, err = service.getListener(cfg.GUI())
return service, err
return service
}

func (s *apiService) getListener(guiCfg config.GUIConfiguration) (net.Listener, error) {
@@ -198,7 +176,10 @@ func (s *apiService) getListener(guiCfg config.GUIConfiguration) (net.Listener,
return nil, err
}

listener := &tlsutil.DowngradingListener{rawListener, tlsCfg}
listener := &tlsutil.DowngradingListener{
Listener: rawListener,
TLSConfig: tlsCfg,
}
return listener, nil
}

@@ -217,9 +198,22 @@ func sendJSON(w http.ResponseWriter, jsonObject interface{}) {
}

func (s *apiService) Serve() {
s.listenerMut.Lock()
listener := s.listener
s.listenerMut.Unlock()
listener, err := s.getListener(s.cfg.GUI())
if err != nil {
if !s.startedOnce {
// This is during initialization. A failure here should be fatal
// as there will be no way for the user to communicate with us
// otherwise anyway.
l.Fatalln("Starting API/GUI:", err)
}

// We let this be a loud user-visible warning as it may be the only
// indication they get that the GUI won't be available on startup.
l.Warnln("Starting API/GUI:", err)
return
}
s.startedOnce = true
defer listener.Close()

if listener == nil {
// Not much we can do here other than exit quickly. The supervisor
@@ -241,6 +235,7 @@ func (s *apiService) Serve() {
getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID) // id
getRestMux.HandleFunc("/rest/svc/lang", s.getLang) // -
getRestMux.HandleFunc("/rest/svc/report", s.getReport) // -
getRestMux.HandleFunc("/rest/svc/random/string", s.getRandomString) // [length]
getRestMux.HandleFunc("/rest/system/browse", s.getSystemBrowse) // current
getRestMux.HandleFunc("/rest/system/config", s.getSystemConfig) // -
getRestMux.HandleFunc("/rest/system/config/insync", s.getSystemConfigInsync) // -
@@ -274,8 +269,12 @@ func (s *apiService) Serve() {
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]

// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
getRestMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)
debugMux := http.NewServeMux()
debugMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
debugMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)
debugMux.HandleFunc("/rest/debug/cpuprof", s.getCPUProf) // duration
debugMux.HandleFunc("/rest/debug/heapprof", s.getHeapProf)
getRestMux.Handle("/rest/debug/", s.whenDebugging(debugMux))

// A handler that splits requests between the two above and disables
// caching
@@ -287,25 +286,16 @@ func (s *apiService) Serve() {
mux.HandleFunc("/qr/", s.getQR)

// Serve compiled in assets unless an asset directory was set (for development)
assets := &embeddedStatic{
theme: s.cfg.GUI().Theme,
lastModified: time.Now(),
mut: sync.NewRWMutex(),
assetDir: s.assetDir,
assets: auto.Assets(),
}
mux.Handle("/", assets)
mux.Handle("/", s.statics)

s.cfg.Subscribe(assets)
// Handle the special meta.js path
mux.HandleFunc("/meta.js", s.getJSMetadata)

guiCfg := s.cfg.GUI()

// Add the CORS handling
handler := corsMiddleware(mux)

// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler = csrfMiddleware(s.id.String()[:5], "/rest", guiCfg, handler)
handler := csrfMiddleware(s.id.String()[:5], "/rest", guiCfg, mux)

// Add our version and ID as a header to responses
handler = withDetailsMiddleware(s.id, handler)
@@ -320,6 +310,9 @@ func (s *apiService) Serve() {
handler = redirectToHTTPSMiddleware(handler)
}

// Add the CORS handling
handler = corsMiddleware(handler)

handler = debugMiddleware(handler)

srv := http.Server{
@@ -335,33 +328,33 @@ func (s *apiService) Serve() {
l.Infoln("Access the GUI via the following URL:", guiCfg.URL())
if s.started != nil {
// only set when run by the tests
close(s.started)
s.started <- listener.Addr().String()
}
err := srv.Serve(listener)

// The return could be due to an intentional close. Wait for the stop
// signal before returning. IF there is no stop signal within a second, we
// assume it was unintentional and log the error before retrying.
// Serve in the background

serveError := make(chan error, 1)
go func() {
serveError <- srv.Serve(listener)
}()

// Wait for stop, restart or error signals

select {
case <-s.stop:
// Shutting down permanently
l.Debugln("shutting down (stop)")
case <-s.configChanged:
case <-time.After(time.Second):
l.Warnln("API:", err)
// Soft restart due to configuration change
l.Debugln("restarting (config changed)")
case <-serveError:
// Restart due to listen/serve failure
l.Warnln("GUI/API:", err, "(restarting)")
}
}

func (s *apiService) Stop() {
s.listenerMut.Lock()
listener := s.listener
s.listenerMut.Unlock()

close(s.stop)

// listener may be nil here if we've had a config change to a broken
// configuration, in which case we shouldn't try to close it.
if listener != nil {
listener.Close()
}
}

func (s *apiService) String() string {
@@ -369,35 +362,25 @@ func (s *apiService) String() string {
}

func (s *apiService) VerifyConfiguration(from, to config.Configuration) error {
if _, err := net.ResolveTCPAddr("tcp", to.GUI.Address()); err != nil {
return err
}
return nil
}

func (s *apiService) CommitConfiguration(from, to config.Configuration) bool {
// No action required when this changes, so mask the fact that it changed at all.
from.GUI.Debugging = to.GUI.Debugging

if to.GUI == from.GUI {
return true
}

// Order here is important. We must close the listener to stop Serve(). We
// must create a new listener before Serve() starts again. We can't create
// a new listener on the same port before the previous listener is closed.
// To assist in this little dance the Serve() method will wait for a
// signal on the configChanged channel after the listener has closed.

s.listenerMut.Lock()
defer s.listenerMut.Unlock()

s.listener.Close()

var err error
s.listener, err = s.getListener(to.GUI)
if err != nil {
// Ideally this should be a verification error, but we check it by
// creating a new listener which requires shutting down the previous
// one first, which is too destructive for the VerifyConfiguration
// method.
return false
if to.GUI.Theme != from.GUI.Theme {
s.statics.setTheme(to.GUI.Theme)
}

// Tell the serve loop to restart
s.configChanged <- struct{}{}

return true
@@ -448,14 +431,10 @@ func corsMiddleware(next http.Handler) http.Handler {
// when the browser initiate a POST request.
//
// As the OPTIONS request is unauthorized, this handler must be the first
// of the chain.
// of the chain (hence added at the end).
//
// See https://www.w3.org/TR/cors/ for details.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add a generous access-control-allow-origin header since we may be
// redirecting REST requests over protocols
w.Header().Add("Access-Control-Allow-Origin", "*")

// Process OPTIONS requests
if r.Method == "OPTIONS" {
// Only GET/POST Methods are supported
@@ -516,10 +495,30 @@ func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
})
}

func (s *apiService) whenDebugging(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.cfg.GUI().Debugging {
h.ServeHTTP(w, r)
return
}

http.Error(w, "Debugging disabled", http.StatusBadRequest)
return
})
}

func (s *apiService) restPing(w http.ResponseWriter, r *http.Request) {
sendJSON(w, map[string]string{"ping": "pong"})
}

func (s *apiService) getJSMetadata(w http.ResponseWriter, r *http.Request) {
meta, _ := json.Marshal(map[string]string{
"deviceID": s.id.String(),
})
w.Header().Set("Content-Type", "application/javascript")
fmt.Fprintf(w, "var metadata = %s;\n", meta)
}

func (s *apiService) getSystemVersion(w http.ResponseWriter, r *http.Request) {
sendJSON(w, map[string]string{
"version": Version,
@@ -584,8 +583,11 @@ func (s *apiService) getDBCompletion(w http.ResponseWriter, r *http.Request) {
return
}

sendJSON(w, map[string]float64{
"completion": s.model.Completion(device, folder),
comp := s.model.Completion(device, folder)
sendJSON(w, map[string]interface{}{
"completion": comp.CompletionPct,
"needBytes": comp.NeedBytes,
"globalBytes": comp.GlobalBytes,
})
}

@@ -598,7 +600,7 @@ func (s *apiService) getDBStatus(w http.ResponseWriter, r *http.Request) {
|
||||
func folderSummary(cfg configIntf, m modelIntf, folder string) map[string]interface{} {
|
||||
var res = make(map[string]interface{})
|
||||
|
||||
res["invalid"] = cfg.Folders()[folder].Invalid
|
||||
res["invalid"] = "" // Deprecated, retains external API for now
|
||||
|
||||
globalFiles, globalDeleted, globalBytes := m.GlobalSize(folder)
|
||||
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
|
||||
@@ -617,10 +619,11 @@ func folderSummary(cfg configIntf, m modelIntf, folder string) map[string]interf
|
||||
res["error"] = err.Error()
|
||||
}
|
||||
|
||||
lv, _ := m.CurrentLocalVersion(folder)
|
||||
rv, _ := m.RemoteLocalVersion(folder)
|
||||
ourSeq, _ := m.CurrentSequence(folder)
|
||||
remoteSeq, _ := m.RemoteSequence(folder)
|
||||
|
||||
res["version"] = lv + rv
|
||||
res["version"] = ourSeq + remoteSeq // legacy
|
||||
res["sequence"] = ourSeq + remoteSeq // new name
|
||||
|
||||
ignorePatterns, _, _ := m.GetIgnores(folder)
|
||||
res["ignorePatterns"] = false
|
||||
@@ -692,7 +695,7 @@ func (s *apiService) getDBFile(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
av := s.model.Availability(folder, file)
|
||||
av := s.model.Availability(folder, file, protocol.Vector{}, protocol.BlockInfo{})
|
||||
sendJSON(w, map[string]interface{}{
|
||||
"global": jsonFileInfo(gf),
|
||||
"local": jsonFileInfo(lf),
|
||||
@@ -709,8 +712,9 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
defer s.systemConfigMut.Unlock()
|
||||
|
||||
to, err := config.ReadJSON(r.Body, myID)
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
l.Warnln("decoding posted config:", err)
|
||||
l.Warnln("Decoding posted config:", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@@ -733,7 +737,7 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
if curAcc := s.cfg.Options().URAccepted; to.Options.URAccepted > curAcc {
|
||||
// UR was enabled
|
||||
to.Options.URAccepted = usageReportVersion
|
||||
to.Options.URUniqueID = randomString(8)
|
||||
to.Options.URUniqueID = rand.String(8)
|
||||
} else if to.Options.URAccepted < curAcc {
|
||||
// UR was disabled
|
||||
to.Options.URAccepted = -1
|
||||
@@ -742,13 +746,19 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Activate and save
|
||||
|
||||
resp := s.cfg.Replace(to)
|
||||
configInSync = !resp.RequiresRestart
|
||||
s.cfg.Save()
|
||||
if err := s.cfg.Replace(to); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.cfg.Save(); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiService) getSystemConfigInsync(w http.ResponseWriter, r *http.Request) {
|
||||
sendJSON(w, map[string]bool{"configInSync": configInSync})
|
||||
sendJSON(w, map[string]bool{"configInSync": !s.cfg.RequiresRestart()})
|
||||
}
|
||||
|
||||
func (s *apiService) postSystemRestart(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -820,18 +830,9 @@ func (s *apiService) getSystemStatus(w http.ResponseWriter, r *http.Request) {
|
||||
res["discoveryMethods"] = discoMethods
|
||||
res["discoveryErrors"] = discoErrors
|
||||
}
|
||||
if s.relayService != nil {
|
||||
res["relaysEnabled"] = true
|
||||
relayClientStatus := make(map[string]bool)
|
||||
relayClientLatency := make(map[string]int)
|
||||
for _, relay := range s.relayService.Relays() {
|
||||
latency, ok := s.relayService.RelayStatus(relay)
|
||||
relayClientStatus[relay] = ok
|
||||
relayClientLatency[relay] = int(latency / time.Millisecond)
|
||||
}
|
||||
res["relayClientStatus"] = relayClientStatus
|
||||
res["relayClientLatency"] = relayClientLatency
|
||||
}
|
||||
|
||||
res["connectionServiceStatus"] = s.connectionsService.Status()
|
||||
|
||||
cpuUsageLock.RLock()
|
||||
var cpusum float64
|
||||
for _, p := range cpuUsagePercent {
|
||||
@@ -842,7 +843,6 @@ func (s *apiService) getSystemStatus(w http.ResponseWriter, r *http.Request) {
|
||||
res["pathSeparator"] = string(filepath.Separator)
|
||||
res["uptime"] = int(time.Since(startTime).Seconds())
|
||||
res["startTime"] = startTime
|
||||
res["themes"] = s.themes
|
||||
|
||||
sendJSON(w, res)
|
||||
}
|
||||
@@ -922,6 +922,16 @@ func (s *apiService) getReport(w http.ResponseWriter, r *http.Request) {
|
||||
sendJSON(w, reportData(s.cfg, s.model))
|
||||
}
|
||||
|
||||
func (s *apiService) getRandomString(w http.ResponseWriter, r *http.Request) {
|
||||
length := 32
|
||||
if val, _ := strconv.Atoi(r.URL.Query().Get("length")); val > 0 {
|
||||
length = val
|
||||
}
|
||||
str := rand.String(length)
|
||||
|
||||
sendJSON(w, map[string]string{"random": str})
|
||||
}
|
||||
|
||||
func (s *apiService) getDBIgnores(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
|
||||
@@ -933,17 +943,22 @@ func (s *apiService) getDBIgnores(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
sendJSON(w, map[string][]string{
|
||||
"ignore": ignores,
|
||||
"patterns": patterns,
|
||||
"expanded": patterns,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *apiService) postDBIgnores(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
|
||||
var data map[string][]string
|
||||
err := json.NewDecoder(r.Body).Decode(&data)
|
||||
bs, err := ioutil.ReadAll(r.Body)
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
var data map[string][]string
|
||||
err = json.Unmarshal(bs, &data)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
@@ -1086,7 +1101,7 @@ func (s *apiService) postDBScan(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
subs := qs["sub"]
|
||||
err = s.model.ScanFolderSubs(folder, subs)
|
||||
err = s.model.ScanFolderSubdirs(folder, subs)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
@@ -1130,7 +1145,7 @@ func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
|
||||
for _, device := range folder.DeviceIDs() {
|
||||
deviceStr := device.String()
|
||||
if s.model.ConnectedTo(device) {
|
||||
tot[deviceStr] += s.model.Completion(device, folder.ID)
|
||||
tot[deviceStr] += s.model.Completion(device, folder.ID).CompletionPct
|
||||
} else {
|
||||
tot[deviceStr] = 0
|
||||
}
|
||||
@@ -1149,150 +1164,55 @@ func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *apiService) getSystemBrowse(w http.ResponseWriter, r *http.Request) {
|
||||
qs := r.URL.Query()
|
||||
current := qs.Get("current")
|
||||
if current == "" {
|
||||
if roots, err := osutil.GetFilesystemRoots(); err == nil {
|
||||
sendJSON(w, roots)
|
||||
} else {
|
||||
http.Error(w, err.Error(), 500)
|
||||
}
|
||||
return
|
||||
}
|
||||
search, _ := osutil.ExpandTilde(current)
|
||||
pathSeparator := string(os.PathSeparator)
|
||||
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
|
||||
search = search + pathSeparator
|
||||
}
|
||||
subdirectories, _ := osutil.Glob(search + "*")
|
||||
ret := make([]string, 0, 10)
|
||||
ret := make([]string, 0, len(subdirectories))
|
||||
for _, subdirectory := range subdirectories {
|
||||
info, err := os.Stat(subdirectory)
|
||||
if err == nil && info.IsDir() {
|
||||
ret = append(ret, subdirectory+pathSeparator)
|
||||
if len(ret) > 9 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sendJSON(w, ret)
|
||||
}
|
||||
|
||||
type embeddedStatic struct {
|
||||
theme string
|
||||
lastModified time.Time
|
||||
mut sync.RWMutex
|
||||
assetDir string
|
||||
assets map[string][]byte
|
||||
func (s *apiService) getCPUProf(w http.ResponseWriter, r *http.Request) {
|
||||
duration, err := time.ParseDuration(r.FormValue("duration"))
|
||||
if err != nil {
|
||||
duration = 30 * time.Second
|
||||
}
|
||||
|
||||
filename := fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, Version, time.Now().Format("150405")) // hhmmss
|
||||
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename="+filename)
|
||||
|
||||
pprof.StartCPUProfile(w)
|
||||
time.Sleep(duration)
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
func (s embeddedStatic) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
file := r.URL.Path
|
||||
func (s *apiService) getHeapProf(w http.ResponseWriter, r *http.Request) {
|
||||
filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, Version, time.Now().Format("150405")) // hhmmss
|
||||
|
||||
if file[0] == '/' {
|
||||
file = file[1:]
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", "attachment; filename="+filename)
|
||||
|
||||
if len(file) == 0 {
|
||||
file = "index.html"
|
||||
}
|
||||
|
||||
s.mut.RLock()
|
||||
theme := s.theme
|
||||
modified := s.lastModified
|
||||
s.mut.RUnlock()
|
||||
|
||||
// Check for an override for the current theme.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, s.theme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in asset for the current theme.
|
||||
bs, ok := s.assets[theme+"/"+file]
|
||||
if !ok {
|
||||
// Check for an overriden default asset.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, config.DefaultTheme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in default asset.
|
||||
bs, ok = s.assets[config.DefaultTheme+"/"+file]
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if modifiedSince, err := time.Parse(r.Header.Get("If-Modified-Since"), http.TimeFormat); err == nil && modified.Before(modifiedSince) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
|
||||
mtype := s.mimeTypeForFile(file)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// ungzip if browser not send gzip accepted header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
w.Header().Set("Last-Modified", modified.Format(http.TimeFormat))
|
||||
w.Header().Set("Cache-Control", "public")
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func (s embeddedStatic) mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
// VerifyConfiguration implements the config.Committer interface
|
||||
func (s *embeddedStatic) VerifyConfiguration(from, to config.Configuration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommitConfiguration implements the config.Committer interface
|
||||
func (s *embeddedStatic) CommitConfiguration(from, to config.Configuration) bool {
|
||||
s.mut.Lock()
|
||||
if s.theme != to.GUI.Theme {
|
||||
s.theme = to.GUI.Theme
|
||||
s.lastModified = time.Now()
|
||||
}
|
||||
s.mut.Unlock()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *embeddedStatic) String() string {
|
||||
return fmt.Sprintf("embeddedStatic@%p", s)
|
||||
runtime.GC()
|
||||
pprof.WriteHeapProfile(w)
|
||||
}
|
||||
|
||||
func (s *apiService) toNeedSlice(fs []db.FileInfoTruncated) []jsonDBFileInfo {
|
||||
@@ -1309,13 +1229,17 @@ type jsonFileInfo protocol.FileInfo
|
||||
|
||||
func (f jsonFileInfo) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"name": f.Name,
|
||||
"size": protocol.FileInfo(f).Size(),
|
||||
"flags": fmt.Sprintf("%#o", f.Flags),
|
||||
"modified": time.Unix(f.Modified, 0),
|
||||
"localVersion": f.LocalVersion,
|
||||
"numBlocks": len(f.Blocks),
|
||||
"version": jsonVersionVector(f.Version),
|
||||
"name": f.Name,
|
||||
"type": f.Type,
|
||||
"size": f.Size,
|
||||
"permissions": fmt.Sprintf("%#o", f.Permissions),
|
||||
"deleted": f.Deleted,
|
||||
"invalid": f.Invalid,
|
||||
"noPermissions": f.NoPermissions,
|
||||
"modified": protocol.FileInfo(f).ModTime(),
|
||||
"sequence": f.Sequence,
|
||||
"numBlocks": len(f.Blocks),
|
||||
"version": jsonVersionVector(f.Version),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1323,20 +1247,23 @@ type jsonDBFileInfo db.FileInfoTruncated
|
||||
|
||||
func (f jsonDBFileInfo) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"name": f.Name,
|
||||
"size": db.FileInfoTruncated(f).Size(),
|
||||
"flags": fmt.Sprintf("%#o", f.Flags),
|
||||
"modified": time.Unix(f.Modified, 0),
|
||||
"localVersion": f.LocalVersion,
|
||||
"version": jsonVersionVector(f.Version),
|
||||
"name": f.Name,
|
||||
"type": f.Type,
|
||||
"size": f.Size,
|
||||
"permissions": fmt.Sprintf("%#o", f.Permissions),
|
||||
"deleted": f.Deleted,
|
||||
"invalid": f.Invalid,
|
||||
"noPermissions": f.NoPermissions,
|
||||
"modified": db.FileInfoTruncated(f).ModTime(),
|
||||
"sequence": f.Sequence,
|
||||
})
|
||||
}
|
||||
|
||||
type jsonVersionVector protocol.Vector
|
||||
|
||||
func (v jsonVersionVector) MarshalJSON() ([]byte, error) {
|
||||
res := make([]string, len(v))
|
||||
for i, c := range v {
|
||||
res := make([]string, len(v.Counters))
|
||||
for i, c := range v.Counters {
|
||||
res[i] = fmt.Sprintf("%v:%d", c.ID, c.Value)
|
||||
}
|
||||
return json.Marshal(res)
|
||||
|
||||
@@ -9,13 +9,13 @@ package main
import (
"bytes"
"encoding/base64"
"math/rand"
"net/http"
"strings"
"time"

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sync"
"golang.org/x/crypto/bcrypt"
)
@@ -77,20 +77,43 @@ func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguratio
return
}

// Check if the username is correct, assuming it was sent as UTF-8
username := string(fields[0])
if username != cfg.User {
emitLoginAttempt(false, username)
error()
return
if username == cfg.User {
goto usernameOK
}

if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), fields[1]); err != nil {
emitLoginAttempt(false, username)
error()
return
// ... check it again, converting it from assumed ISO-8859-1 to UTF-8
username = string(iso88591ToUTF8(fields[0]))
if username == cfg.User {
goto usernameOK
}

sessionid := randomString(32)
// Neither of the possible interpretations match the configured username
emitLoginAttempt(false, username)
error()
return

usernameOK:
// Check password as given (assumes UTF-8 encoding)
password := fields[1]
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), password); err == nil {
goto passwordOK
}

// ... check it again, converting it from assumed ISO-8859-1 to UTF-8
password = iso88591ToUTF8(password)
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), password); err == nil {
goto passwordOK
}

// Neither of the attempts to verify the password checked out
emitLoginAttempt(false, username)
error()
return

passwordOK:
sessionid := rand.String(32)
sessionsMut.Lock()
sessions[sessionid] = true
sessionsMut.Unlock()
@@ -104,3 +127,15 @@ func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguratio
next.ServeHTTP(w, r)
})
}

// Convert an ISO-8859-1 encoded byte string to UTF-8. Works by the
// principle that ISO-8859-1 bytes are equivalent to unicode code points,
// that a rune slice is a list of code points, and that stringifying a slice
// of runes generates UTF-8 in Go.
func iso88591ToUTF8(s []byte) []byte {
runes := make([]rune, len(s))
for i := range s {
runes[i] = rune(s[i])
}
return []byte(string(runes))
}

@@ -15,6 +15,7 @@ import (

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sync"
)

@@ -40,7 +41,15 @@ func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, n
return
}

// Allow requests for the front page, and set a CSRF cookie if there isn't already a valid one.
if strings.HasPrefix(r.URL.Path, "/rest/debug") {
// Debugging functions are only available when explicitly
// enabled, and can be accessed without a CSRF token
next.ServeHTTP(w, r)
return
}

// Allow requests for anything not under the protected path prefix,
// and set a CSRF cookie if there isn't already a valid one.
if !strings.HasPrefix(r.URL.Path, prefix) {
cookie, err := r.Cookie("CSRF-Token-" + unique)
if err != nil || !validCsrfToken(cookie.Value) {
@@ -55,18 +64,6 @@ func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, n
return
}

if r.Method == "GET" {
// Allow GET requests unconditionally, but if we got the CSRF
// token cookie do the verification anyway so we keep the
// csrfTokens list sorted by recent usage. We don't care about the
// outcome of the validity check.
if cookie, err := r.Cookie("CSRF-Token-" + unique); err == nil {
validCsrfToken(cookie.Value)
}
next.ServeHTTP(w, r)
return
}

// Verify the CSRF token
token := r.Header.Get("X-CSRF-Token-" + unique)
if !validCsrfToken(token) {
@@ -97,7 +94,7 @@ func validCsrfToken(token string) bool {
}

func newCsrfToken() string {
token := randomString(32)
token := rand.String(32)

csrfMut.Lock()
csrfTokens = append([]string{token}, csrfTokens...)

@@ -78,7 +78,7 @@ func trackCPUUsage() {
var prevTime = time.Now().UnixNano()
var rusage prusage_t
var pid = os.Getpid()
for _ = range time.NewTicker(time.Second).C {
for range time.NewTicker(time.Second).C {
err := solarisPrusage(pid, &rusage)
if err != nil {
l.Warnln("getting prusage:", err)

cmd/syncthing/gui_statics.go (new file, 176 lines)
@@ -0,0 +1,176 @@
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/auto"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
type staticsServer struct {
|
||||
assetDir string
|
||||
assets map[string][]byte
|
||||
availableThemes []string
|
||||
|
||||
mut sync.RWMutex
|
||||
theme string
|
||||
}
|
||||
|
||||
func newStaticsServer(theme, assetDir string) *staticsServer {
|
||||
s := &staticsServer{
|
||||
assetDir: assetDir,
|
||||
assets: auto.Assets(),
|
||||
mut: sync.NewRWMutex(),
|
||||
theme: theme,
|
||||
}
|
||||
|
||||
seen := make(map[string]struct{})
|
||||
// Load themes from compiled in assets.
|
||||
for file := range auto.Assets() {
|
||||
theme := strings.Split(file, "/")[0]
|
||||
if _, ok := seen[theme]; !ok {
|
||||
seen[theme] = struct{}{}
|
||||
s.availableThemes = append(s.availableThemes, theme)
|
||||
}
|
||||
}
|
||||
if assetDir != "" {
|
||||
// Load any extra themes from the asset override dir.
|
||||
for _, dir := range dirNames(assetDir) {
|
||||
if _, ok := seen[dir]; !ok {
|
||||
seen[dir] = struct{}{}
|
||||
s.availableThemes = append(s.availableThemes, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *staticsServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/themes.json":
|
||||
s.serveThemes(w, r)
|
||||
default:
|
||||
s.serveAsset(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
|
||||
file := r.URL.Path
|
||||
|
||||
if file[0] == '/' {
|
||||
file = file[1:]
|
||||
}
|
||||
|
||||
if len(file) == 0 {
|
||||
file = "index.html"
|
||||
}
|
||||
|
||||
s.mut.RLock()
|
||||
theme := s.theme
|
||||
s.mut.RUnlock()
|
||||
|
||||
// Check for an override for the current theme.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, theme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in asset for the current theme.
|
||||
bs, ok := s.assets[theme+"/"+file]
|
||||
if !ok {
|
||||
// Check for an overridden default asset.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, config.DefaultTheme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in default asset.
|
||||
bs, ok = s.assets[config.DefaultTheme+"/"+file]
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
mtype := s.mimeTypeForFile(file)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// ungzip if browser not send gzip accepted header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {
|
||||
sendJSON(w, map[string][]string{
|
||||
"themes": s.availableThemes,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *staticsServer) mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *staticsServer) setTheme(theme string) {
|
||||
s.mut.Lock()
|
||||
s.theme = theme
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
func (s *staticsServer) String() string {
|
||||
return fmt.Sprintf("staticsServer@%p", s)
|
||||
}
|
||||
@@ -9,10 +9,16 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -63,11 +69,8 @@ func TestStopAfterBrokenConfig(t *testing.T) {
|
||||
}
|
||||
w := config.Wrap("/dev/null", cfg)
|
||||
|
||||
srv, err := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srv.started = make(chan struct{})
|
||||
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
|
||||
srv.started = make(chan string)
|
||||
|
||||
sup := suture.NewSimple("test")
|
||||
sup.Add(srv)
|
||||
@@ -85,8 +88,8 @@ func TestStopAfterBrokenConfig(t *testing.T) {
|
||||
RawUseTLS: false,
|
||||
},
|
||||
}
|
||||
if srv.CommitConfiguration(cfg, newCfg) {
|
||||
t.Fatal("Config commit should have failed")
|
||||
if err := srv.VerifyConfiguration(cfg, newCfg); err == nil {
|
||||
t.Fatal("Verify config should have failed")
|
||||
}
|
||||
|
||||
// Nonetheless, it should be fine to Stop() it without panic.
|
||||
@@ -114,7 +117,7 @@ func TestAssetsDir(t *testing.T) {
|
||||
gw.Close()
|
||||
foo := buf.Bytes()
|
||||
|
||||
e := embeddedStatic{
|
||||
e := &staticsServer{
|
||||
theme: "foo",
|
||||
mut: sync.NewRWMutex(),
|
||||
assetDir: "testdata",
|
||||
@@ -133,13 +136,13 @@ func TestAssetsDir(t *testing.T) {
|
||||
// assetsdir/foo/a exists, overrides compiled in
|
||||
expectURLToContain(t, s.URL+"/a", "overridden-foo")
|
||||
|
||||
// foo/b is compiled in, default/b is overriden, return compiled in
|
||||
// foo/b is compiled in, default/b is overridden, return compiled in
|
||||
expectURLToContain(t, s.URL+"/b", "foo")
|
||||
|
||||
// only exists as compiled in default/c so use that
|
||||
expectURLToContain(t, s.URL+"/c", "default")
|
||||
|
||||
// only exists as overriden default/d so use that
|
||||
// only exists as overridden default/d so use that
|
||||
expectURLToContain(t, s.URL+"/d", "overridden-default")
|
||||
}
|
||||
|
||||
@@ -175,3 +178,491 @@ func TestDirNames(t *testing.T) {
|
||||
t.Errorf("Unexpected dirNames return: %#v\n%s", names, diff)
|
||||
}
|
||||
}
|
||||
|
||||
type httpTestCase struct {
|
||||
URL string // URL to check
|
||||
Code int // Expected result code
|
||||
Type string // Expected content type
|
||||
Prefix string // Expected result prefix
|
||||
Timeout time.Duration // Defaults to a second
|
||||
}
|
||||
|
||||
func TestAPIServiceRequests(t *testing.T) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cases := []httpTestCase{
|
||||
// /rest/db
|
||||
{
|
||||
URL: "/rest/db/completion?device=" + protocol.LocalDeviceID.String() + "&folder=default",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/db/file?folder=default&file=something",
|
||||
Code: 404,
|
||||
},
|
||||
{
|
||||
URL: "/rest/db/ignores?folder=default",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/db/need?folder=default",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/db/status?folder=default",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/db/browse?folder=default",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "null",
|
||||
},
|
||||
|
||||
// /rest/stats
|
||||
{
|
||||
URL: "/rest/stats/device",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "null",
|
||||
},
|
||||
{
|
||||
URL: "/rest/stats/folder",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "null",
|
||||
},
|
||||
|
||||
// /rest/svc
|
||||
{
|
||||
URL: "/rest/svc/deviceid?id=" + protocol.LocalDeviceID.String(),
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/svc/lang",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "[",
|
||||
},
|
||||
{
|
||||
URL: "/rest/svc/report",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
Timeout: 5 * time.Second,
|
||||
},
|
||||
|
||||
// /rest/system
|
||||
{
|
||||
URL: "/rest/system/browse?current=~",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "[",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/config",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/config/insync",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/connections",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "null",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/discovery",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/error?since=0",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/ping",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/status",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/version",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/debug",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/log?since=0",
|
||||
Code: 200,
|
||||
Type: "application/json",
|
||||
Prefix: "{",
|
||||
},
|
||||
{
|
||||
URL: "/rest/system/log.txt?since=0",
|
||||
Code: 200,
|
||||
Type: "text/plain",
|
||||
Prefix: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Log("Testing", tc.URL, "...")
|
||||
testHTTPRequest(t, baseURL, tc, testAPIKey)
|
||||
}
|
||||
}
|
||||
|
||||
// testHTTPRequest tries the given test case, comparing the result code,
|
||||
// content type, and result prefix.
|
||||
func testHTTPRequest(t *testing.T, baseURL string, tc httpTestCase, apikey string) {
|
||||
timeout := time.Second
|
||||
if tc.Timeout > 0 {
|
||||
timeout = tc.Timeout
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", baseURL+tc.URL, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error requesting %s: %v", tc.URL, err)
|
||||
return
|
||||
}
|
||||
req.Header.Set("X-API-Key", apikey)
|
||||
|
||||
resp, err := cli.Do(req)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error requesting %s: %v", tc.URL, err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != tc.Code {
|
||||
t.Errorf("Get on %s should have returned status code %d, not %s", tc.URL, tc.Code, resp.Status)
|
||||
return
|
||||
}
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
if !strings.HasPrefix(ct, tc.Type) {
|
||||
t.Errorf("The content type on %s should be %q, not %q", tc.URL, tc.Type, ct)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error reading %s: %v", tc.URL, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.HasPrefix(data, []byte(tc.Prefix)) {
|
||||
t.Errorf("Returned data from %s does not have prefix %q: %s", tc.URL, tc.Prefix, data)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPLogin(t *testing.T) {
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.User = "üser"
|
||||
cfg.gui.Password = "$2a$10$IdIZTxTg/dCNuNEGlmLynOjqg4B1FvDKuIV5e0BB3pnWVHNb8.GSq" // bcrypt of "räksmörgås" in UTF-8
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify rejection when not using authorization
|
||||
|
||||
req, _ := http.NewRequest("GET", baseURL, nil)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Errorf("Unexpected non-401 return code %d for unauthed request", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Verify that incorrect password is rejected
|
||||
|
||||
req.SetBasicAuth("üser", "rksmrgs")
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Errorf("Unexpected non-401 return code %d for incorrect password", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Verify that incorrect username is rejected
|
||||
|
||||
req.SetBasicAuth("user", "räksmörgås") // string literals in Go source code are in UTF-8
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Errorf("Unexpected non-401 return code %d for incorrect username", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Verify that UTF-8 auth works
|
||||
|
||||
req.SetBasicAuth("üser", "räksmörgås") // string literals in Go source code are in UTF-8
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Unexpected non-200 return code %d for authed request (UTF-8)", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Verify that ISO-8859-1 auth
|
||||
|
||||
req.SetBasicAuth("\xfcser", "r\xe4ksm\xf6rg\xe5s") // escaped ISO-8859-1
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Unexpected non-200 return code %d for authed request (ISO-8859-1)", resp.StatusCode)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func startHTTP(cfg *mockedConfig) (string, error) {
|
||||
model := new(mockedModel)
|
||||
httpsCertFile := "../../test/h1/https-cert.pem"
|
||||
httpsKeyFile := "../../test/h1/https-key.pem"
|
||||
assetDir := "../../gui"
|
||||
eventSub := new(mockedEventSub)
|
||||
discoverer := new(mockedCachingMux)
|
||||
connections := new(mockedConnections)
|
||||
errorLog := new(mockedLoggerRecorder)
|
||||
systemLog := new(mockedLoggerRecorder)
|
||||
addrChan := make(chan string)
|
||||
|
||||
// Instantiate the API service
|
||||
svc := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model,
|
||||
eventSub, discoverer, connections, errorLog, systemLog)
|
||||
svc.started = addrChan
|
||||
|
||||
// Actually start the API service
|
||||
supervisor := suture.NewSimple("API test")
|
||||
supervisor.Add(svc)
|
||||
supervisor.ServeBackground()
|
||||
|
||||
// Make sure the API service is listening, and get the URL to use.
|
||||
addr := <-addrChan
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Weird address from API service: %v", err)
|
||||
}
|
||||
baseURL := fmt.Sprintf("http://127.0.0.1:%d", tcpAddr.Port)
|
||||
|
||||
return baseURL, nil
|
||||
}
|
||||
|
||||
func TestCSRFRequired(t *testing.T) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
// Getting the base URL (i.e. "/") should succeed.
|
||||
|
||||
resp, err := cli.Get(baseURL)
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error from getting base URL:", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatal("Getting base URL should succeed, not", resp.Status)
|
||||
}
|
||||
|
||||
// Find the returned CSRF token for future use
|
||||
|
||||
var csrfTokenName, csrfTokenValue string
|
||||
for _, cookie := range resp.Cookies() {
|
||||
if strings.HasPrefix(cookie.Name, "CSRF-Token") {
|
||||
csrfTokenName = cookie.Name
|
||||
csrfTokenValue = cookie.Value
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Calling on /rest without a token should fail
|
||||
|
||||
resp, err = cli.Get(baseURL + "/rest/system/config")
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error from getting /rest/system/config:", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusForbidden {
|
||||
t.Fatal("Getting /rest/system/config without CSRF token should fail, not", resp.Status)
|
||||
}
|
||||
|
||||
// Calling on /rest with a token should succeed
|
||||
|
||||
req, _ := http.NewRequest("GET", baseURL+"/rest/system/config", nil)
|
||||
req.Header.Set("X-"+csrfTokenName, csrfTokenValue)
|
||||
resp, err = cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error from getting /rest/system/config:", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatal("Getting /rest/system/config with CSRF token should succeed, not", resp.Status)
|
||||
}
|
||||
|
||||
// Calling on /rest with the API key should succeed
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL+"/rest/system/config", nil)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
resp, err = cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error from getting /rest/system/config:", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatal("Getting /rest/system/config with API key should succeed, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomString(t *testing.T) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
// The default should be to return a 32 character random string
|
||||
|
||||
for _, url := range []string{"/rest/svc/random/string", "/rest/svc/random/string?length=-1", "/rest/svc/random/string?length=yo"} {
|
||||
req, _ := http.NewRequest("GET", baseURL+url, nil)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
resp, err := cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var res map[string]string
|
||||
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(res["random"]) != 32 {
|
||||
t.Errorf("Expected 32 random characters, got %q of length %d", res["random"], len(res["random"]))
|
||||
}
|
||||
}
|
||||
|
||||
// We can ask for a different length if we like
|
||||
|
||||
req, _ := http.NewRequest("GET", baseURL+"/rest/svc/random/string?length=27", nil)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
resp, err := cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var res map[string]string
|
||||
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(res["random"]) != 27 {
|
||||
t.Errorf("Expected 27 random characters, got %q of length %d", res["random"], len(res["random"]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPostOK(t *testing.T) {
|
||||
cfg := bytes.NewBuffer([]byte(`{
|
||||
"version": 15,
|
||||
"folders": [
|
||||
{"id": "foo"}
|
||||
]
|
||||
}`))
|
||||
|
||||
resp, err := testConfigPost(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Expected 200 OK, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPostDupFolder(t *testing.T) {
|
||||
cfg := bytes.NewBuffer([]byte(`{
|
||||
"version": 15,
|
||||
"folders": [
|
||||
{"id": "foo"},
|
||||
{"id": "foo"}
|
||||
]
|
||||
}`))
|
||||
|
||||
resp, err := testConfigPost(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusBadRequest {
|
||||
t.Error("Expected 400 Bad Request, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigPost(data io.Reader) (*http.Response, error) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("POST", baseURL+"/rest/system/config", data)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
return cli.Do(req)
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ func trackCPUUsage() {
|
||||
var prevUsage int64
|
||||
var prevTime = time.Now().UnixNano()
|
||||
var rusage syscall.Rusage
|
||||
for _ = range time.NewTicker(time.Second).C {
|
||||
for range time.NewTicker(time.Second).C {
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
|
||||
curTime := time.Now().UnixNano()
|
||||
timeDiff := curTime - prevTime
|
||||
|
||||
@@ -34,7 +34,7 @@ func trackCPUUsage() {
|
||||
prevTime := ctime.Nanoseconds()
|
||||
prevUsage := ktime.Nanoseconds() + utime.Nanoseconds() // Always overflows
|
||||
|
||||
for _ = range time.NewTicker(time.Second).C {
|
||||
for range time.NewTicker(time.Second).C {
|
||||
err := syscall.GetProcessTimes(handle, &ctime, &etime, &ktime, &utime)
|
||||
if err != nil {
|
||||
continue
|
||||
@@ -42,6 +42,10 @@ func trackCPUUsage() {
|
||||
|
||||
curTime := time.Now().UnixNano()
|
||||
timeDiff := curTime - prevTime
|
||||
// This is sometimes 0, no clue why.
|
||||
if timeDiff == 0 {
|
||||
continue
|
||||
}
|
||||
curUsage := ktime.Nanoseconds() + utime.Nanoseconds()
|
||||
usageDiff := curUsage - prevUsage
|
||||
cpuUsageLock.Lock()
|
||||
|
||||
@@ -48,7 +48,7 @@ var locations = map[locationEnum]string{
locKeyFile: "${config}/key.pem",
locHTTPSCertFile: "${config}/https-cert.pem",
locHTTPSKeyFile: "${config}/https-key.pem",
locDatabase: "${config}/index-v0.13.0.db",
locDatabase: "${config}/index-v0.14.0.db",
locLogFile: "${config}/syncthing.log", // -logfile on Windows
locCsrfTokens: "${config}/csrftokens.txt",
locPanicLog: "${config}/panic-${timestamp}.log",

@@ -16,10 +16,10 @@ import (
"log"
"net"
"net/http"
_ "net/http/pprof"
"net/url"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"runtime"
@@ -40,7 +40,7 @@ import (
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/relay"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/upgrade"
@@ -50,7 +50,7 @@ import (

var (
Version = "unknown-dev"
Codename = "Copper Cockroach"
Codename = "Dysprosium Dragonfly"
BuildStamp = "0"
BuildDate time.Time
BuildHost = "unknown"
@@ -115,7 +115,6 @@ func init() {
var (
myID protocol.DeviceID
stop = make(chan int)
cert tls.Certificate
lans []*net.IPNet
)

@@ -478,8 +477,13 @@ func performUpgrade(release upgrade.Release) {
|
||||
|
||||
func upgradeViaRest() error {
|
||||
cfg, _ := loadConfig()
|
||||
target := cfg.GUI().URL()
|
||||
r, _ := http.NewRequest("POST", target+"/rest/system/upgrade", nil)
|
||||
u, err := url.Parse(cfg.GUI().URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.Path = path.Join(u.Path, "rest/system/upgrade")
|
||||
target := u.String()
|
||||
r, _ := http.NewRequest("POST", target, nil)
|
||||
r.Header.Set("X-API-Key", cfg.GUI().APIKey)
|
||||
|
||||
tr := &http.Transport{
|
||||
@@ -534,8 +538,10 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)

// Event subscription for the API; must start early to catch the early events.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
// Event subscription for the API; must start early to catch the early
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)

if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())

@@ -556,10 +562,6 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}

// We reinitialize the predictable RNG with our device ID, to get a
// sequence that is always the same but unique to this syncthing instance.
predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

myID = protocol.NewDeviceID(cert.Certificate[0])
l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))
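The new subscription line uses Go's bit-clear operator (&^) to drop a single event type from the all-events mask. A minimal, self-contained sketch of how that masking behaves, using hypothetical flag constants rather than the real events package:

    package main

    import "fmt"

    // Hypothetical event flags, one bit per event type, mirroring how such a
    // mask is usually declared; these are not the real events constants.
    const (
        LocalIndexUpdated   = 1 << iota // 1
        RemoteIndexUpdated              // 2
        LocalChangeDetected             // 4

        AllEvents = LocalIndexUpdated | RemoteIndexUpdated | LocalChangeDetected
    )

    func main() {
        mask := AllEvents &^ LocalChangeDetected   // clear one bit from the mask
        fmt.Println(mask&LocalChangeDetected == 0) // true: that event is filtered out
        fmt.Println(mask&LocalIndexUpdated != 0)   // true: everything else still passes
    }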
@@ -661,15 +663,14 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}

// Pack and optimize the database
if err := ldb.Compact(); err != nil {
// I don't think this is fatal, but who knows. If it is, we'll surely
// get an error when trying to write to the db later.
l.Infoln("Compacting database:", err)
if cfg.Raw().OriginalVersion == 15 {
// The config version 15->16 migration is about handling ignores and
// delta indexes and requires that we drop existing indexes that
// have been incorrectly ignore filtered.
ldb.DropDeltaIndexIDs()
}

m := model.NewModel(cfg, myID, myDeviceName(cfg), "syncthing", Version, ldb, protectedFiles)
cfg.Subscribe(m)

if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t)
@@ -686,74 +687,28 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}

// Clear out old indexes for other devices. Otherwise we'll start up and
// start needing a bunch of files which are nowhere to be found. This
// needs to be changed when we correctly do persistent indexes.
// Add and start folders
for _, folderCfg := range cfg.Folders() {
m.AddFolder(folderCfg)
for _, device := range folderCfg.DeviceIDs() {
if device == myID {
continue
}
m.Index(device, folderCfg.ID, nil, 0, nil)
}
// Routine to pull blocks from other devices to synchronize the local
// folder. Does not run when we are in read only (publish only) mode.
if folderCfg.ReadOnly {
m.StartFolderRO(folderCfg.ID)
} else {
m.StartFolderRW(folderCfg.ID)
}
m.StartFolder(folderCfg.ID)
}

mainService.Add(m)

// The default port we announce, possibly modified by setupUPnP next.

uri, err := url.Parse(opts.ListenAddress[0])
if err != nil {
l.Fatalf("Failed to parse listen address %s: %v", opts.ListenAddress[0], err)
}

addr, err := net.ResolveTCPAddr("tcp", uri.Host)
if err != nil {
l.Fatalln("Bad listen address:", err)
}

// The externalAddr tracks our external addresses for discovery purposes.

var addrList *addressLister

// Start UPnP

if opts.UPnPEnabled {
upnpService := newUPnPService(cfg, addr.Port)
mainService.Add(upnpService)

// The external address tracker needs to know about the UPnP service
// so it can check for an external mapped port.
addrList = newAddressLister(upnpService, cfg)
} else {
addrList = newAddressLister(nil, cfg)
}

// Start relay management

var relayService relay.Service
if opts.RelaysEnabled {
relayService = relay.NewService(cfg, tlsCfg)
mainService.Add(relayService)
}

// Start discovery

cachedDiscovery := discover.NewCachingMux()
mainService.Add(cachedDiscovery)

// Start connection management

connectionsService := connections.NewService(cfg, myID, m, tlsCfg, cachedDiscovery, bepProtocolName, tlsDefaultCommonName, lans)
mainService.Add(connectionsService)

if cfg.Options().GlobalAnnEnabled {
for _, srv := range cfg.GlobalDiscoveryServers() {
l.Infoln("Using discovery server", srv)
gd, err := discover.NewGlobal(srv, cert, addrList, relayService)
gd, err := discover.NewGlobal(srv, cert, connectionsService)
if err != nil {
l.Warnln("Global discovery:", err)
continue
@@ -768,14 +723,14 @@ func syncthingMain(runtimeOptions RuntimeOptions) {

if cfg.Options().LocalAnnEnabled {
// v4 broadcasts
bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), addrList, relayService)
bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), connectionsService)
if err != nil {
l.Warnln("IPv4 local discovery:", err)
} else {
cachedDiscovery.Add(bcd, 0, 0, ipv4LocalDiscoveryPriority)
}
// v6 multicasts
mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, addrList, relayService)
mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, connectionsService)
if err != nil {
l.Warnln("IPv6 local discovery:", err)
} else {
@@ -785,12 +740,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {

// GUI

setupGUI(mainService, cfg, m, apiSub, cachedDiscovery, relayService, errors, systemLog, runtimeOptions)

// Start connection management

connectionService := connections.NewConnectionService(cfg, myID, m, tlsCfg, cachedDiscovery, relayService, bepProtocolName, tlsDefaultCommonName, lans)
mainService.Add(connectionService)
setupGUI(mainService, cfg, m, apiSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)

if runtimeOptions.cpuProfile {
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
@@ -816,7 +766,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
if opts.URUniqueID == "" {
// Previously the ID was generated from the node ID. We now need
// to generate a new one.
opts.URUniqueID = randomString(8)
opts.URUniqueID = rand.String(8)
cfg.SetOptions(opts)
cfg.Save()
}

@@ -834,10 +784,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
if opts.AutoUpgradeIntervalH > 0 {
if noUpgrade {
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
} else if IsRelease {
go autoUpgrade(cfg)
} else {
l.Infof("No automatic upgrades; %s is not a release version.", Version)
go autoUpgrade(cfg)
}
}
@@ -913,7 +861,6 @@ func loadConfig() (*config.Wrapper, error) {
cfg, err := config.Load(cfgFile, myID)

if err != nil {
l.Infoln("Error loading config file; using defaults for now")
myName, _ := os.Hostname()
newCfg := defaultConfig(myName)
cfg = config.Wrap(cfgFile, newCfg)

@@ -972,7 +919,7 @@ func startAuditing(mainService *suture.Supervisor) {
l.Infoln("Audit log in", auditFile)
}

func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, discoverer discover.CachingMux, relayService relay.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
guiCfg := cfg.GUI()

if !guiCfg.Enabled {

@@ -983,10 +930,7 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
l.Warnln("Insecure admin access is enabled.")
}

api, err := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, discoverer, relayService, errors, systemLog)
if err != nil {
l.Fatalln("Cannot start GUI:", err)
}
api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, discoverer, connectionsService, errors, systemLog)
cfg.Subscribe(api)
mainService.Add(api)
@@ -1002,8 +946,9 @@ func defaultConfig(myName string) config.Configuration {

if !noDefaultFolder {
l.Infoln("Default folder created and/or linked to new config")

defaultFolder = config.NewFolderConfiguration("default", locations[locDefFolder])
folderID := strings.ToLower(rand.String(5) + "-" + rand.String(5))
defaultFolder = config.NewFolderConfiguration(folderID, locations[locDefFolder])
defaultFolder.Label = "Default Folder (" + folderID + ")"
defaultFolder.RescanIntervalS = 60
defaultFolder.MinDiskFreePct = 1
defaultFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: myID}}

@@ -1032,7 +977,15 @@ func defaultConfig(myName string) config.Configuration {
if err != nil {
l.Fatalln("get free port (BEP):", err)
}
newCfg.Options.ListenAddress = []string{fmt.Sprintf("tcp://0.0.0.0:%d", port)}
if port == 22000 {
newCfg.Options.ListenAddresses = []string{"default"}
} else {
newCfg.Options.ListenAddresses = []string{
fmt.Sprintf("tcp://%s", net.JoinHostPort("0.0.0.0", strconv.Itoa(port))),
"dynamic+https://relays.syncthing.net/endpoint",
}
}

return newCfg
}
@@ -1171,13 +1124,16 @@ func autoUpgrade(cfg *config.Wrapper) {
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
patterns := map[string]time.Duration{
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.11.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.13.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"tmp-index-sorter.*": time.Minute, // these should never exist on startup
}

for pat, dur := range patterns {
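The retention table above maps glob patterns to maximum ages. A sketch of the kind of sweep that consumes such a table; the function and the directory used here are illustrative, not the actual cleanConfigDirectory implementation:

    package main

    import (
        "log"
        "os"
        "path/filepath"
        "time"
    )

    // sweep removes entries under dir that match pat and are older than maxAge.
    func sweep(dir, pat string, maxAge time.Duration) {
        matches, err := filepath.Glob(filepath.Join(dir, pat))
        if err != nil {
            return // only happens for a malformed pattern
        }
        for _, name := range matches {
            info, err := os.Stat(name)
            if err != nil {
                continue
            }
            if time.Since(info.ModTime()) > maxAge {
                if err := os.RemoveAll(name); err != nil {
                    log.Println("cleaning:", err)
                }
            }
        }
    }

    func main() {
        patterns := map[string]time.Duration{
            "panic-*.log": 7 * 24 * time.Hour,
            "*.idx.gz":    30 * 24 * time.Hour,
        }
        for pat, dur := range patterns {
            sweep(os.TempDir(), pat, dur) // stand-in directory for the example
        }
    }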
@@ -7,151 +7,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func TestFolderErrors(t *testing.T) {
|
||||
// This test intentionally avoids starting the folders. If they are
|
||||
// started, they will perform an initial scan, which will create missing
|
||||
// folder markers and race with the stuff we do in the test.
|
||||
|
||||
fcfg := config.FolderConfiguration{
|
||||
ID: "folder",
|
||||
RawPath: "testdata/testfolder",
|
||||
}
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
})
|
||||
|
||||
for _, file := range []string{".stfolder", "testfolder/.stfolder", "testfolder"} {
|
||||
if err := os.Remove("testdata/" + file); err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
ldb := db.OpenMemory()
|
||||
|
||||
// Case 1 - new folder, directory and marker created
|
||||
|
||||
m := model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
|
||||
}
|
||||
|
||||
s, err := os.Stat("testdata/testfolder")
|
||||
if err != nil || !s.IsDir() {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
_, err = os.Stat("testdata/testfolder/.stfolder")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := os.Remove("testdata/testfolder/.stfolder"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Remove("testdata/testfolder/"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Case 2 - new folder, marker created
|
||||
|
||||
fcfg.RawPath = "testdata/"
|
||||
cfg = config.Wrap("/tmp/test", config.Configuration{
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
|
||||
}
|
||||
|
||||
_, err = os.Stat("testdata/.stfolder")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := os.Remove("testdata/.stfolder"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Case 3 - Folder marker missing
|
||||
|
||||
set := db.NewFileSet("folder", ldb)
|
||||
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
|
||||
{Name: "dummyfile"},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
|
||||
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
|
||||
}
|
||||
|
||||
// Case 3.1 - recover after folder marker missing
|
||||
|
||||
if err = fcfg.CreateMarker(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
|
||||
}
|
||||
|
||||
// Case 4 - Folder path missing
|
||||
|
||||
if err := os.Remove("testdata/testfolder/.stfolder"); err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Remove("testdata/testfolder"); err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fcfg.RawPath = "testdata/testfolder"
|
||||
cfg = config.Wrap("testdata/subfolder", config.Configuration{
|
||||
Folders: []config.FolderConfiguration{fcfg},
|
||||
})
|
||||
|
||||
m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
|
||||
m.AddFolder(fcfg)
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder path missing" {
|
||||
t.Error("Incorrect error: Folder path missing !=", m.CheckFolderHealth("folder"))
|
||||
}
|
||||
|
||||
// Case 4.1 - recover after folder path missing
|
||||
|
||||
if err := os.Mkdir("testdata/testfolder", 0700); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
|
||||
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
|
||||
}
|
||||
|
||||
// Case 4.2 - recover after missing marker
|
||||
|
||||
if err = fcfg.CreateMarker(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := m.CheckFolderHealth("folder"); err != nil {
|
||||
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestShortIDCheck(t *testing.T) {
|
||||
cfg := config.Wrap("/tmp/test", config.Configuration{
|
||||
Devices: []config.DeviceConfiguration{
|
||||
|
||||
@@ -20,9 +20,8 @@ var (
|
||||
func memorySize() (int64, error) {
|
||||
var memoryStatusEx [64]byte
|
||||
binary.LittleEndian.PutUint32(memoryStatusEx[:], 64)
|
||||
p := uintptr(unsafe.Pointer(&memoryStatusEx[0]))
|
||||
|
||||
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, p, 0, 0)
|
||||
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, uintptr(unsafe.Pointer(&memoryStatusEx[0])), 0, 0)
|
||||
if ret == 0 {
|
||||
return 0, callErr
|
||||
}
|
||||
|
||||
cmd/syncthing/mocked_config_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type mockedConfig struct {
|
||||
gui config.GUIConfiguration
|
||||
}
|
||||
|
||||
func (c *mockedConfig) GUI() config.GUIConfiguration {
|
||||
return c.gui
|
||||
}
|
||||
|
||||
func (c *mockedConfig) ListenAddresses() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Raw() config.Configuration {
|
||||
return config.Configuration{}
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Options() config.OptionsConfiguration {
|
||||
return config.OptionsConfiguration{}
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Replace(cfg config.Configuration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Subscribe(cm config.Committer) {}
|
||||
|
||||
func (c *mockedConfig) Folders() map[string]config.FolderConfiguration {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Devices() map[protocol.DeviceID]config.DeviceConfiguration {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) Save() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *mockedConfig) RequiresRestart() bool {
|
||||
return false
|
||||
}
|
||||
cmd/syncthing/mocked_connections_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
type mockedConnections struct{}
|
||||
|
||||
func (m *mockedConnections) Status() map[string]interface{} {
|
||||
return nil
|
||||
}
|
||||
cmd/syncthing/mocked_discovery_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type mockedCachingMux struct{}
|
||||
|
||||
// from suture.Service
|
||||
|
||||
func (m *mockedCachingMux) Serve() {
|
||||
select {}
|
||||
}
|
||||
|
||||
func (m *mockedCachingMux) Stop() {
|
||||
}
|
||||
|
||||
// from events.Finder
|
||||
|
||||
func (m *mockedCachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockedCachingMux) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedCachingMux) String() string {
|
||||
return "mockedCachingMux"
|
||||
}
|
||||
|
||||
func (m *mockedCachingMux) Cache() map[protocol.DeviceID]discover.CacheEntry {
|
||||
return nil
|
||||
}
|
||||
|
||||
// from events.CachingMux
|
||||
|
||||
func (m *mockedCachingMux) Add(finder discover.Finder, cacheTime, negCacheTime time.Duration, priority int) {
|
||||
}
|
||||
|
||||
func (m *mockedCachingMux) ChildErrors() map[string]error {
|
||||
return nil
|
||||
}
|
||||
cmd/syncthing/mocked_events_test.go (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/syncthing/syncthing/lib/events"
|
||||
|
||||
type mockedEventSub struct{}
|
||||
|
||||
func (s *mockedEventSub) Since(id int, into []events.Event) []events.Event {
|
||||
select {}
|
||||
}
|
||||
cmd/syncthing/mocked_logger_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
type mockedLoggerRecorder struct{}
|
||||
|
||||
func (r *mockedLoggerRecorder) Since(t time.Time) []logger.Line {
|
||||
return []logger.Line{
|
||||
{
|
||||
When: time.Now(),
|
||||
Message: "Test message",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *mockedLoggerRecorder) Clear() {}
|
||||
cmd/syncthing/mocked_model_test.go (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/model"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/stats"
|
||||
)
|
||||
|
||||
type mockedModel struct{}
|
||||
|
||||
func (m *mockedModel) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) Completion(device protocol.DeviceID, folder string) model.FolderCompletion {
|
||||
return model.FolderCompletion{}
|
||||
}
|
||||
|
||||
func (m *mockedModel) Override(folder string) {}
|
||||
|
||||
func (m *mockedModel) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int) {
|
||||
return nil, nil, nil, 0
|
||||
}
|
||||
|
||||
func (m *mockedModel) NeedSize(folder string) (nfiles int, bytes int64) {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (m *mockedModel) ConnectionStats() map[string]interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) DeviceStatistics() map[string]stats.DeviceStatistics {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) FolderStatistics() map[string]stats.FolderStatistics {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
|
||||
return protocol.FileInfo{}, false
|
||||
}
|
||||
|
||||
func (m *mockedModel) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
|
||||
return protocol.FileInfo{}, false
|
||||
}
|
||||
|
||||
func (m *mockedModel) ResetFolder(folder string) {
|
||||
}
|
||||
|
||||
func (m *mockedModel) Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []model.Availability {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) GetIgnores(folder string) ([]string, []string, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) SetIgnores(folder string, content []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) PauseDevice(device protocol.DeviceID) {
|
||||
}
|
||||
|
||||
func (m *mockedModel) ResumeDevice(device protocol.DeviceID) {}
|
||||
|
||||
func (m *mockedModel) DelayScan(folder string, next time.Duration) {}
|
||||
|
||||
func (m *mockedModel) ScanFolder(folder string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) ScanFolders() map[string]error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) ScanFolderSubdirs(folder string, subs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedModel) BringToFront(folder, file string) {}
|
||||
|
||||
func (m *mockedModel) ConnectedTo(deviceID protocol.DeviceID) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *mockedModel) GlobalSize(folder string) (nfiles, deleted int, bytes int64) {
|
||||
return 0, 0, 0
|
||||
}
|
||||
|
||||
func (m *mockedModel) LocalSize(folder string) (nfiles, deleted int, bytes int64) {
|
||||
return 0, 0, 0
|
||||
}
|
||||
|
||||
func (m *mockedModel) CurrentSequence(folder string) (int64, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (m *mockedModel) RemoteSequence(folder string) (int64, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (m *mockedModel) State(folder string) (string, time.Time, error) {
|
||||
return "", time.Time{}, nil
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
cryptoRand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
mathRand "math/rand"
|
||||
)
|
||||
|
||||
// randomCharset contains the characters that can make up a randomString().
|
||||
const randomCharset = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||
|
||||
// predictableRandom is an RNG that will always have the same sequence. It
|
||||
// will be seeded with the device ID during startup, so that the sequence is
|
||||
// predictable but varies between instances.
|
||||
var predictableRandom = mathRand.New(mathRand.NewSource(42))
|
||||
|
||||
func init() {
|
||||
// The default RNG should be seeded with something good.
|
||||
mathRand.Seed(randomInt64())
|
||||
}
|
||||
|
||||
// randomString returns a string of random characters (taken from
|
||||
// randomCharset) of the specified length.
|
||||
func randomString(l int) string {
|
||||
bs := make([]byte, l)
|
||||
for i := range bs {
|
||||
bs[i] = randomCharset[mathRand.Intn(len(randomCharset))]
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
// randomInt64 returns a strongly random int64, slowly
|
||||
func randomInt64() int64 {
|
||||
var bs [8]byte
|
||||
_, err := io.ReadFull(cryptoRand.Reader, bs[:])
|
||||
if err != nil {
|
||||
panic("randomness failure: " + err.Error())
|
||||
}
|
||||
return seedFromBytes(bs[:])
|
||||
}
|
||||
|
||||
// seedFromBytes calculates a weak 64 bit hash from the given byte slice,
|
||||
// suitable for use a predictable random seed.
|
||||
func seedFromBytes(bs []byte) int64 {
|
||||
h := md5.New()
|
||||
h.Write(bs)
|
||||
s := h.Sum(nil)
|
||||
// The MD5 hash of the byte slice is 16 bytes long. We interpret it as two
|
||||
// uint64s and XOR them together.
|
||||
return int64(binary.BigEndian.Uint64(s[0:]) ^ binary.BigEndian.Uint64(s[8:]))
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
@@ -59,7 +60,7 @@ func (c *folderSummaryService) Stop() {
|
||||
// listenForUpdates subscribes to the event bus and makes note of folders that
|
||||
// need their data recalculated.
|
||||
func (c *folderSummaryService) listenForUpdates() {
|
||||
sub := events.Default.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged)
|
||||
sub := events.Default.Subscribe(events.LocalIndexUpdated | events.RemoteIndexUpdated | events.StateChanged | events.RemoteDownloadProgress | events.DeviceConnected)
|
||||
defer events.Default.Unsubscribe(sub)
|
||||
|
||||
for {
|
||||
@@ -67,8 +68,31 @@ func (c *folderSummaryService) listenForUpdates() {
|
||||
|
||||
select {
|
||||
case ev := <-sub.C():
|
||||
// Whenever the local or remote index is updated for a given
|
||||
// folder we make a note of it.
|
||||
if ev.Type == events.DeviceConnected {
|
||||
// When a device connects we schedule a refresh of all
|
||||
// folders shared with that device.
|
||||
|
||||
data := ev.Data.(map[string]string)
|
||||
deviceID, _ := protocol.DeviceIDFromString(data["id"])
|
||||
|
||||
c.foldersMut.Lock()
|
||||
nextFolder:
|
||||
for _, folder := range c.cfg.Folders() {
|
||||
for _, dev := range folder.Devices {
|
||||
if dev.DeviceID == deviceID {
|
||||
c.folders[folder.ID] = struct{}{}
|
||||
continue nextFolder
|
||||
}
|
||||
}
|
||||
}
|
||||
c.foldersMut.Unlock()
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// The other events all have a "folder" attribute that they
|
||||
// affect. Whenever the local or remote index is updated for a
|
||||
// given folder we make a note of it.
|
||||
|
||||
data := ev.Data.(map[string]interface{})
|
||||
folder := data["folder"].(string)
|
||||
@@ -183,9 +207,11 @@ func (c *folderSummaryService) sendSummary(folder string) {
|
||||
// remote device.
|
||||
comp := c.model.Completion(devCfg.DeviceID, folder)
|
||||
events.Default.Log(events.FolderCompletion, map[string]interface{}{
|
||||
"folder": folder,
|
||||
"device": devCfg.DeviceID.String(),
|
||||
"completion": comp,
|
||||
"folder": folder,
|
||||
"device": devCfg.DeviceID.String(),
|
||||
"completion": comp.CompletionPct,
|
||||
"needBytes": comp.NeedBytes,
|
||||
"globalBytes": comp.GlobalBytes,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,132 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/upnp"
|
||||
)
|
||||
|
||||
// The UPnP service runs a loop for discovery of IGDs (Internet Gateway
|
||||
// Devices) and setup/renewal of a port mapping.
|
||||
type upnpService struct {
|
||||
cfg *config.Wrapper
|
||||
localPort int
|
||||
extPort int
|
||||
extPortMut sync.Mutex
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newUPnPService(cfg *config.Wrapper, localPort int) *upnpService {
|
||||
return &upnpService{
|
||||
cfg: cfg,
|
||||
localPort: localPort,
|
||||
extPortMut: sync.NewMutex(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *upnpService) Serve() {
|
||||
foundIGD := true
|
||||
s.stop = make(chan struct{})
|
||||
|
||||
for {
|
||||
igds := upnp.Discover(time.Duration(s.cfg.Options().UPnPTimeoutS) * time.Second)
|
||||
if len(igds) > 0 {
|
||||
foundIGD = true
|
||||
s.extPortMut.Lock()
|
||||
oldExtPort := s.extPort
|
||||
s.extPortMut.Unlock()
|
||||
|
||||
newExtPort := s.tryIGDs(igds, oldExtPort)
|
||||
|
||||
s.extPortMut.Lock()
|
||||
s.extPort = newExtPort
|
||||
s.extPortMut.Unlock()
|
||||
} else if foundIGD {
|
||||
// Only print a notice if we've previously found an IGD or this is
|
||||
// the first time around.
|
||||
foundIGD = false
|
||||
l.Infof("No UPnP device detected")
|
||||
}
|
||||
|
||||
d := time.Duration(s.cfg.Options().UPnPRenewalM) * time.Minute
|
||||
if d == 0 {
|
||||
// We always want to do renewal so lets just pick a nice sane number.
|
||||
d = 30 * time.Minute
|
||||
}
|
||||
|
||||
select {
|
||||
case <-s.stop:
|
||||
return
|
||||
case <-time.After(d):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *upnpService) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
func (s *upnpService) ExternalPort() int {
|
||||
s.extPortMut.Lock()
|
||||
port := s.extPort
|
||||
s.extPortMut.Unlock()
|
||||
return port
|
||||
}
|
||||
|
||||
func (s *upnpService) tryIGDs(igds []upnp.IGD, prevExtPort int) int {
|
||||
// Lets try all the IGDs we found and use the first one that works.
|
||||
// TODO: Use all of them, and sort out the resulting mess to the
|
||||
// discovery announcement code...
|
||||
for _, igd := range igds {
|
||||
extPort, err := s.tryIGD(igd, prevExtPort)
|
||||
if err != nil {
|
||||
l.Warnf("Failed to set UPnP port mapping: external port %d on device %s.", extPort, igd.FriendlyIdentifier())
|
||||
continue
|
||||
}
|
||||
|
||||
if extPort != prevExtPort {
|
||||
l.Infof("New UPnP port mapping: external port %d to local port %d.", extPort, s.localPort)
|
||||
events.Default.Log(events.ExternalPortMappingChanged, map[string]int{"port": extPort})
|
||||
}
|
||||
l.Debugf("Created/updated UPnP port mapping for external port %d on device %s.", extPort, igd.FriendlyIdentifier())
|
||||
return extPort
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *upnpService) tryIGD(igd upnp.IGD, suggestedPort int) (int, error) {
|
||||
var err error
|
||||
leaseTime := s.cfg.Options().UPnPLeaseM * 60
|
||||
|
||||
if suggestedPort != 0 {
|
||||
// First try renewing our existing mapping.
|
||||
name := fmt.Sprintf("syncthing-%d", suggestedPort)
|
||||
err = igd.AddPortMapping(upnp.TCP, suggestedPort, s.localPort, name, leaseTime)
|
||||
if err == nil {
|
||||
return suggestedPort, nil
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// Then try up to ten random ports.
|
||||
extPort := 1024 + predictableRandom.Intn(65535-1024)
|
||||
name := fmt.Sprintf("syncthing-%d", extPort)
|
||||
err = igd.AddPortMapping(upnp.TCP, extPort, s.localPort, name, leaseTime)
|
||||
if err == nil {
|
||||
return extPort, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, err
|
||||
}
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
@@ -133,7 +134,7 @@ func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
|
||||
for _, cfg := range cfg.Folders() {
|
||||
rescanIntvs = append(rescanIntvs, cfg.RescanIntervalS)
|
||||
|
||||
if cfg.ReadOnly {
|
||||
if cfg.Type == config.FolderTypeReadOnly {
|
||||
folderUses["readonly"]++
|
||||
}
|
||||
if cfg.IgnorePerms {
|
||||
@@ -203,16 +204,16 @@ func reportData(cfg configIntf, m modelIntf) map[string]interface{} {
|
||||
}
|
||||
|
||||
defaultRelayServers, otherRelayServers := 0, 0
|
||||
for _, addr := range cfg.Options().RelayServers {
|
||||
switch addr {
|
||||
case "dynamic+https://relays.syncthing.net/endpoint":
|
||||
for _, addr := range cfg.ListenAddresses() {
|
||||
switch {
|
||||
case addr == "dynamic+https://relays.syncthing.net/endpoint":
|
||||
defaultRelayServers++
|
||||
default:
|
||||
case strings.HasPrefix(addr, "relay://") || strings.HasPrefix(addr, "dynamic+http"):
|
||||
otherRelayServers++
|
||||
}
|
||||
}
|
||||
res["relays"] = map[string]interface{}{
|
||||
"enabled": cfg.Options().RelaysEnabled,
|
||||
"enabled": defaultRelayServers+otherAnnounceServers > 0,
|
||||
"defaultServers": defaultRelayServers,
|
||||
"otherServers": otherRelayServers,
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
)
|
||||
@@ -73,15 +72,18 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
|
||||
case events.Starting:
|
||||
return fmt.Sprintf("Starting up (%s)", ev.Data.(map[string]string)["home"])
|
||||
|
||||
case events.StartupComplete:
|
||||
return "Startup complete"
|
||||
|
||||
case events.DeviceDiscovered:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Discovered device %v at %v", data["device"], data["addrs"])
|
||||
|
||||
case events.DeviceConnected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Connected to device %v at %v (type %s)", data["id"], data["addr"], data["type"])
|
||||
|
||||
case events.DeviceDisconnected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Disconnected from device %v", data["id"])
|
||||
@@ -90,6 +92,11 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Folder %q is now %v", data["folder"], data["to"])
|
||||
|
||||
case events.LocalChangeDetected:
|
||||
data := ev.Data.(map[string]string)
|
||||
// Local change detected in folder "foo": modified file /Users/jb/whatever
|
||||
return fmt.Sprintf("Local change detected in folder %q: %s %s %s", data["folder"], data["action"], data["type"], data["path"])
|
||||
|
||||
case events.RemoteIndexUpdated:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Device %v sent an index update for %q with %d items", data["device"], data["folder"], data["items"])
|
||||
@@ -97,6 +104,7 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
case events.DeviceRejected:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Rejected connection from device %v at %v", data["device"], data["address"])
|
||||
|
||||
case events.FolderRejected:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Rejected unshared folder %q from device %v", data["folder"], data["device"])
|
||||
@@ -104,6 +112,7 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
case events.ItemStarted:
|
||||
data := ev.Data.(map[string]string)
|
||||
return fmt.Sprintf("Started syncing %q / %q (%v %v)", data["folder"], data["item"], data["action"], data["type"])
|
||||
|
||||
case events.ItemFinished:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
if err, ok := data["error"].(*string); ok && err != nil {
|
||||
@@ -120,13 +129,18 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
case events.FolderCompletion:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
return fmt.Sprintf("Completion for folder %q on device %v is %v%%", data["folder"], data["device"], data["completion"])
|
||||
|
||||
case events.FolderSummary:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
sum := data["summary"].(map[string]interface{})
|
||||
delete(sum, "invalid")
|
||||
delete(sum, "ignorePatterns")
|
||||
delete(sum, "stateChanged")
|
||||
return fmt.Sprintf("Summary for folder %q is %v", data["folder"], data["summary"])
|
||||
sum := make(map[string]interface{})
|
||||
for k, v := range data["summary"].(map[string]interface{}) {
|
||||
if k == "invalid" || k == "ignorePatterns" || k == "stateChanged" {
|
||||
continue
|
||||
}
|
||||
sum[k] = v
|
||||
}
|
||||
return fmt.Sprintf("Summary for folder %q is %v", data["folder"], sum)
|
||||
|
||||
case events.FolderScanProgress:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
folder := data["folder"].(string)
|
||||
@@ -143,19 +157,19 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
data := ev.Data.(map[string]string)
|
||||
device := data["device"]
|
||||
return fmt.Sprintf("Device %v was paused", device)
|
||||
|
||||
case events.DeviceResumed:
|
||||
data := ev.Data.(map[string]string)
|
||||
device := data["device"]
|
||||
return fmt.Sprintf("Device %v was resumed", device)
|
||||
|
||||
case events.ExternalPortMappingChanged:
|
||||
data := ev.Data.(map[string]int)
|
||||
port := data["port"]
|
||||
return fmt.Sprintf("External port mapping changed; new port is %d.", port)
|
||||
case events.RelayStateChanged:
|
||||
data := ev.Data.(map[string][]string)
|
||||
newRelays := data["new"]
|
||||
return fmt.Sprintf("Relay state changed; connected relay(s) are %s.", strings.Join(newRelays, ", "))
|
||||
case events.ListenAddressesChanged:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
address := data["address"]
|
||||
lan := data["lan"]
|
||||
wan := data["wan"]
|
||||
return fmt.Sprintf("Listen address %s resolution has changed: lan addresses: %s wan addresses: %s", address, lan, wan)
|
||||
|
||||
case events.LoginAttempt:
|
||||
data := ev.Data.(map[string]interface{})
|
||||
username := data["username"].(string)
|
||||
@@ -166,7 +180,6 @@ func (s *verboseService) formatEvent(ev events.Event) string {
|
||||
success = "failed"
|
||||
}
|
||||
return fmt.Sprintf("Login %s for username %s.", success, username)
|
||||
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s %#v", ev.Type, ev)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
syncthing ({{.version}}); urgency=medium
|
||||
{{.name}} ({{.version}}); urgency=medium
|
||||
|
||||
* Packaging of {{.version}}.
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
[Unit]
|
||||
Description=Restart Syncthing after resume
|
||||
Documentation=man:syncthing(1)
|
||||
After=suspend.target
|
||||
|
||||
[Service]
|
||||
|
||||
@@ -1,479 +0,0 @@
|
||||
/*
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
*/
|
||||
|
||||
body {
|
||||
padding-bottom: 70px;
|
||||
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||
}
|
||||
|
||||
h1, h2, h3, h4, h5 {
|
||||
font-family: "Raleway", "Helvetica Neue", Helvetica, Arial, sans-serif;
|
||||
line-height: 1.25;
|
||||
}
|
||||
|
||||
ul+h5 {
|
||||
margin-top: 1.5em;
|
||||
}
|
||||
|
||||
#content {
|
||||
margin-bottom: 50px;
|
||||
}
|
||||
|
||||
.panel-progress {
|
||||
background: #3498db;
|
||||
height: 3px;
|
||||
left: 0;
|
||||
position: absolute;
|
||||
top: 0;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.panel-title {
|
||||
position: relative;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
identicon {
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
line-height: 1;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
.identicon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #333;
|
||||
}
|
||||
|
||||
.checkbox {
|
||||
margin-top: 0px;
|
||||
}
|
||||
|
||||
.checkbox input[type="checkbox"], .radio input[type="radio"] {
|
||||
float: none; /* issue #1197 */
|
||||
}
|
||||
|
||||
.popover {
|
||||
max-width: none;
|
||||
min-width: 250px;
|
||||
}
|
||||
|
||||
.tooltip{
|
||||
word-wrap:break-word;
|
||||
}
|
||||
|
||||
.panel-heading .fa, .modal-header .fa {
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.panel-heading {
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.text-monospace {
|
||||
font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
|
||||
}
|
||||
|
||||
.table-condensed>thead>tr>th, .table-condensed>tbody>tr>th, .table-condensed>tfoot>tr>th, .table-condensed>thead>tr>td, .table-condensed>tbody>tr>td, .table-condensed>tfoot>tr>td {
|
||||
border-top: none;
|
||||
}
|
||||
|
||||
.logo {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
top: -5px;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.list-no-bullet {
|
||||
list-style-type: none
|
||||
}
|
||||
|
||||
.li-column {
|
||||
display: inline-block;
|
||||
min-width: 7em;
|
||||
margin-right: 1em;
|
||||
background-color: rgb(236, 240, 241);
|
||||
border-radius: 3px;
|
||||
padding: 1px 4px;
|
||||
margin: 2px 2px;
|
||||
}
|
||||
.li-column span.data {
|
||||
margin-left: 0.5em;
|
||||
min-width: 10em;
|
||||
text-align: right;
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
.ng-cloak {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
.table th {
|
||||
white-space: nowrap;
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
.table td {
|
||||
padding-left: 20px !important;
|
||||
}
|
||||
|
||||
.table td.small-data {
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
table.table-condensed {
|
||||
table-layout: fixed;
|
||||
}
|
||||
table.table-condensed td {
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
@media (max-width:767px) {
|
||||
table.table-condensed td {
|
||||
/* for mobile phones to allow linebreaks in long repro folder/shared with
|
||||
* columns. */
|
||||
white-space: normal;
|
||||
}
|
||||
}
|
||||
|
||||
.navbar-right {
|
||||
/* to align with panel */
|
||||
padding-right: 15px;
|
||||
}
|
||||
|
||||
/**
|
||||
* Menu for select language
|
||||
*/
|
||||
|
||||
@media (min-width:480px) and (max-width:649px) {
|
||||
*[language-select] > .dropdown-menu {
|
||||
width: 230px;
|
||||
}
|
||||
}
|
||||
|
||||
@media (min-width:650px) {
|
||||
*[language-select] > .dropdown-menu > li {
|
||||
width: 50%;
|
||||
float: left;
|
||||
}
|
||||
*[language-select] > .dropdown-menu {
|
||||
width: 440px;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@media (max-width:479px) {
|
||||
.dropdown-menu {
|
||||
padding-top: 55px;
|
||||
}
|
||||
|
||||
nav .dropdown-toggle {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.dropdown-toggle {
|
||||
float: left;
|
||||
}
|
||||
|
||||
.navbar-brand {
|
||||
padding-left: 0;
|
||||
padding-top: 16px;
|
||||
}
|
||||
|
||||
.navbar-nav .open .dropdown-menu > li > a {
|
||||
padding: 12px 15px 12px 25px;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.panel-body .table-condensed {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.dl-horizontal.dl-narrow dt {
|
||||
width: 40px;
|
||||
}
|
||||
|
||||
.dl-horizontal.dl-narrow dd {
|
||||
margin-left: 60px;
|
||||
}
|
||||
|
||||
/**
|
||||
* Progress bars with centered text
|
||||
*/
|
||||
|
||||
.progress {
|
||||
margin-bottom: 0px;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.progress span.frontal {
|
||||
text-align: center;
|
||||
position: absolute;
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.three-columns {
|
||||
-webkit-column-count: 3;
|
||||
-moz-column-count: 3;
|
||||
column-count: 3;
|
||||
}
|
||||
|
||||
.two-columns {
|
||||
-webkit-column-count: 2;
|
||||
-moz-column-count: 2;
|
||||
column-count: 2;
|
||||
}
|
||||
|
||||
ul.three-columns li, ul.two-columns li {
|
||||
padding-left: 0.5em;
|
||||
text-indent: -0.5em;
|
||||
}
|
||||
|
||||
/** Footer nav on small devices **/
|
||||
|
||||
@media (max-width: 1199px) {
|
||||
body {
|
||||
padding-bottom: 0;
|
||||
}
|
||||
|
||||
.navbar-fixed-bottom {
|
||||
position: static;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
|
||||
Dark theme customizations start here.
|
||||
|
||||
Author: alessandro.g89
|
||||
Source: https://userstyles.org/styles/122502/syncthing-dark
|
||||
|
||||
**/
|
||||
|
||||
body {
|
||||
color: #aaa !important;
|
||||
background-color: black !important;
|
||||
}
|
||||
|
||||
a:hover,a:focus,a.focus{
|
||||
outline: none !important;
|
||||
}
|
||||
|
||||
|
||||
/* navbar */
|
||||
.navbar {
|
||||
background-color: #333 !important;
|
||||
border-color: #333 !important;
|
||||
border-width: 2px !important;
|
||||
}
|
||||
|
||||
.navbar-text, .dropdown>a, .dropdown-menu>li>a, .hidden-xs>a, .navbar-link {
|
||||
color: #aaa !important;
|
||||
}
|
||||
|
||||
.dropdown-menu {
|
||||
border-color: #333 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.dropdown-menu>li>a:hover, .dropdown-menu>li>a:focus {
|
||||
color: #fff !important;
|
||||
background-color: #333 !important;
|
||||
}
|
||||
|
||||
.open>.dropdown-toggle, .dropdown-toggle:hover {
|
||||
border-color: #333 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.divider {
|
||||
background-color: #333 !important;
|
||||
height: 2px !important;
|
||||
}
|
||||
|
||||
li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
outline: none !important;
|
||||
border-color: #333 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.dropdown-menu>.active>a {
|
||||
color: #fff !important;
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
|
||||
/* main panel */
|
||||
.panel {
|
||||
background-color: #111 !important;
|
||||
border-width: 2px !important;
|
||||
}
|
||||
|
||||
.panel-default {
|
||||
border-color: #222 !important;
|
||||
}
|
||||
|
||||
.panel-default>.panel-heading {
|
||||
color: #aaa !important;
|
||||
border-color: #222 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.panel-footer {
|
||||
background-color: #111 !important;
|
||||
border-width: 0 !important;
|
||||
}
|
||||
|
||||
.table-striped>tbody>tr:nth-of-type(odd) {
|
||||
background-color: #181818 !important;
|
||||
}
|
||||
|
||||
.panel-group .panel-heading+.panel-collapse>.panel-body, .panel-group .panel-heading+.panel-collapse>.list-group {
|
||||
border-top: 1px solid #222 !important;
|
||||
}
|
||||
|
||||
.identicon>rect {
|
||||
fill: #aaa !important;
|
||||
}
|
||||
|
||||
/* buttons */
|
||||
.btn {
|
||||
border-radius: 3px !important;
|
||||
border-width: 0px !important;
|
||||
}
|
||||
|
||||
.btn:hover, .btn:focus, .btn.focus {
|
||||
outline: none !important;
|
||||
}
|
||||
.btn-default {
|
||||
color: #aaa !important;
|
||||
background-color: #333 !important;
|
||||
}
|
||||
|
||||
.btn-default:hover, .btn-default:focus, .btn-default.focus {
|
||||
color: #fff !important;
|
||||
background-color: #484848 !important;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
.btn-primary:hover, .btn-primary:focus, .btn-primary.focus {
|
||||
background-color: #3498db !important;
|
||||
}
|
||||
|
||||
.btn-warning {
|
||||
background-color: #c29d0b !important;
|
||||
}
|
||||
|
||||
.btn-warning:hover, .btn-warning:focus, .btn-warning.focus {
|
||||
background-color: #f1c40f !important;
|
||||
}
|
||||
|
||||
.btn-danger {
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
|
||||
.btn-danger:hover, .btn-danger:focus, .btn-danger.focus {
|
||||
background-color: #e74c3c !important;
|
||||
}
|
||||
|
||||
|
||||
/* modal dialogs */
|
||||
.modal-header {
|
||||
border-color: #222 !important;
|
||||
background-color: #222;
|
||||
}
|
||||
|
||||
.modal-content {
|
||||
border-color: #666 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
.modal-footer {
|
||||
border-color: #111 !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
.alert-warning {
|
||||
background-color: #c29d0b !important;
|
||||
}
|
||||
|
||||
.alert-danger {
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
|
||||
.help-block {
|
||||
color: #aaa !important;
|
||||
}
|
||||
|
||||
.form-control {
|
||||
color: #aaa !important;
|
||||
border-color: #444 !important;
|
||||
background-color: black !important;
|
||||
}
|
||||
|
||||
code.ng-binding{
|
||||
color: #f99 !important;
|
||||
background-color: #444 !important;
|
||||
}
|
||||
|
||||
.well, .form-control[readonly="readonly"] { /* read-only fields*/
|
||||
color: #666 !important;
|
||||
border-color: #444 !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
/* buttons for pagination */
|
||||
.pagination>li>a, .pagination>li>span {
|
||||
background-color: #333 !important;
|
||||
border-color: #484848 !important;
|
||||
}
|
||||
|
||||
.pagination>li>a:hover, .pagination>li>a:focus, .pagination>li>a.focus {
|
||||
background-color: #484848 !important;
|
||||
}
|
||||
|
||||
|
||||
/* progess bars */
|
||||
.progress-bar {
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
.progress-bar-success {
|
||||
background-color: #0A8522 !important;
|
||||
}
|
||||
|
||||
.progress-bar-info {
|
||||
background-color: #9b59b6 !important;
|
||||
}
|
||||
|
||||
.progress-bar-warning {
|
||||
background-color: #c29d0b !important;
|
||||
}
|
||||
|
||||
.progress-bar-danger {
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
gui/dark/assets/css/theme.css (new file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
/**
|
||||
|
||||
Dark theme
|
||||
|
||||
Author: alessandro.g89
|
||||
Source: https://userstyles.org/styles/122502/syncthing-dark
|
||||
|
||||
**/
|
||||
|
||||
body {
|
||||
color: #aaa !important;
|
||||
background-color: black !important;
|
||||
}
|
||||
|
||||
a:hover,a:focus,a.focus{
|
||||
outline: none !important;
|
||||
}
|
||||
|
||||
|
||||
/* navbar */
|
||||
.navbar {
|
||||
background-color: #333 !important;
|
||||
border-color: #333 !important;
|
||||
border-width: 2px !important;
|
||||
}
|
||||
|
||||
.navbar-text, .dropdown>a, .dropdown-menu>li>a, .hidden-xs>a, .navbar-link {
|
||||
color: #aaa !important;
|
||||
}
|
||||
|
||||
.dropdown-menu {
|
||||
border-color: #333 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.dropdown-menu>li>a:hover, .dropdown-menu>li>a:focus {
|
||||
color: #fff !important;
|
||||
background-color: #333 !important;
|
||||
}
|
||||
|
||||
.open>.dropdown-toggle, .dropdown-toggle:hover {
|
||||
border-color: #333 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.divider {
|
||||
background-color: #333 !important;
|
||||
height: 2px !important;
|
||||
}
|
||||
|
||||
li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
outline: none !important;
|
||||
border-color: #333 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
.dropdown-menu>.active>a {
|
||||
color: #fff !important;
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
|
||||
/* main panel */
|
||||
.panel {
|
||||
background-color: #111 !important;
|
||||
border-width: 2px !important;
|
||||
}
|
||||
|
||||
.panel-default {
|
||||
border-color: #222 !important;
|
||||
}
|
||||
|
||||
.panel-default > .panel-heading {
|
||||
color: #aaa !important;
|
||||
border-color: #222 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
.panel-warning > .panel-heading {
|
||||
color: #222 !important;
|
||||
}
|
||||
|
||||
.panel-progress {
|
||||
background: #3498db;
|
||||
}
|
||||
|
||||
.panel-footer {
|
||||
background-color: #111 !important;
|
||||
border-width: 0 !important;
|
||||
}
|
||||
|
||||
.table-striped>tbody>tr:nth-of-type(odd) {
|
||||
background-color: #181818 !important;
|
||||
}
|
||||
|
||||
.panel-group .panel-heading+.panel-collapse>.panel-body, .panel-group .panel-heading+.panel-collapse>.list-group {
|
||||
border-top: 1px solid #222 !important;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #aaa;
|
||||
}
|
||||
|
||||
.panel-warning .identicon rect {
|
||||
fill: #222;
|
||||
}
|
||||
|
||||
.panel-heading:hover, .panel-heading:focus {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
|
||||
/* buttons */
|
||||
.btn {
|
||||
border-radius: 3px !important;
|
||||
border-width: 0px !important;
|
||||
}
|
||||
|
||||
.btn:hover, .btn:focus, .btn.focus {
|
||||
outline: none !important;
|
||||
}
|
||||
.btn-default {
|
||||
color: #aaa !important;
|
||||
background-color: #333 !important;
|
||||
}
|
||||
|
||||
.btn-default:hover, .btn-default:focus, .btn-default.focus {
|
||||
color: #fff !important;
|
||||
background-color: #484848 !important;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
.btn-primary:hover, .btn-primary:focus, .btn-primary.focus {
|
||||
background-color: #3498db !important;
|
||||
}
|
||||
|
||||
.btn-warning {
|
||||
background-color: #c29d0b !important;
|
||||
}
|
||||
|
||||
.btn-warning:hover, .btn-warning:focus, .btn-warning.focus {
|
||||
background-color: #f1c40f !important;
|
||||
}
|
||||
|
||||
.btn-danger {
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
|
||||
.btn-danger:hover, .btn-danger:focus, .btn-danger.focus {
|
||||
background-color: #e74c3c !important;
|
||||
}
|
||||
|
||||
|
||||
/* modal dialogs */
|
||||
.modal-header {
|
||||
border-bottom-color: #222 !important;
|
||||
}
|
||||
|
||||
.modal-header:not(.alert) {
|
||||
background-color: #222;
|
||||
}
|
||||
|
||||
.alert-info {
|
||||
color: #222 !important;
|
||||
}
|
||||
|
||||
.alert-warning {
|
||||
color: #222 !important;
|
||||
}
|
||||
|
||||
.alert-danger {
|
||||
color: #222 !important;
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
|
||||
.modal-content {
|
||||
border-color: #666 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
.modal-footer {
|
||||
border-color: #111 !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
.help-block {
|
||||
color: #aaa !important;
|
||||
}
|
||||
|
||||
.form-control {
|
||||
color: #aaa !important;
|
||||
border-color: #444 !important;
|
||||
background-color: black !important;
|
||||
}
|
||||
|
||||
code.ng-binding{
|
||||
color: #f99 !important;
|
||||
background-color: #444 !important;
|
||||
}
|
||||
|
||||
.well, .form-control[readonly="readonly"], .popover { /* read-only fields*/
|
||||
color: #666 !important;
|
||||
border-color: #444 !important;
|
||||
background-color: #111 !important;
|
||||
}
|
||||
|
||||
/* buttons for pagination */
|
||||
.pagination>li>a, .pagination>li>span {
|
||||
background-color: #333 !important;
|
||||
border-color: #484848 !important;
|
||||
}
|
||||
|
||||
.pagination>li>a:hover, .pagination>li>a:focus, .pagination>li>a.focus {
|
||||
background-color: #484848 !important;
|
||||
}
|
||||
|
||||
|
||||
/* progress bars */
|
||||
.progress-bar {
|
||||
background-color: #217dbb !important;
|
||||
}
|
||||
|
||||
.progress-bar-success {
|
||||
background-color: #0A8522 !important;
|
||||
}
|
||||
|
||||
.progress-bar-info {
|
||||
background-color: #9b59b6 !important;
|
||||
}
|
||||
|
||||
.progress-bar-warning {
|
||||
background-color: #c29d0b !important;
|
||||
}
|
||||
|
||||
.progress-bar-danger {
|
||||
background-color: #d62c1a !important;
|
||||
}
|
||||
|
||||
.progress .frontal {
|
||||
color: #222;
|
||||
}
|
||||
11
gui/default/assets/css/dev.css
Normal file
@@ -0,0 +1,11 @@
|
||||
.dev-top-bar{
|
||||
background-color: yellow;
|
||||
}
|
||||
|
||||
.dev-error .hasCount{
|
||||
background-color: red;
|
||||
}
|
||||
|
||||
.dev-warn .hasCount{
|
||||
background-color: yellow;
|
||||
}
|
||||
@@ -26,7 +26,6 @@ ul+h5 {
|
||||
}
|
||||
|
||||
.panel-progress {
|
||||
background: #3498db;
|
||||
height: 3px;
|
||||
left: 0;
|
||||
position: absolute;
|
||||
@@ -34,33 +33,6 @@ ul+h5 {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.panel-title {
|
||||
position: relative;
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
}
|
||||
.panel-title a:hover {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
identicon {
|
||||
display: inline-block;
|
||||
position: relative;
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
line-height: 1;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
||||
.identicon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #333;
|
||||
}
|
||||
|
||||
.checkbox {
|
||||
margin-top: 0px;
|
||||
}
|
||||
@@ -74,19 +46,10 @@ identicon {
|
||||
min-width: 250px;
|
||||
}
|
||||
|
||||
.tooltip{
|
||||
.tooltip {
|
||||
word-wrap:break-word;
|
||||
}
|
||||
|
||||
.panel-heading .fa, .modal-header .fa {
|
||||
margin-right: 10px;
|
||||
}
|
||||
|
||||
.panel-heading {
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.text-monospace {
|
||||
font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
|
||||
}
|
||||
@@ -103,15 +66,13 @@ identicon {
|
||||
}
|
||||
|
||||
.list-no-bullet {
|
||||
list-style-type: none
|
||||
list-style-type: none;
|
||||
}
|
||||
|
||||
.li-column {
|
||||
display: inline-block;
|
||||
min-width: 7em;
|
||||
margin-right: 1em;
|
||||
background-color: rgb(236, 240, 241);
|
||||
border-radius: 3px;
|
||||
padding: 1px 4px;
|
||||
margin: 2px 2px;
|
||||
}
|
||||
@@ -132,7 +93,7 @@ identicon {
|
||||
}
|
||||
|
||||
.table td {
|
||||
padding-left: 20px !important;
|
||||
/*padding-left: 20px !important;*/
|
||||
}
|
||||
|
||||
.table td.small-data {
|
||||
@@ -152,18 +113,16 @@ table.table-condensed td.no-overflow-ellipse {
|
||||
white-space: normal;
|
||||
}
|
||||
|
||||
.folder-advanced{
|
||||
background-color: hsla(0,0%,99%,1);
|
||||
border: 1px solid hsla(0, 0%, 95%, 1);
|
||||
.folder-advanced {
|
||||
padding: 1rem;
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
|
||||
.folder-advanced-toggle{
|
||||
.folder-advanced-toggle {
|
||||
cursor: pointer;
|
||||
}
|
||||
.folder-advanced-toggle .collapse,
|
||||
.folder-advanced-toggle.collapsed .expand{
|
||||
.folder-advanced-toggle.collapsed .expand {
|
||||
display: inline-block;
|
||||
}
|
||||
|
||||
@@ -172,64 +131,29 @@ table.table-condensed td.no-overflow-ellipse {
|
||||
display: none;
|
||||
}
|
||||
|
||||
@media (max-width:767px) {
|
||||
table.table-condensed td {
|
||||
/* for mobile phones to allow linebreaks in long repro folder/shared with
|
||||
* columns. */
|
||||
white-space: normal;
|
||||
}
|
||||
*[language-select] > .dropdown-menu {
|
||||
width: 450px;
|
||||
}
|
||||
|
||||
*[language-select] > .dropdown-menu > li {
|
||||
float: left;
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
*[language-select] > .dropdown-menu > li > a {
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
.nav>li{
|
||||
float: left;
|
||||
}
|
||||
.navbar-right {
|
||||
/* to align with panel */
|
||||
padding-right: 15px;
|
||||
float: right;
|
||||
}
|
||||
|
||||
/**
|
||||
* Menu for select language
|
||||
*/
|
||||
|
||||
@media (min-width:480px) and (max-width:649px) {
|
||||
*[language-select] > .dropdown-menu {
|
||||
width: 230px;
|
||||
}
|
||||
}
|
||||
|
||||
@media (min-width:650px) {
|
||||
*[language-select] > .dropdown-menu > li {
|
||||
width: 50%;
|
||||
float: left;
|
||||
}
|
||||
*[language-select] > .dropdown-menu {
|
||||
width: 440px;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@media (max-width:479px) {
|
||||
.dropdown-menu {
|
||||
padding-top: 55px;
|
||||
}
|
||||
|
||||
nav .dropdown-toggle {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
.dropdown-toggle {
|
||||
float: left;
|
||||
}
|
||||
|
||||
.navbar-brand {
|
||||
padding-left: 0;
|
||||
padding-top: 16px;
|
||||
}
|
||||
|
||||
.navbar-nav .open .dropdown-menu > li > a {
|
||||
padding: 12px 15px 12px 25px;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.panel-body .table-condensed {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
@@ -242,6 +166,56 @@ table.table-condensed td.no-overflow-ellipse {
|
||||
margin-left: 60px;
|
||||
}
|
||||
|
||||
/**
|
||||
* Panel, Model and Accordion Title bars
|
||||
*/
|
||||
|
||||
.panel-icon {
|
||||
float: left;
|
||||
margin-right: 15px;
|
||||
margin-top: 0.125em;
|
||||
margin-bottom: 0.125em;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
.modal-title .panel-icon {
|
||||
margin-top: 0.25em;
|
||||
margin-bottom: 0.25em;
|
||||
}
|
||||
|
||||
button.panel-heading {
|
||||
display: block;
|
||||
position: relative;
|
||||
width: 100%;
|
||||
text-align: left;
|
||||
border-top-width: 0;
|
||||
border-left-width: 0;
|
||||
border-right-width: 0;
|
||||
border-radius: 0 !important;
|
||||
}
|
||||
|
||||
.panel-heading .panel-title-text {
|
||||
text-overflow: ellipsis;
|
||||
overflow: hidden;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.panel-heading .panel-status {
|
||||
margin-left:15px;
|
||||
}
|
||||
|
||||
identicon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
line-height: 1;
|
||||
}
|
||||
|
||||
.identicon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Progress bars with centered text
|
||||
*/
|
||||
@@ -276,7 +250,6 @@ ul.three-columns li, ul.two-columns li {
|
||||
}
|
||||
|
||||
/** Footer nav on small devices **/
|
||||
|
||||
@media (max-width: 1199px) {
|
||||
/* Stay at the end of the page, with space reserved for the footer
|
||||
usually taking up two rows. */
|
||||
@@ -303,7 +276,57 @@ ul.three-columns li, ul.two-columns li {
|
||||
padding-bottom: 0px;
|
||||
}
|
||||
|
||||
.navbar-brand {
|
||||
margin: 3.25px -15px;
|
||||
}
|
||||
|
||||
.navbar-fixed-bottom {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.navbar-nav .open .dropdown-menu {
|
||||
position: absolute;
|
||||
left: auto;
|
||||
right: 0;
|
||||
background-color: #ffffff;
|
||||
border: 1px solid #cccccc;
|
||||
border: 1px solid rgba(0, 0, 0, 0.15);
|
||||
-webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
|
||||
box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
|
||||
border-radius: 2px;
|
||||
}
|
||||
|
||||
*[language-select] {
|
||||
position: static !important;
|
||||
}
|
||||
|
||||
*[language-select] > .dropdown-menu {
|
||||
margin-left: 15px;
|
||||
margin-right: 15px;
|
||||
margin-top: -12px !important;
|
||||
max-width: 450px;
|
||||
height: 265px;
|
||||
overflow-y: scroll;
|
||||
}
|
||||
|
||||
table.table-condensed td {
|
||||
/* for mobile phones to allow linebreaks in long repro folder/shared with
|
||||
* columns. */
|
||||
white-space: normal;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width:479px) {
|
||||
|
||||
nav .dropdown-toggle {
|
||||
font-size: 1em;
|
||||
}
|
||||
|
||||
.navbar-nav .open .dropdown-menu > li > a {
|
||||
padding: 12px 15px 12px 25px;
|
||||
}
|
||||
|
||||
.navbar-fixed-bottom li {
|
||||
width: 100%;
|
||||
}
|
||||
}
|
||||
29
gui/default/assets/css/theme.css
Normal file
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
*/
|
||||
|
||||
.panel-progress {
|
||||
background: #3498db;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
fill: #333;
|
||||
}
|
||||
|
||||
.panel-warning .identicon rect {
|
||||
fill: #fff;
|
||||
}
|
||||
|
||||
.li-column {
|
||||
background-color: rgb(236, 240, 241);
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.panel-heading:hover, .panel-heading:focus {
|
||||
text-decoration: none;
|
||||
}
|
||||
BIN
gui/default/assets/img/favicon-default.png
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
BIN
gui/default/assets/img/favicon-notify.png
Normal file
|
After Width: | Height: | Size: 3.5 KiB |
BIN
gui/default/assets/img/favicon-pause.png
Normal file
|
After Width: | Height: | Size: 3.5 KiB |
BIN
gui/default/assets/img/favicon-sync.png
Normal file
|
After Width: | Height: | Size: 4.0 KiB |
|
Before Width: | Height: | Size: 6.4 KiB |
@@ -4,25 +4,27 @@
|
||||
"A new major version may not be compatible with previous versions.": "Нова основна версия, която може да не е съвмеситима с предишни версии.",
|
||||
"API Key": "API Ключ",
|
||||
"About": "За програмата",
|
||||
"Actions": "Действия",
|
||||
"Actions": "Меню",
|
||||
"Add": "Добави",
|
||||
"Add Device": "Добави устройство",
|
||||
"Add Folder": "Добави папка",
|
||||
"Add Remote Device": "Добави ново устройство",
|
||||
"Add new folder?": "Добави нова папка?",
|
||||
"Address": "Адрес",
|
||||
"Addresses": "Адреси",
|
||||
"Advanced": "Допълнителни",
|
||||
"Advanced Configuration": "Допълнителни настройки",
|
||||
"Advanced settings": "Допълнителни настройки",
|
||||
"All Data": "Всички данни",
|
||||
"Allow Anonymous Usage Reporting?": "Разреши анонимно докладване за употребата на програмата?",
|
||||
"Alphabetic": "Азбучен ред",
|
||||
"An external command handles the versioning. It has to remove the file from the synced folder.": "Друга команда се занимава с версиите. Тази команда трябва да премахни файла от синхронизираната папка.",
|
||||
"Anonymous Usage Reporting": "Анонимен доклад",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Устройства настроени на introducer компютъра също ще бъдат добавени към този компютър.",
|
||||
"Any devices configured on an introducer device will be added to this device as well.": "Устройства настроени да представят други устройства също ще бъдат добавени към това устройство.",
|
||||
"Automatic upgrades": "Автоматично обновяване",
|
||||
"Be careful!": "Внимание!",
|
||||
"Bugs": "Бъгове",
|
||||
"CPU Utilization": "Процесор в употреба",
|
||||
"CPU Utilization": "Използван процесор",
|
||||
"Changelog": "Списък с промени",
|
||||
"Clean out after": "Изчисти след",
|
||||
"Close": "Затвори",
|
||||
@@ -30,15 +32,18 @@
|
||||
"Comment, when used at the start of a line": "Коментар, използван в началото на реда",
|
||||
"Compression": "Компресиране",
|
||||
"Connection Error": "Грешка при свързването",
|
||||
"Connection Type": "Вид връзка",
|
||||
"Copied from elsewhere": "Копиране от някъде другаде",
|
||||
"Copied from original": "Копиран от оригинала",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Всички правата запазени © 2014-2016 Сътрудници:",
|
||||
"Copyright © 2015 the following Contributors:": "Всички правата запазени © 2015 Сътрудници:",
|
||||
"Danger!": "Опасност!",
|
||||
"Delete": "Изтрий",
|
||||
"Deleted": "Изтрито",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Устройство \"{{name}}\" ({{device}}) на {{address}} желае да се свърже. Добави ново устройство?",
|
||||
"Device ID": "Идентификатор на устройство",
|
||||
"Device Identification": "Идентификатор на устройство",
|
||||
"Device Name": "Име на устройство",
|
||||
"Device Name": "Име на устройството",
|
||||
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Устройство {{device}} ({{address}}) желае да се свърже. Добави ново устройство?",
|
||||
"Devices": "Устройства",
|
||||
"Disconnected": "Не е свързано",
|
||||
@@ -51,6 +56,7 @@
|
||||
"Edit Device": "Промени устройство",
|
||||
"Edit Folder": "Промени папка",
|
||||
"Editing": "Променяне",
|
||||
"Enable NAT traversal": "Разреши NAT traversal",
|
||||
"Enable Relaying": "Разреши препращане",
|
||||
"Enable UPnP": "Включи UPnP",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Въведете адреси разделени със запетая (\"tcp://ip:port\", \"tcp://host:port\") или \"dynamic\", за да автоматично откриване на наличните адреси.",
|
||||
@@ -66,12 +72,14 @@
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Защитава файловете от промени направени на други устройства, но промените направени на това устройство ще бъдат синхронизирани с останалите устройства.",
|
||||
"Folder": "Папка",
|
||||
"Folder ID": "Идентификатор на папката",
|
||||
"Folder Label": "Етикет на папката",
|
||||
"Folder Master": "Главна папка",
|
||||
"Folder Path": "Път до папката",
|
||||
"Folder Type": "Вид папка",
|
||||
"Folders": "Папки",
|
||||
"GUI": "Потребителски интерфейс",
|
||||
"GUI Authentication Password": "Парола за потребителския интерфейс",
|
||||
"GUI Authentication User": "Потребител за потребителския интерфейс",
|
||||
"GUI Authentication Password": "Парола за интерфейса",
|
||||
"GUI Authentication User": "Потребителско име за интерфейса",
|
||||
"GUI Listen Addresses": "Адрес за свързване с потребителския интерфейс",
|
||||
"Generate": "Генерирай",
|
||||
"Global Discovery": "Глобално откриване",
|
||||
@@ -85,34 +93,39 @@
|
||||
"Ignore Permissions": "Игнорирай правата за достъп",
|
||||
"Incoming Rate Limit (KiB/s)": "Лимит на скоростта за сваляне (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Неправилни настройки могат да повредят файловете и да попречат на синхронизирането.",
|
||||
"Introducer": "Introducer",
|
||||
"Introducer": "Може да предлага други устройства",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Обратното на даденото условие (пр. не изключвай)",
|
||||
"Keep Versions": "Пази версии",
|
||||
"Largest First": " Първо най-големите",
|
||||
"Last File Received": "Последния получен файл",
|
||||
"Last seen": "Последно видян",
|
||||
"Last Scan": "Последно сканиран",
|
||||
"Last seen": "Последно видяно",
|
||||
"Later": "По-късно",
|
||||
"Listeners": "Синхронизиращи устройства",
|
||||
"Local Discovery": "Локално откриване",
|
||||
"Local State": "Локално състояние",
|
||||
"Local State (Total)": "Локално състояние (Общо)",
|
||||
"Local State (Total)": "Локално състояние (общо)",
|
||||
"Major Upgrade": "Основно Обновяване",
|
||||
"Master": "Главен",
|
||||
"Maximum Age": "Максимална възраст",
|
||||
"Metadata Only": "Само мета информация",
|
||||
"Minimum Free Disk Space": "Минимално свободно дисково пространство",
|
||||
"Move to top of queue": "Премести в началото на опашката",
|
||||
"Multi level wildcard (matches multiple directory levels)": "Маска на много нива (покрива папки с много нива)",
|
||||
"Never": "Никога",
|
||||
"Never": "никога",
|
||||
"New Device": "Ново устройство",
|
||||
"New Folder": "Нова папка",
|
||||
"Newest First": "Първо най-новите",
|
||||
"No": "Не",
|
||||
"No File Versioning": "Без версии",
|
||||
"Normal": "Нормален",
|
||||
"Notice": "Известие",
|
||||
"OK": "ОК",
|
||||
"Off": "Изключено",
|
||||
"Oldest First": "Първо най-старите",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Допълнително разяснеие за етикета на папката. Може да бъде различно всяко устройство.",
|
||||
"Options": "Настройки",
|
||||
"Out of Sync": "Несинхронизирано",
|
||||
"Out of Sync": "Несинхронизирана",
|
||||
"Out of Sync Items": "Несинхронизирани елементи",
|
||||
"Outgoing Rate Limit (KiB/s)": "Лимит на скорост за качване (KiB/s)",
|
||||
"Override Changes": "Наложи локалните промени",
|
||||
@@ -126,15 +139,17 @@
|
||||
"Preview": "Преглед",
|
||||
"Preview Usage Report": "Разгледай доклада за използване",
|
||||
"Quick guide to supported patterns": "Бърз наръчник към поддържаните шаблони",
|
||||
"RAM Utilization": "RAM в употреба",
|
||||
"RAM Utilization": "Използван RAM",
|
||||
"Random": "Произволен",
|
||||
"Relay Servers": "Препращащи сървъри",
|
||||
"Relayed via": "Препратено през",
|
||||
"Relays": "Препращачи",
|
||||
"Release Notes": "Бележки по обновяването",
|
||||
"Remote Devices": "Чужди устройства",
|
||||
"Remove": "Премахни",
|
||||
"Rescan": "Сканирай повторно",
|
||||
"Rescan All": "Сканирай повторно всички",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Задължителен идентификатор за тази папка. Трябва да бъде един и същ на всички устройства.",
|
||||
"Rescan": "Сканирай",
|
||||
"Rescan All": "Сканирай всички",
|
||||
"Rescan Interval": "Интервал за повторно сканиране",
|
||||
"Restart": "Рестартирай",
|
||||
"Restart Needed": "Изисква се рестартиране",
|
||||
@@ -172,12 +187,13 @@
|
||||
"Sync Protocol Listen Addresses": "Адрес за слушане на синхронизиращия протокол",
|
||||
"Syncing": "Синхронизиране",
|
||||
"Syncthing has been shut down.": "Syncthing е спрян.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing включва следният софтуер пълно или частично:",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing уползотворява частично или изцяло следните софтуерни продукти:",
|
||||
"Syncthing is restarting.": "Syncthing се рестартира",
|
||||
"Syncthing is upgrading.": "Syncthing се обновява.",
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing изглежда не е включен, или има проблем с интерент връзката. Повторен опит...",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing има проблем при обработването на заявката. Моля, презаредете браузъра или рестартирайте Syncthing ако проблемът продължи.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "Администраторския панел на Syncthing е настроен да приема дистанционни връзки без парола.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "Сумарната статистика е публично достъпна на посочения по-долу адрес.",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Сумарната статистика е публично достъпна на {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Конфигурацията е запазена, но не е активирана. Syncthing трябва да рестартира, за да се активира новата конфигурация.",
|
||||
"The device ID cannot be blank.": "Полето идентификатор на устройство не може да бъде празно.",
|
||||
@@ -203,13 +219,14 @@
|
||||
"The rate limit must be a non-negative number (0: no limit)": "Ограничението на скоростта трябва да бъде положително число (0: неограничено)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "Интервала на сканиране трябва да бъде не отрицателно число в секунди.",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "Ще бъдат спрени и автоматично синхронизирани, когато грешката бъде оправена.",
|
||||
"This Device": "Вашето устройство",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Това дава лесен достъп на хакери да разглеждат и променят всякакви файлове на компютъра Ви.",
|
||||
"This is a major version upgrade.": "Това е нова основна версия.",
|
||||
"Trash Can File Versioning": "Само на файловете в кошчето",
|
||||
"Unknown": "Неясно",
|
||||
"Unshared": "Несподелена",
|
||||
"Unused": "Неизползван",
|
||||
"Up to Date": "Синхронизирано",
|
||||
"Up to Date": "Синхронизирана",
|
||||
"Updated": "Обновено",
|
||||
"Upgrade": "Обнови",
|
||||
"Upgrade To {%version%}": "Обновен до {{version}}",
|
||||
@@ -220,6 +237,7 @@
|
||||
"Version": "Версия",
|
||||
"Versions Path": "Път до версиите",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Версиите биват изтривани автоматично, когато са по-стари от максималната възраст или надминават броя файлове разрешени в даден интервал.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Внимание, това е вътрешна папка на вече съществуваща папка \"{{otherFolder}}\".",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Когато добавяш ново устройство помни, че твоето устройство също трябва да бъде добавено от другата страна.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Когато добавяш нов идентификатор на папка помни, че той се използва за свързване на папките на различни устройства. Главни/малки букви са от значение и трябва да са еднакви на всички устройства.",
|
||||
"Yes": "Да",
|
||||
@@ -227,5 +245,7 @@
|
||||
"days": "дни",
|
||||
"full documentation": "пълна документация",
|
||||
"items": "елемента",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} желае да сподели папка \"{{folder}}\"."
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} желае да сподели папка \"{{folder}}\".",
|
||||
"{%device%} wants to share folder \"{%folderLabel%}\" ({%folder%}).": "{{device}} желае е да сподели папка \"{{folderLabel}}\" ({{folder}}).",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} желае да сподели папка \"{{folderlabel}}\" ({{folder}})."
|
||||
}
|
||||
@@ -8,11 +8,13 @@
|
||||
"Add": "Afegir",
|
||||
"Add Device": "Afegir dispositiu",
|
||||
"Add Folder": "Afegir carpeta",
|
||||
"Add Remote Device": "Add Remote Device",
|
||||
"Add new folder?": "Afegir nova carpeta?",
|
||||
"Address": "Adreça",
|
||||
"Addresses": "Adreces",
|
||||
"Advanced": "Avançat",
|
||||
"Advanced Configuration": "Configuració Avançada",
|
||||
"Advanced settings": "Advanced settings",
|
||||
"All Data": "Totes les dades",
|
||||
"Allow Anonymous Usage Reporting?": "Permetre l'enviament anònim d'informes d'ús?",
|
||||
"Alphabetic": "Alfabètic",
|
||||
@@ -30,12 +32,15 @@
|
||||
"Comment, when used at the start of a line": "Comentari quan és usat al principi d'una línia",
|
||||
"Compression": "Compressió",
|
||||
"Connection Error": "Error de connexió",
|
||||
"Connection Type": "Connection Type",
|
||||
"Copied from elsewhere": "Copiat d'un altre lloc",
|
||||
"Copied from original": "Copiat de l'original",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 the following Contributors:",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents col·laboradors:",
|
||||
"Danger!": "Perill!",
|
||||
"Delete": "Esborrar",
|
||||
"Deleted": "Esborrat",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Device \"{{name}}\" ({{device}} at {{address}}) wants to connect. Add new device?",
|
||||
"Device ID": "ID del dispositiu",
|
||||
"Device Identification": "Identificació del dispositiu",
|
||||
"Device Name": "Nom del dispositiu",
|
||||
@@ -51,6 +56,7 @@
|
||||
"Edit Device": "Modificar dispositiu",
|
||||
"Edit Folder": "Modificar carpeta",
|
||||
"Editing": "Modificant",
|
||||
"Enable NAT traversal": "Enable NAT traversal",
|
||||
"Enable Relaying": "Enable Relaying",
|
||||
"Enable UPnP": "Habilitat UPnP",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Introdueix adreces separades per comes (\"tcp://ip:port\", \"tcp://host:port\") o \"dinàmic\" per realitzar descobriments automàtics de l'adreça.",
|
||||
@@ -66,8 +72,10 @@
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Els fitxers estan protegits de canvis fets per altres dispositius, però els canvis fets en aquest dispositiu seran enviats a la resta del cluster.",
|
||||
"Folder": "Carpeta",
|
||||
"Folder ID": "ID de carpeta",
|
||||
"Folder Label": "Folder Label",
|
||||
"Folder Master": "Carpeta mestra",
|
||||
"Folder Path": "Camí de carpeta",
|
||||
"Folder Type": "Folder Type",
|
||||
"Folders": "Carpetes",
|
||||
"GUI": "GUI",
|
||||
"GUI Authentication Password": "Contrasenya d'autenticació GUI",
|
||||
@@ -90,12 +98,15 @@
|
||||
"Keep Versions": "Mantenir Versions",
|
||||
"Largest First": "Més gran primer",
|
||||
"Last File Received": "Últim fitxer rebut",
|
||||
"Last Scan": "Last Scan",
|
||||
"Last seen": "Vist per última vegada",
|
||||
"Later": "Després",
|
||||
"Listeners": "Listeners",
|
||||
"Local Discovery": "Descobriment Local",
|
||||
"Local State": "Estat local",
|
||||
"Local State (Total)": "Estat local (Total)",
|
||||
"Major Upgrade": "Actualització major",
|
||||
"Master": "Master",
|
||||
"Maximum Age": "Antiguitat Màxima",
|
||||
"Metadata Only": "Només metadades",
|
||||
"Minimum Free Disk Space": "Espai de disc lliure mínim",
|
||||
@@ -107,10 +118,12 @@
|
||||
"Newest First": "Més nou primer",
|
||||
"No": "No",
|
||||
"No File Versioning": "Sense Versionat de Fitxer",
|
||||
"Normal": "Normal",
|
||||
"Notice": "Avís",
|
||||
"OK": "OK",
|
||||
"Off": "Desactivar",
|
||||
"Oldest First": "Més antic primer",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Optional descriptive label for the folder. Can be different on each device.",
|
||||
"Options": "Opcions",
|
||||
"Out of Sync": "Fora de sincronia",
|
||||
"Out of Sync Items": "Arxius encara no sincronitzats",
|
||||
@@ -132,7 +145,9 @@
|
||||
"Relayed via": "Retransmés a través",
|
||||
"Relays": "Repetidors",
|
||||
"Release Notes": "Notes de llançament",
|
||||
"Remote Devices": "Remote Devices",
|
||||
"Remove": "Esborrar",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Required identifier for the folder. Must be the same on all cluster devices.",
|
||||
"Rescan": "Re-escanejar",
|
||||
"Rescan All": "Re-escanejar tot",
|
||||
"Rescan Interval": "Interval de re-escaneig",
|
||||
@@ -178,6 +193,7 @@
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Synthing sembla parat, o hi ha algun problema amb la connexió a Internet. Reintentant...",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Sembla ser que Syncthing està tinguent problemes per processar la teva petició. Si us plau, refresca la pàgina o reinicia Syncthing si el problema persisteix.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "La interfície d'administració de Syncthing està configurada per permetre l'accés remot sense contrasenya.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "The aggregated statistics are publicly available at the URL below.",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Les estadístiques agregades estan públicament disponibles a {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuració s'ha guardar però no s'ha activat. S'ha de reiniciar el synthing per activar la nova configuració.",
|
||||
"The device ID cannot be blank.": "El ID del dispositiu no pot estar en blanc.",
|
||||
@@ -203,6 +219,7 @@
|
||||
"The rate limit must be a non-negative number (0: no limit)": "El límit de velocitat ha de ser un nombre positiu (0: sense límit)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "El interval de re-escaneig ha der ser un nombre positiu de segons.",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "Són reintentats automàticament i seran sincronitzats quan l'error estigui resolt.",
|
||||
"This Device": "This Device",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Això pot donar facilment accés a hackers per llegir i canviar qualsevol fitxer del teu ordinador.",
|
||||
"This is a major version upgrade.": "Aquesta és una actualització de versió major.",
|
||||
"Trash Can File Versioning": "Paperera de versionat de fitxers",
|
||||
@@ -220,6 +237,7 @@
|
||||
"Version": "Versió",
|
||||
"Versions Path": "Carpeta de les Versions",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions son automàticament eliminades si son més antigues que el màxim d'antiguitat o si excedeixen del nombre de fitxers permesos en un interval.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warning, this path is a subdirectory of an existing folder \"{{otherFolder}}\".",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Quan s'afegeix un nou dispositiu, recorda que aquest dispositiu tambè s'ha d'afegir a l'altre banda.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Quan s'afegeix una nova carpeta recorda que el ID d'aquesta s'utilitza per lligar repositoris entre els dispositius. Es distingeix entre majúscules i minúscules i ha de ser exactament iguals entre tots els dispositius.",
|
||||
"Yes": "Si",
|
||||
@@ -227,5 +245,7 @@
|
||||
"days": "dies",
|
||||
"full documentation": "documentació sencera",
|
||||
"items": "Elements",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartir la carpeta \"{{folder}}\"."
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartir la carpeta \"{{folder}}\".",
|
||||
"{%device%} wants to share folder \"{%folderLabel%}\" ({%folder%}).": "{{device}} wants to share folder \"{{folderLabel}}\" ({{folder}}).",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} wants to share folder \"{{folderlabel}}\" ({{folder}})."
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"A device with that ID is already added.": "A device with that ID is already added.",
|
||||
"A device with that ID is already added.": "Un dispositiu amb eixa ID ja s'ha afegit.",
|
||||
"A negative number of days doesn't make sense.": "Un nombre negatiu de dies no té sentit.",
|
||||
"A new major version may not be compatible with previous versions.": "Una nova versión amb canvis importants pot no ser compatible amb versions prèvies.",
|
||||
"API Key": "Clau API",
|
||||
@@ -8,11 +8,13 @@
|
||||
"Add": "Afegir",
|
||||
"Add Device": "Afegir dispositiu",
|
||||
"Add Folder": "Afegir carpeta",
|
||||
"Add Remote Device": "Afegir Dispositiu Remot.",
|
||||
"Add new folder?": "Afegir nova carpeta?",
|
||||
"Address": "Direcció",
|
||||
"Addresses": "Direccions",
|
||||
"Advanced": "Avançat",
|
||||
"Advanced Configuration": "Configuració avançada",
|
||||
"Advanced settings": "Ajustos avançats.",
|
||||
"All Data": "Totes les dades",
|
||||
"Allow Anonymous Usage Reporting?": "Permetre informes d'ús anònim?",
|
||||
"Alphabetic": "Alfabètic",
|
||||
@@ -30,12 +32,15 @@
|
||||
"Comment, when used at the start of a line": "Comentar, quant s'utilitza al principi d'una línia",
|
||||
"Compression": "Compresió",
|
||||
"Connection Error": "Error de connexió",
|
||||
"Connection Type": "Tipus de connexió",
|
||||
"Copied from elsewhere": "Copiat de qualsevol lloc",
|
||||
"Copied from original": "Copiat de l'original",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 els següents Col·laboradors:",
|
||||
"Copyright © 2015 the following Contributors:": "Copyright © 2015 els següents Col·laboradors:",
|
||||
"Danger!": "Perill!",
|
||||
"Delete": "Esborrar",
|
||||
"Deleted": "Esborrat",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Dispositiu \"{{name}}\" ({{device}} a l'adreça {{address}}) vol connectar. Afegir nou dispositiu?",
|
||||
"Device ID": "ID del dispositiu",
|
||||
"Device Identification": "Identificació del dispositiu",
|
||||
"Device Name": "Nom del dispositiu",
|
||||
@@ -51,7 +56,8 @@
|
||||
"Edit Device": "Editar dispositiu",
|
||||
"Edit Folder": "Editar carpeta",
|
||||
"Editing": "Editant",
|
||||
"Enable Relaying": "Enable Relaying",
|
||||
"Enable NAT traversal": "Permetre NAT transversal",
|
||||
"Enable Relaying": "Permetre Transmissions",
|
||||
"Enable UPnP": "Activar UPnp",
|
||||
"Enter comma separated (\"tcp://ip:port\", \"tcp://host:port\") addresses or \"dynamic\" to perform automatic discovery of the address.": "Introdueix adreces separades per coma (\"tcp://ip:port\", \"tcp://host:port\") o \"dynamic\" per a realitzar el descobriment automàtic de l'adreça.",
|
||||
"Enter ignore patterns, one per line.": "Introduïr patrons a ignorar, un per línia.",
|
||||
@@ -66,8 +72,10 @@
|
||||
"Files are protected from changes made on other devices, but changes made on this device will be sent to the rest of the cluster.": "Els fitxers són protegits dels canvis fets en altres dispositius, però els canvis fets en aquest dispositiu seràn enviats a la resta del grup (cluster).",
|
||||
"Folder": "Carpeta",
|
||||
"Folder ID": "ID de carpeta",
|
||||
"Folder Label": "Etiqueta de la Carpeta",
|
||||
"Folder Master": "Carpeta principal",
|
||||
"Folder Path": "Ruta de la carpeta",
|
||||
"Folder Type": "Tipus de carpeta",
|
||||
"Folders": "Carpetes",
|
||||
"GUI": "IGU (Interfície Gràfica d'Usuari)",
|
||||
"GUI Authentication Password": "Password d'autenticació de l'Interfície Gràfica d'Usuari (GUI)",
|
||||
@@ -75,8 +83,8 @@
|
||||
"GUI Listen Addresses": "Direcció d'escolta de l'Interfície Gràfica d'Usuari (GUI)",
|
||||
"Generate": "Generar",
|
||||
"Global Discovery": "Descobriment global",
|
||||
"Global Discovery Server": "Servidor de descobriment global",
|
||||
"Global Discovery Servers": "Global Discovery Servers",
|
||||
"Global Discovery Server": "Servidor de Descobriment Global",
|
||||
"Global Discovery Servers": "Servidors de Descobriment Global",
|
||||
"Global State": "Estat global",
|
||||
"Help": "Ajuda",
|
||||
"Home page": "Pàgina inicial",
|
||||
@@ -90,12 +98,15 @@
|
||||
"Keep Versions": "Mantindre versions",
|
||||
"Largest First": "El més gran primer",
|
||||
"Last File Received": "Darrer fitxer rebut",
|
||||
"Last Scan": "Últim escaneig",
|
||||
"Last seen": "Vist per última vegada",
|
||||
"Later": "Més tard",
|
||||
"Listeners": "Escoltants",
|
||||
"Local Discovery": "Descobriment local",
|
||||
"Local State": "Estat local",
|
||||
"Local State (Total)": "Estat Local (Total)",
|
||||
"Major Upgrade": "Actualització important",
|
||||
"Master": "Mestre",
|
||||
"Maximum Age": "Edat màxima",
|
||||
"Metadata Only": "Sols metadades",
|
||||
"Minimum Free Disk Space": "Espai minim de disc lliure",
|
||||
@@ -107,10 +118,12 @@
|
||||
"Newest First": "El més nou primer",
|
||||
"No": "No",
|
||||
"No File Versioning": "Sense versionat de fitxer",
|
||||
"Normal": "Normal",
|
||||
"Notice": "Avís",
|
||||
"OK": "OK",
|
||||
"Off": "Off",
|
||||
"Oldest First": "El més vell primer",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Etiqueta descriptiva opcional per la carpeta. Pot ser diferent en cada dispositiu.",
|
||||
"Options": "Opcions",
|
||||
"Out of Sync": "Sense sincronització",
|
||||
"Out of Sync Items": "Dispositius sense sincronitzar",
|
||||
@@ -128,11 +141,13 @@
|
||||
"Quick guide to supported patterns": "Guía ràpida de patrons suportats",
|
||||
"RAM Utilization": "Utilització de la RAM",
|
||||
"Random": "Aleatori",
|
||||
"Relay Servers": "Relay Servers",
|
||||
"Relay Servers": "Servidors de Transmissió",
|
||||
"Relayed via": "Transmitit via",
|
||||
"Relays": "Transmissions",
|
||||
"Release Notes": "Notes de la versió",
|
||||
"Remote Devices": "Dispositius Remots",
|
||||
"Remove": "Eliminar",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Identificador necessari per la carpeta. Deu ser el mateix en tots els dispositius del cluster.",
|
||||
"Rescan": "Tornar a buscar",
|
||||
"Rescan All": "Tornar a buscar tot",
|
||||
"Rescan Interval": "Interval de nova busca",
|
||||
@@ -155,7 +170,7 @@
|
||||
"Shared With": "Compartit amb",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Identificador curt per a la carpeta. Deu ser el mateix en tots els dispositius del grup (cluster).",
|
||||
"Show ID": "Mostrar ID",
|
||||
"Show QR": "Show QR",
|
||||
"Show QR": "Mostrar QR",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Mostrat en lloc de l'ID del dispositiu en l'estat del grup (cluster). S'anunciarà als altres dispositius com el nom opcional per defecte.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Mostrat en lloc de l'ID del dispositiu en l'estat del grup (cluster). S'actualitzarà al nom que el dispositiu anuncia si es deixa buit.",
|
||||
"Shutdown": "Apagar",
|
||||
@@ -178,10 +193,11 @@
|
||||
"Syncthing seems to be down, or there is a problem with your Internet connection. Retrying…": "Syncthing pareix apagat o hi ha un problema amb la connexió a Internet. Tornant a intentar...",
|
||||
"Syncthing seems to be experiencing a problem processing your request. Please refresh the page or restart Syncthing if the problem persists.": "Syncthing pareix que té un problema processant la seua sol·licitud. Per favor, refresque la pàgina o reinicie Syncthing si el problema persistix.",
|
||||
"The Syncthing admin interface is configured to allow remote access without a password.": "L'interfície d'administració de Syncthing està configurat per a permetre l'accés remot sense una contrasenya.",
|
||||
"The aggregated statistics are publicly available at the URL below.": "Les estadístiques agregades estàn disponibles en la URL que figura a continuació.",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Les estadístiques agregades estan disponibles públicament en {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "La configuració ha sigut gravada però no activada. Syncthing deu reiniciar per tal d'activar la nova configuració.",
|
||||
"The device ID cannot be blank.": "L'ID del dispositiu no pot estar buida.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "L'ID del dispositiu que hi ha que introduïr ací es pot trobar en el menú \"Accions > Mostrar ID\" en l'altre dispositiu. Els espais i les barres son opcionals (ignorats).",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "L'ID del dispositiu que hi ha que introduïr ací es pot trobar en el menú \"Editar > Mostrar ID\" en l'altre dispositiu. Els espais i les barres son opcionals (ignorats).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "L'informe encriptat d'ús s'envia diariament. S'utilitza per a rastrejar plataformes comuns, tamanys de carpetes i versions de l'aplicació. Si el conjunt de dades enviat a l'informe es canvia, se li demanarà a vosté l'autorització altra vegada.\n",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "L'ID del dispositiu introduïda no pareix vàlida. Deuria ser una cadena de 52 o 56 caracters consistents en lletres i nombre, amb espais i barres opcionals.",
|
||||
@@ -203,6 +219,7 @@
|
||||
"The rate limit must be a non-negative number (0: no limit)": "El llímit del ritme deu ser un nombre no negatiu (0: sense llímit)",
|
||||
"The rescan interval must be a non-negative number of seconds.": "L'interval de reescaneig deu ser un nombre positiu de segons.",
|
||||
"They are retried automatically and will be synced when the error is resolved.": "Es reintenta automàticament i es sincronitzaràn quant el resolga l'error.",
|
||||
"This Device": "Aquest Dispositiu",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Açò pot donar accés fàcilment als hackers per a llegir i canviar qualsevol fitxer al teu ordinador.",
|
||||
"This is a major version upgrade.": "Aquesta és una actualització important de la versió.",
|
||||
"Trash Can File Versioning": "Versionat d'arxius de la paperera",
|
||||
@@ -220,6 +237,7 @@
|
||||
"Version": "Versió",
|
||||
"Versions Path": "Ruta de les versions",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Les versions s'esborren automàticament si són més antigues que l'edat màxima o excedixen el nombre de fitxer permesos en un interval.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Perill! Aquesta ruta és un subdirectori d'una carpeta que ja existeix nomenada \"{{otherFolder}}\".",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Quant s'afig un nou dispositiu, hi ha que tindre en compte que aquest dispositiu deu ser afegit també en l'altre costat.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Quant s'afig una nova carpeta, hi ha que tindre en compte que l'ID de la carpeta s'utilitza per a juntar les carpetes entre dispositius. Són sensibles a les majúscules i deuen coincidir exactament entre tots els dispositius.",
|
||||
"Yes": "Sí",
|
||||
@@ -227,5 +245,7 @@
|
||||
"days": "dies",
|
||||
"full documentation": "Documentació completa",
|
||||
"items": "Elements",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartit la carpeta \"{{folder}}\"."
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} vol compartit la carpeta \"{{folder}}\".",
|
||||
"{%device%} wants to share folder \"{%folderLabel%}\" ({%folder%}).": "{{device}} vol compartir la carpeta \"{{folderLabel}}\" ({{folder}}).",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} vol compartir la carpeta \"{{folderlabel}}\" ({{folder}})."
|
||||
}
|
||||