Compare commits (815 commits)
.gitignore (vendored, 9 changes)
@@ -1,11 +1,11 @@
syncthing
!gui/syncthing
!debian/syncthing
!Godeps/_workspace/src/github.com/syncthing
/syncthing
/stdiscosrv
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
.jshintrc
coverage.out
files/pidx
@@ -16,3 +16,4 @@ syncthing.sig
RELEASE
deb
lib/auto/gui.files.go
snapcraft.yaml
AUTHORS (206 changes)
@@ -1,90 +1,120 @@
|
||||
# This is the official list of Syncthing authors for copyright purposes.
|
||||
# The format is:
|
||||
#
|
||||
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
|
||||
#
|
||||
# The NICKS list is auto generated from this file.
|
||||
|
||||
Aaron Bieber <qbit@deftly.net>
|
||||
Adam Piggott <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
|
||||
Alessandro G. <alessandro.g89@gmail.com>
|
||||
Alexander Graf <register-github@alex-graf.de>
|
||||
Anderson Mesquita <andersonvom@gmail.com>
|
||||
Andrew Dunham <andrew@du.nham.ca>
|
||||
Antony Male <antony.male@gmail.com>
|
||||
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Alexandre Viau <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Audrius Butkevicius <audrius.butkevicius@gmail.com>
|
||||
Bart De Vries <devriesb@gmail.com>
|
||||
Ben Curthoys <ben@bencurthoys.com>
|
||||
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
Ben Sidhom <bsidhom@gmail.com>
|
||||
Benny Ng <benny.tpng@gmail.com>
|
||||
Brandon Philips <brandon@ifup.org>
|
||||
Brendan Long <self@brendanlong.com>
|
||||
Brian R. Becker <brbecker@gmail.com>
|
||||
Caleb Callaway <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann <moter8@gmail.com>
|
||||
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
Chris Howie <me@chrishowie.com>
|
||||
Chris Joel <chris@scriptolo.gy>
|
||||
Colin Kennedy <moshen.colin@gmail.com>
|
||||
Daniel Bergmann <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí <mvdan@mvdan.cc>
|
||||
David Rimmer <dinosore@dbrsoftware.co.uk>
|
||||
Denis A. <denisva@gmail.com>
|
||||
Dennis Wilson <dw@risu.io>
|
||||
Dominik Heidler <dominik@heidler.eu>
|
||||
Elias Jarlebring <jarlebring@gmail.com>
|
||||
Emil Hessman <emil@hessman.se>
|
||||
Erik Meitner <e.meitner@willystreet.coop>
|
||||
Federico Castagnini <federico.castagnini@gmail.com>
|
||||
Felix Ableitner <me@nutomic.com>
|
||||
Felix Unterpaintner <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell <fxgsell@gmail.com>
|
||||
Frank Isemann <frank@isemann.name>
|
||||
Gilli Sigurdsson <gilli@vx.is>
|
||||
Jaakko Hannikainen <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz <szafar@linux.pl>
|
||||
Jake Peterson <jake@acogdev.com>
|
||||
Jakob Borg <jakob@nym.se>
|
||||
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
Jaroslav Malec <dzardacz@gmail.com>
|
||||
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jochen Voss <voss@seehuhn.de>
|
||||
Johan Vromans <jvromans@squirrel.nl>
|
||||
Karol Różycki <rozycki.karol@gmail.com>
|
||||
Kelong Cong <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada <kamada@nanohz.org>
|
||||
Kevin Allen <kma1660@gmail.com>
|
||||
Lars K.W. Gohlke <lkwg82@gmx.de>
|
||||
Laurent Etiemble <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Lode Hoste <zillode@zillode.be>
|
||||
Lord Landon Agahnim <lordlandon@gmail.com>
|
||||
Marc Laporte <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol <kilburn@la3.org>
|
||||
Marcin Dziadus <dziadus.marcin@gmail.com>
|
||||
Mateusz Naściszewski <matin1111@wp.pl>
|
||||
Matt Burke <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Max Schulze <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov <ploujj@gmail.com>
|
||||
Michael Tilli <pyfisch@gmail.com>
|
||||
Nate Morrison <natemorrison@gmail.com>
|
||||
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Peter Hoeg <peter@speartail.com>
|
||||
Philippe Schommers <philippe@schommers.be>
|
||||
Phill Luby <phill.luby@newredo.com>
|
||||
Piotr Bejda <piotrb10@gmail.com>
|
||||
Ryan Sullivan <kayoticsully@gmail.com>
|
||||
Scott Klupfel <kluppy@going2blue.com>
|
||||
Sergey Mishin <ralder@yandex.ru>
|
||||
Stefan Kuntz <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
Tim Abell <tim@timwise.co.uk>
|
||||
Tobias Nygren <tnn@nygren.pp.se>
|
||||
Tomas Cerveny <kozec@kozec.com>
|
||||
Tully Robinson <tully@tojr.org>
|
||||
Tyler Brazier <tyler@tylerbrazier.com>
|
||||
Veeti Paananen <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky <vix_booja@tut.by>
|
||||
Vil Brekin <vilbrekin@gmail.com>
|
||||
William A. Kennington III <william@wkennington.com>
|
||||
Wulf Weich <wweich@users.noreply.github.com> <wweich@gmx.de>
|
||||
Yannic A. <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
Aaron Bieber (qbit) <qbit@deftly.net>
|
||||
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com>
|
||||
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
|
||||
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
|
||||
Andrey D (scienmind) <scintertech@cryptolab.net>
|
||||
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
|
||||
Antony Male (canton7) <antony.male@gmail.com>
|
||||
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
|
||||
Bart De Vries (mogwa1) <devriesb@gmail.com>
|
||||
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
|
||||
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
Ben Shepherd (benshep) <bjashepherd@gmail.com>
|
||||
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
|
||||
Benny Ng (tpng) <benny.tpng@gmail.com>
|
||||
Brandon Philips (philips) <brandon@ifup.org>
|
||||
Brendan Long (brendanlong) <self@brendanlong.com>
|
||||
Brian R. Becker (brbecker) <brbecker@gmail.com>
|
||||
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann (Moter8) <moter8@gmail.com>
|
||||
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
Cedric Staniewski (xduugu) <cedric@gmx.ca>
|
||||
Chris Howie (cdhowie) <me@chrishowie.com>
|
||||
Chris Joel (cdata) <chris@scriptolo.gy>
|
||||
Colin Kennedy (moshen) <moshen.colin@gmail.com>
|
||||
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí (mvdan) <mvdan@mvdan.cc>
|
||||
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
|
||||
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
|
||||
Denis A. (dva) <denisva@gmail.com>
|
||||
Dennis Wilson (snnd) <dw@risu.io>
|
||||
Dominik Heidler (asdil12) <dominik@heidler.eu>
|
||||
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
|
||||
Emil Hessman (ceh) <emil@hessman.se>
|
||||
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
|
||||
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
|
||||
Felix Ableitner (Nutomic) <me@nutomic.com>
|
||||
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
|
||||
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
|
||||
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
|
||||
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jochen Voss (seehuhn) <voss@seehuhn.de>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
|
||||
Kevin Allen (ironmig) <kma1660@gmail.com>
|
||||
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
|
||||
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
|
||||
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
|
||||
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Leo Arias (elopio) <yo@elopio.net>
|
||||
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
|
||||
Lode Hoste (Zillode) <zillode@zillode.be>
|
||||
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
|
||||
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
|
||||
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol (kilburn) <kilburn@la3.org>
|
||||
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
|
||||
Mark Pulford (mpx) <mark@kyne.com.au>
|
||||
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
|
||||
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov (plouj) <ploujj@gmail.com>
|
||||
Michael Tilli (pyfisch) <pyfisch@gmail.com>
|
||||
Nate Morrison (nrm21) <natemorrison@gmail.com>
|
||||
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
|
||||
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Peter Hoeg (peterhoeg) <peter@speartail.com>
|
||||
Philippe Schommers (filoozoom) <philippe@schommers.be>
|
||||
Phill Luby (pluby) <phill.luby@newredo.com>
|
||||
Piotr Bejda (piobpl) <piotrb10@gmail.com>
|
||||
Robert Carosi (nov1n) <robert@carosi.nl>
|
||||
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
|
||||
Ross Smith II (rasa) <ross@smithii.com>
|
||||
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
Tim Howes (timhowes) <timhowes@berkeley.edu>
|
||||
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
|
||||
Tomas Cerveny (kozec) <kozec@kozec.com>
|
||||
Tully Robinson (tojrobinson) <tully@tojr.org>
|
||||
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
|
||||
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
|
||||
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky (buinsky) <vix_booja@tut.by>
|
||||
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
|
||||
Xavier O. (damajor) <damajor@gmail.com>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
|
||||
@@ -33,20 +33,31 @@ latest info on Transifex.

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](http://docs.syncthing.net/dev/contributing.html) for the full
Guidelines](https://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.

## Contributing Documentation

Updates to the [documentation site](http://docs.syncthing.net/) can be
Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).

## Licensing

All contributions are made under the same MPLv2 license as the rest of
the project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:

- Certain commands (under cmd/...) may have a separate license, indicated by
  the presence of a LICENSE file in the corresponding directory.

- The documentation (man/...) is licensed under the Creative Commons
  Attribution 4.0 International License.

- Projects under vendor/... are copyright by and licensed from their
  respective original authors. Contributions should be made to the original
  project, not here.

Regardless of the license in effect, you retain the copyright to your
contribution.
GOALS.md (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
# The Syncthing Goals
|
||||
|
||||
Syncthing is a **continuous file synchronization program**. It synchronizes
|
||||
files between two or more computers. We strive to fulfill the goals below.
|
||||
The goals are listed in order of importance, the most important one being
|
||||
the first.
|
||||
|
||||
> "Syncing files" here is precise. It means we specifically exclude things
|
||||
> that are not files - calendar items, instant messages, and so on. If those
|
||||
> are in fact stored as files on disk, they can of course be synced as
|
||||
> files.
|
||||
|
||||
Syncthing should be:
|
||||
|
||||
### 1. Safe From Data Loss
|
||||
|
||||
Protecting the user's data is paramount. We take every reasonable precaution
|
||||
to avoid corrupting the user's files.
|
||||
|
||||
> This is the overriding goal, without which synchronizing files becomes
|
||||
> pointless. This means that we do not make unsafe trade offs for the sake
|
||||
> of performance or, in some cases, even usability.
|
||||
|
||||
### 2. Secure Against Attackers
|
||||
|
||||
Again, protecting the user's data is paramount. Regardless of our other
|
||||
goals we must never allow the user's data to be susceptible to eavesdropping
|
||||
or modification by unauthorized parties.
|
||||
|
||||
> This should be understood in context. It is not necessarily reasonable to
|
||||
> expect Syncthing to be resistant against well equipped state level
|
||||
> attackers. We will however do our best. Note also that this is different
|
||||
> from anonymity which is not, currently, a goal.
|
||||
|
||||
### 3. Easy to Use
|
||||
|
||||
Syncthing should be approachable, understandable and inclusive.
|
||||
|
||||
> Complex concepts and maths form the base of Syncthing's functionality.
|
||||
> This should nonetheless be abstracted or hidden to a degree where
|
||||
> Syncthing is usable by the general public.
|
||||
|
||||
### 4. Automatic
|
||||
|
||||
User interaction should be required only when absolutely necessary.
|
||||
|
||||
> Specifically this means that changes to files are picked up without
|
||||
> prompting, conflicts are resolved without prompting and connections are
|
||||
> maintained without prompting. We only prompt the user when it is required
|
||||
> to fulfill one of the (overriding) Secure, Safe or Easy goals.
|
||||
|
||||
### 5. Universally Available
|
||||
|
||||
Syncthing should run on every common computer. We are mindful that the
|
||||
latest technology is not always available to any given individual.
|
||||
|
||||
> Computers include desktops, laptops, servers, virtual machines, small
|
||||
> general purpose computers such as Raspberry Pis and, *where possible*,
|
||||
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats
|
||||
> and so on may include computing capabilities but it is not our goal for
|
||||
> Syncthing to run smoothly on these devices.
|
||||
|
||||
### 6. For Individuals
|
||||
|
||||
Syncthing is primarily about empowering the individual user with safe,
|
||||
secure and easy to use file synchronization.
|
||||
|
||||
> We acknowledge that it's also useful in an enterprise setting and include
|
||||
> functionality to support that. If this is in conflict with the
|
||||
> requirements of the individual, those will however take priority.
|
||||
|
||||
### 7. Everything Else
|
||||
|
||||
There are many things we care about that don't make it on to the list. It is
|
||||
fine to optimize for these values as well, as long as they are not in
|
||||
conflict with the stated goals above.
|
||||
|
||||
> For example, performance is a thing we care about. We just don't care more
|
||||
> about it than safety, security, etc. Maintainability of the code base and
|
||||
> providing entertainment value for the maintainers are also things that
|
||||
> matter. It is understood that there are aspects of Syncthing that are
|
||||
> suboptimal or even in opposition with the goals above. However, we
|
||||
> continuously strive to align Syncthing more and more with these goals.
|
||||
LICENSE (2 changes)
@@ -357,7 +357,7 @@ Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
file, You can obtain one at https://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
NICKS (232 changes)
@@ -1,89 +1,149 @@
|
||||
# This file maps email addresses used in commits to nicks used the changelog.
|
||||
# It is auto generated from the AUTHORS file by script/authors.go.
|
||||
|
||||
acogdev <jake@acogdev.com>
|
||||
alex2108 <register-github@alex-graf.de>
|
||||
alessandro.g89 <alessandro.g89@gmail.com>
|
||||
andersonvom <andersonvom@gmail.com>
|
||||
andrew-d <andrew@du.nham.ca>
|
||||
asdil12 <dominik@heidler.eu>
|
||||
AudriusButkevicius <audrius.butkevicius@gmail.com>
|
||||
aviau <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
bencurthoys <ben@bencurthoys.com>
|
||||
bigbear2nd <bigbear2nd@gmail.com>
|
||||
brbecker <brbecker@gmail.com>
|
||||
brendanlong <self@brendanlong.com>
|
||||
brgmnn <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
bsidhom <bsidhom@gmail.com>
|
||||
buinsky <vix_booja@tut.by>
|
||||
burkemw3 <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
calmh <jakob@nym.se>
|
||||
canton7 <antony.male@gmail.com>
|
||||
Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
|
||||
cdata <chris@scriptolo.gy>
|
||||
cdhowie <me@chrishowie.com>
|
||||
ceh <emil@hessman.se>
|
||||
cqcallaw <enlightened.despot@gmail.com>
|
||||
dinosore <dinosore@dbrsoftware.co.uk>
|
||||
dva <denisva@gmail.com>
|
||||
dzarda <dzardacz@gmail.com>
|
||||
eipiminus1 <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
facastagnini <federico.castagnini@gmail.com>
|
||||
filoozoom <philippe@schommers.be>
|
||||
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
fti7 <frank@isemann.name>
|
||||
gillisig <gilli@vx.is>
|
||||
hadogenes <szafar@linux.pl>
|
||||
ironmig <kma1660@gmail.com>
|
||||
jarlebring <jarlebring@gmail.com>
|
||||
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
jgke <jgke@jgke.fi>
|
||||
jpjp <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
kamadak <kamada@nanohz.org>
|
||||
KayoticSully <kayoticsully@gmail.com>
|
||||
kilburn <kilburn@la3.org>
|
||||
kluppy <kluppy@going2blue.com>
|
||||
kozec <kozec@kozec.com>
|
||||
kralo <max.schulze@online.de>
|
||||
krozycki <rozycki.karol@gmail.com>
|
||||
letiemble <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
LordLandon <lordlandon@gmail.com>
|
||||
lkwg82 <lkwg82@gmx.de>
|
||||
marcindziadus <dziadus.marcin@gmail.com>
|
||||
0x010C <antoine.lamielle@0x010c.fr>
|
||||
0x010C <gh@0x010c.fr>
|
||||
acogdev <jake@acogdev.com>
|
||||
adelq <aqalieh95@gmail.com>
|
||||
adelq <adelq@users.noreply.github.com>
|
||||
alessandro.g89 <alessandro.g89@gmail.com>
|
||||
alex2108 <register-github@alex-graf.de>
|
||||
andersonvom <andersonvom@gmail.com>
|
||||
andrew-d <andrew@du.nham.ca>
|
||||
asdil12 <dominik@heidler.eu>
|
||||
AudriusButkevicius <audrius.butkevicius@gmail.com>
|
||||
aviau <alexandre@alexandreviau.net>
|
||||
aviau <aviau@debian.org>
|
||||
bencurthoys <ben@bencurthoys.com>
|
||||
benshep <bjashepherd@gmail.com>
|
||||
bigbear2nd <bigbear2nd@gmail.com>
|
||||
brbecker <brbecker@gmail.com>
|
||||
brendanlong <self@brendanlong.com>
|
||||
brgmnn <dan.arne.bergmann@gmail.com>
|
||||
brgmnn <brgmnn@users.noreply.github.com>
|
||||
bsidhom <bsidhom@gmail.com>
|
||||
buinsky <vix_booja@tut.by>
|
||||
burkemw3 <mburke@amplify.com>
|
||||
burkemw3 <burkemw3@gmail.com>
|
||||
calmh <jakob@nym.se>
|
||||
calmh <jakob@kastelo.net>
|
||||
canton7 <antony.male@gmail.com>
|
||||
Cathryne <cathryne.linenweaver@gmail.com>
|
||||
Cathryne <Cathryne@users.noreply.github.com>
|
||||
cdata <chris@scriptolo.gy>
|
||||
cdhowie <me@chrishowie.com>
|
||||
ceh <emil@hessman.se>
|
||||
cqcallaw <enlightened.despot@gmail.com>
|
||||
damajor <damajor@gmail.com>
|
||||
dinosore <dinosore@dbrsoftware.co.uk>
|
||||
dtchanpura <dtchanpura@gmail.com>
|
||||
dtchanpura <dcprime314@gmail.com>
|
||||
dva <denisva@gmail.com>
|
||||
dzarda <dzardacz@gmail.com>
|
||||
eipiminus1 <eipiminusone+github@gmail.com>
|
||||
eipiminus1 <eipiminus1@users.noreply.github.com>
|
||||
elopio <yo@elopio.net>
|
||||
facastagnini <federico.castagnini@gmail.com>
|
||||
filoozoom <philippe@schommers.be>
|
||||
frioux <frew@afoolishmanifesto.com>
|
||||
frioux <frioux@gmail.com>
|
||||
fti7 <frank@isemann.name>
|
||||
gillisig <gilli@vx.is>
|
||||
hadogenes <szafar@linux.pl>
|
||||
imsodin <freisim93@gmail.com>
|
||||
ironmig <kma1660@gmail.com>
|
||||
jarlebring <jarlebring@gmail.com>
|
||||
jayachithra <s.k.jayachithra@gmail.com>
|
||||
jedie <github.com@jensdiemer.de>
|
||||
jedie <git@jensdiemer.de>
|
||||
jgke <jgke@jgke.fi>
|
||||
jmdaweb <jmdaweb@hotmail.com>
|
||||
jmdaweb <jmdaweb@users.noreply.github.com>
|
||||
jpjp <jamespatterson@operamail.com>
|
||||
jpjp <jpjp@users.noreply.github.com>
|
||||
kamadak <kamada@nanohz.org>
|
||||
KayoticSully <kayoticsully@gmail.com>
|
||||
kc1212 <kc04bc@gmx.com>
|
||||
kc1212 <kc1212@users.noreply.github.com>
|
||||
kilburn <kilburn@la3.org>
|
||||
kluppy <kluppy@going2blue.com>
|
||||
kozec <kozec@kozec.com>
|
||||
kralo <max.schulze@online.de>
|
||||
kralo <kralo@users.noreply.github.com>
|
||||
krozycki <rozycki.karol@gmail.com>
|
||||
Kudalufi <kurt@va1der.ca>
|
||||
Kudalufi <kurt.fitzner@gmail.com>
|
||||
kwhite17 <kevinwhite1710@gmail.com>
|
||||
letiemble <laurent.etiemble@gmail.com>
|
||||
letiemble <laurent.etiemble@monobjc.net>
|
||||
liusy182 <liusy182@gmail.com>
|
||||
liusy182 <liusy182@hotmail.com>
|
||||
lkwg82 <lkwg82@gmx.de>
|
||||
LordLandon <lordlandon@gmail.com>
|
||||
majedev <majed.alhajry@gmail.com>
|
||||
marcindziadus <dziadus.marcin@gmail.com>
|
||||
marclaporte <marc@marclaporte.com>
|
||||
mateon1 <matin1111@wp.pl>
|
||||
mogwa1 <devriesb@gmail.com>
|
||||
moshen <moshen.colin@gmail.com>
|
||||
Moter8 <moter8@gmail.com>
|
||||
mvdan <mvdan@mvdan.cc>
|
||||
norgeous <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
nrm21 <natemorrison@gmail.com>
|
||||
Nutomic <me@nutomic.com>
|
||||
pascalj <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
peterhoeg <peter@speartail.com>
|
||||
philips <brandon@ifup.org>
|
||||
piobpl <piotrb10@gmail.com>
|
||||
plouj <ploujj@gmail.com>
|
||||
pluby <phill.luby@newredo.com>
|
||||
pyfisch <pyfisch@gmail.com>
|
||||
qbit <qbit@deftly.net>
|
||||
ralder <ralder@yandex.ru>
|
||||
Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
rumpelsepp <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
simplypeachy <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
|
||||
snnd <dw@risu.io>
|
||||
Stefan-Code <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
timabell <tim@timwise.co.uk>
|
||||
tnn2 <tnn@nygren.pp.se>
|
||||
tojrobinson <tully@tojr.org>
|
||||
tpng <benny.tpng@gmail.com>
|
||||
tylerbrazier <tyler@tylerbrazier.com>
|
||||
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
veeti <veeti.paananen@rojekti.fi>
|
||||
Vilbrekin <vilbrekin@gmail.com>
|
||||
wkennington <william@wkennington.com>
|
||||
wsgcsysadmin <e.meitner@willystreet.coo>
|
||||
wweich <wweich@users.noreply.github.com> <wweich@gmx.de>
|
||||
Zillode <zillode@zillode.be>
|
||||
zukoo <fxgsell@gmail.com>
|
||||
marclaporte <marc@laporte.name>
|
||||
mateon1 <matin1111@wp.pl>
|
||||
mogwa1 <devriesb@gmail.com>
|
||||
moshen <moshen.colin@gmail.com>
|
||||
Moter8 <moter8@gmail.com>
|
||||
mpx <mark@kyne.com.au>
|
||||
mvdan <mvdan@mvdan.cc>
|
||||
Niller303 <nielsproest@hotmail.com>
|
||||
Niller303 <seje.niels@hotmail.com>
|
||||
norgeous <daniel@harte.me>
|
||||
norgeous <daniel@danielharte.co.uk>
|
||||
norgeous <norgeous@users.noreply.github.com>
|
||||
nov1n <robert@carosi.nl>
|
||||
nrm21 <natemorrison@gmail.com>
|
||||
Nutomic <me@nutomic.com>
|
||||
pascalj <github@pascalj.com>
|
||||
pascalj <mail@pascal-jungblut.com>
|
||||
peterhoeg <peter@speartail.com>
|
||||
philips <brandon@ifup.org>
|
||||
piobpl <piotrb10@gmail.com>
|
||||
plouj <ploujj@gmail.com>
|
||||
pluby <phill.luby@newredo.com>
|
||||
ProactiveServices <aD@simplypeachy.co.uk>
|
||||
ProactiveServices <simplypeachy@users.noreply.github.com>
|
||||
ProactiveServices <ProactiveServices@users.noreply.github.com>
|
||||
pyfisch <pyfisch@gmail.com>
|
||||
qbit <qbit@deftly.net>
|
||||
ralder <ralder@yandex.ru>
|
||||
rasa <ross@smithii.com>
|
||||
Rewt0r <rewt0r@gmx.com>
|
||||
Rewt0r <Rewt0r@users.noreply.github.com>
|
||||
rumpelsepp <stefan@sevenbyte.org>
|
||||
rumpelsepp <rumpelsepp@sevenbyte.org>
|
||||
sacheendra <sacheendra.t@gmail.com>
|
||||
scienmind <scintertech@cryptolab.net>
|
||||
sciurius <jvromans@squirrel.nl>
|
||||
seehuhn <voss@seehuhn.de>
|
||||
Smiley73 <heiko@zuerker.org>
|
||||
snnd <dw@risu.io>
|
||||
snugghash <suhas.gundimeda@gmail.com>
|
||||
snugghash <snugghash@gmail.com>
|
||||
Stefan-Code <stefan.github@gmail.com>
|
||||
Stefan-Code <Stefan.github@gmail.com>
|
||||
timabell <tim@timwise.co.uk>
|
||||
timhowes <timhowes@berkeley.edu>
|
||||
tnn2 <tnn@nygren.pp.se>
|
||||
tojrobinson <tully@tojr.org>
|
||||
tpng <benny.tpng@gmail.com>
|
||||
tylerbrazier <tyler@tylerbrazier.com>
|
||||
Unrud <unrud@openaliasbox.org>
|
||||
Unrud <Unrud@users.noreply.github.com>
|
||||
uok <ueomkail@gmail.com>
|
||||
uok <uok@users.noreply.github.com>
|
||||
veeti <veeti.paananen@rojekti.fi>
|
||||
Vilbrekin <vilbrekin@gmail.com>
|
||||
wkennington <william@wkennington.com>
|
||||
WSGCSysadmin <e.meitner@willystreet.coop>
|
||||
wweich <wweich@users.noreply.github.com>
|
||||
wweich <wweich@gmx.de>
|
||||
wweich <wulf@weich-kr.de>
|
||||
xduugu <cedric@gmx.ca>
|
||||
zaynetro <romanznet@gmail.com>
|
||||
Zillode <zillode@zillode.be>
|
||||
zukoo <fxgsell@gmail.com>
|
||||
|
||||
README.md (86 changes)
@@ -1,23 +1,60 @@
|
||||
# Syncthing
|
||||
[![Syncthing][14]][15]
|
||||
|
||||
[](http://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](http://godoc.org/github.com/syncthing/syncthing)
|
||||
---
|
||||
|
||||
[](https://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](https://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](https://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](https://build.syncthing.net/job/syncthing/lastBuild/)
|
||||
[](https://godoc.org/github.com/syncthing/syncthing)
|
||||
[](https://www.mozilla.org/MPL/2.0/)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/88)
|
||||
[](https://goreportcard.com/report/github.com/syncthing/syncthing)
|
||||
|
||||
This is the Syncthing project which pursues the following goals:
|
||||
## Goals
|
||||
|
||||
1. Define a protocol for synchronization of a folder between a number of
|
||||
collaborating devices. This protocol should be well defined, unambiguous,
|
||||
easily understood, free to use, efficient, secure and language neutral.
|
||||
This is called the [Block Exchange Protocol][1].
|
||||
Syncthing is a **continuous file synchronization program**. It synchronizes
|
||||
files between two or more computers. We strive to fulfill the goals below.
|
||||
The goals are listed in order of importance, the most important one being
|
||||
the first. This is the summary version of the goal list - for more
|
||||
commentary, see the full [Goals document][13].
|
||||
|
||||
2. Provide the reference implementation to demonstrate the usability of
|
||||
said protocol. This is the `syncthing` utility. We hope that
|
||||
alternative, compatible implementations of the protocol will arise.
|
||||
Syncthing should be:
|
||||
|
||||
The two are evolving together; the protocol is not to be considered
|
||||
stable until Syncthing 1.0 is released, at which point it is locked down
|
||||
for incompatible changes.
|
||||
1. Safe From Data Loss
|
||||
|
||||
Protecting the user's data is paramount. We take every reasonable
|
||||
precaution to avoid corrupting the user's files.
|
||||
|
||||
2. Secure Against Attackers
|
||||
|
||||
Again, protecting the user's data is paramount. Regardless of our other
|
||||
goals we must never allow the user's data to be susceptible to
|
||||
eavesdropping or modification by unauthorized parties.
|
||||
|
||||
3. Easy to Use
|
||||
|
||||
Syncthing should be approachable, understandable and inclusive.
|
||||
|
||||
4. Automatic
|
||||
|
||||
User interaction should be required only when absolutely necessary.
|
||||
|
||||
5. Universally Available
|
||||
|
||||
Syncthing should run on every common computer. We are mindful that the
|
||||
latest technology is not always available to any given individual.
|
||||
|
||||
6. For Individuals
|
||||
|
||||
Syncthing is primarily about empowering the individual user with safe,
|
||||
secure and easy to use file synchronization.
|
||||
|
||||
7. Everything Else
|
||||
|
||||
There are many things we care about that don't make it on to the list. It
|
||||
is fine to optimize for these values, as long as they are not in conflict
|
||||
with the stated goals above.
|
||||
|
||||
## Getting Started
|
||||
|
||||
@@ -27,6 +64,11 @@ There are a few examples for keeping Syncthing running in the background
|
||||
on your system in [the etc directory][3]. There are also several [GUI
|
||||
implementations][11] for Windows, Mac and Linux.
|
||||
|
||||
## Vote on features/bugs
|
||||
|
||||
We'd like to encourage you to [vote][12] on issues that matter to you.
|
||||
This helps the team understand what the biggest pain points are for our users, and could potentially influence what is being worked on next.
|
||||
|
||||
## Getting in Touch
|
||||
|
||||
The first and best point of contact is the [Forum][8]. There is also an IRC
|
||||
@@ -55,14 +97,18 @@ Please see the [Syncthing documentation site][6].
|
||||
|
||||
All code is licensed under the [MPLv2 License][7].
|
||||
|
||||
[1]: http://docs.syncthing.net/specs/bep-v1.html
|
||||
[2]: http://docs.syncthing.net/intro/getting-started.html
|
||||
[1]: https://docs.syncthing.net/specs/bep-v1.html
|
||||
[2]: https://docs.syncthing.net/intro/getting-started.html
|
||||
[3]: https://github.com/syncthing/syncthing/blob/master/etc
|
||||
[4]: http://www.freenode.net/irc_servers.shtml
|
||||
[5]: http://docs.syncthing.net/dev/building.html
|
||||
[6]: http://docs.syncthing.net/
|
||||
[4]: https://www.freenode.net/
|
||||
[5]: https://docs.syncthing.net/dev/building.html
|
||||
[6]: https://docs.syncthing.net/
|
||||
[7]: https://github.com/syncthing/syncthing/blob/master/LICENSE
|
||||
[8]: https://forum.syncthing.net/
|
||||
[9]: https://kiwiirc.com/client/irc.freenode.net/#syncthing
|
||||
[10]: https://github.com/syncthing/syncthing/issues
|
||||
[11]: http://docs.syncthing.net/users/contrib.html#gui-wrappers
|
||||
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
|
||||
[12]: https://www.bountysource.com/teams/syncthing/issues
|
||||
[13]: https://github.com/syncthing/syncthing/blob/master/GOALS.md
|
||||
[14]: assets/logo-text-128.png
|
||||
[15]: https://syncthing.net/
|
||||
|
||||
|
9 binary image assets updated with smaller versions (12→9.8 KiB, 23→20 KiB, 3.4→2.2 KiB, 48→40 KiB, 6.4→4.9 KiB, 24→19 KiB, 47→38 KiB, 12→9.8 KiB, 12→8.2 KiB).
assets/statusicons/default.svg (new file, 1.4 KiB)
assets/statusicons/notify.svg (new file, 1.9 KiB)
assets/statusicons/pause.svg (new file, 1.9 KiB)
assets/statusicons/sync.svg (new file, 2.1 KiB)
assets/syncthing_folder_icon.icns (new binary file)
build.sh (82 changes)
@@ -61,9 +61,9 @@ case "${1:-default}" in
|
||||
prerelease)
|
||||
go run script/authors.go
|
||||
build transifex
|
||||
git add -A gui/default/assets/ lib/auto/
|
||||
pushd man ; ./refresh.sh ; popd
|
||||
git add -A man
|
||||
git add -A gui man
|
||||
git commit -m 'gui, man: Update docs & translations'
|
||||
;;
|
||||
|
||||
noupgrade)
|
||||
@@ -93,35 +93,6 @@ case "${1:-default}" in
|
||||
done
|
||||
;;
|
||||
|
||||
test-cov)
|
||||
ulimit -t 600 &>/dev/null || true
|
||||
ulimit -d 512000 &>/dev/null || true
|
||||
ulimit -m 512000 &>/dev/null || true
|
||||
|
||||
echo "mode: set" > coverage.out
|
||||
fail=0
|
||||
|
||||
# For every package in the repo
|
||||
for dir in $(go list ./lib/... ./cmd/...) ; do
|
||||
# run the tests
|
||||
GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -race -coverprofile=profile.out $dir
|
||||
if [ -f profile.out ] ; then
|
||||
# and if there was test output, append it to coverage.out
|
||||
grep -v "mode: " profile.out >> coverage.out
|
||||
rm profile.out
|
||||
fi
|
||||
done
|
||||
|
||||
gocov convert coverage.out | gocov-xml > coverage.xml
|
||||
|
||||
# This is usually run from within Jenkins. If it is, we need to
|
||||
# tweak the paths in coverage.xml so cobertura finds the
|
||||
# source.
|
||||
if [[ "${WORKSPACE:-default}" != "default" ]] ; then
|
||||
sed "s#$WORKSPACE##g" < coverage.xml > coverage.xml.new && mv coverage.xml.new coverage.xml
|
||||
fi
|
||||
;;
|
||||
|
||||
test-xunit)
|
||||
ulimit -t 600 &>/dev/null || true
|
||||
ulimit -d 512000 &>/dev/null || true
|
||||
@@ -131,55 +102,6 @@ case "${1:-default}" in
|
||||
go2xunit -output tests.xml -fail < tests.out
|
||||
;;
|
||||
|
||||
docker-all)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -c './build.sh clean \
|
||||
&& ./build.sh test-cov \
|
||||
&& ./build.sh bench \
|
||||
&& ./build.sh all'
|
||||
;;
|
||||
|
||||
docker-test)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc './build.sh clean \
|
||||
&& go run build.go -race \
|
||||
&& export GOPATH=$(pwd)/Godeps/_workspace:$GOPATH \
|
||||
&& cd test \
|
||||
&& go test -tags integration -v -timeout 90m -short \
|
||||
&& git clean -fxd .'
|
||||
;;
|
||||
|
||||
docker-lint)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc 'go run build.go lint'
|
||||
;;
|
||||
|
||||
|
||||
docker-vet)
|
||||
img=${DOCKERIMG:-syncthing/build:latest}
|
||||
docker run --rm -h syncthing-builder -u $(id -u) -t \
|
||||
-v $(pwd):/go/src/github.com/syncthing/syncthing \
|
||||
-w /go/src/github.com/syncthing/syncthing \
|
||||
-e "STTRACE=$STTRACE" \
|
||||
"$img" \
|
||||
sh -euxc 'go run build.go vet'
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown build command $1"
|
||||
;;
|
||||
|
||||
@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

// This doesn't build on Windows due to the Rusage stuff.
cmd/stcli/LICENSE (new file, 19 lines)
@@ -0,0 +1,19 @@
Copyright (C) 2014 Audrius Butkevičius

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
  all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
cmd/stcli/client.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
type APIClient struct {
|
||||
httpClient http.Client
|
||||
endpoint string
|
||||
apikey string
|
||||
username string
|
||||
password string
|
||||
id string
|
||||
csrf string
|
||||
}
|
||||
|
||||
var instance *APIClient
|
||||
|
||||
func getClient(c *cli.Context) *APIClient {
|
||||
if instance != nil {
|
||||
return instance
|
||||
}
|
||||
endpoint := c.GlobalString("endpoint")
|
||||
if !strings.HasPrefix(endpoint, "http") {
|
||||
endpoint = "http://" + endpoint
|
||||
}
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: c.GlobalBool("insecure"),
|
||||
},
|
||||
},
|
||||
}
|
||||
client := APIClient{
|
||||
httpClient: httpClient,
|
||||
endpoint: endpoint,
|
||||
apikey: c.GlobalString("apikey"),
|
||||
username: c.GlobalString("username"),
|
||||
password: c.GlobalString("password"),
|
||||
}
|
||||
|
||||
if client.apikey == "" {
|
||||
request, err := http.NewRequest("GET", client.endpoint, nil)
|
||||
die(err)
|
||||
response := client.handleRequest(request)
|
||||
client.id = response.Header.Get("X-Syncthing-ID")
|
||||
if client.id == "" {
|
||||
die("Failed to get device ID")
|
||||
}
|
||||
for _, item := range response.Cookies() {
|
||||
if item.Name == "CSRF-Token-"+client.id[:5] {
|
||||
client.csrf = item.Value
|
||||
goto csrffound
|
||||
}
|
||||
}
|
||||
die("Failed to get CSRF token")
|
||||
csrffound:
|
||||
}
|
||||
instance = &client
|
||||
return &client
|
||||
}
|
||||
|
||||
func (client *APIClient) handleRequest(request *http.Request) *http.Response {
|
||||
if client.apikey != "" {
|
||||
request.Header.Set("X-API-Key", client.apikey)
|
||||
}
|
||||
if client.username != "" || client.password != "" {
|
||||
request.SetBasicAuth(client.username, client.password)
|
||||
}
|
||||
if client.csrf != "" {
|
||||
request.Header.Set("X-CSRF-Token-"+client.id[:5], client.csrf)
|
||||
}
|
||||
|
||||
response, err := client.httpClient.Do(request)
|
||||
die(err)
|
||||
|
||||
if response.StatusCode == 404 {
|
||||
die("Invalid endpoint or API call")
|
||||
} else if response.StatusCode == 401 {
|
||||
die("Invalid username or password")
|
||||
} else if response.StatusCode == 403 {
|
||||
if client.apikey == "" {
|
||||
die("Invalid CSRF token")
|
||||
}
|
||||
die("Invalid API key")
|
||||
} else if response.StatusCode != 200 {
|
||||
body := strings.TrimSpace(string(responseToBArray(response)))
|
||||
if body != "" {
|
||||
die(body)
|
||||
}
|
||||
die("Unknown HTTP status returned: " + response.Status)
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
func httpGet(c *cli.Context, url string) *http.Response {
|
||||
client := getClient(c)
|
||||
request, err := http.NewRequest("GET", client.endpoint+"/rest/"+url, nil)
|
||||
die(err)
|
||||
return client.handleRequest(request)
|
||||
}
|
||||
|
||||
func httpPost(c *cli.Context, url string, body string) *http.Response {
|
||||
client := getClient(c)
|
||||
request, err := http.NewRequest("POST", client.endpoint+"/rest/"+url, bytes.NewBufferString(body))
|
||||
die(err)
|
||||
return client.handleRequest(request)
|
||||
}
|
||||
188
cmd/stcli/cmd_devices.go
Normal file
@@ -0,0 +1,188 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "devices",
|
||||
HideHelp: true,
|
||||
Usage: "Device command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List registered devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: devicesList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Add a new device",
|
||||
Requires: &cli.Requires{"device id", "device name?"},
|
||||
Action: devicesAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Remove an existing device",
|
||||
Requires: &cli.Requires{"device id"},
|
||||
Action: devicesRemove,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a property of a device",
|
||||
Requires: &cli.Requires{"device id", "property"},
|
||||
Action: devicesGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a property of a device",
|
||||
Requires: &cli.Requires{"device id", "property", "value..."},
|
||||
Action: devicesSet,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func devicesList(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
first := true
|
||||
writer := newTableWriter()
|
||||
for _, device := range cfg.Devices {
|
||||
if !first {
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
fmt.Fprintln(writer, "ID:\t", device.DeviceID, "\t")
|
||||
fmt.Fprintln(writer, "Name:\t", device.Name, "\t(name)")
|
||||
fmt.Fprintln(writer, "Address:\t", strings.Join(device.Addresses, " "), "\t(address)")
|
||||
fmt.Fprintln(writer, "Compression:\t", device.Compression, "\t(compression)")
|
||||
fmt.Fprintln(writer, "Certificate name:\t", device.CertName, "\t(certname)")
|
||||
fmt.Fprintln(writer, "Introducer:\t", device.Introducer, "\t(introducer)")
|
||||
first = false
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func devicesAdd(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
|
||||
newDevice := config.DeviceConfiguration{
|
||||
DeviceID: id,
|
||||
Name: nid,
|
||||
Addresses: []string{"dynamic"},
|
||||
}
|
||||
|
||||
if len(c.Args()) > 1 {
|
||||
newDevice.Name = c.Args()[1]
|
||||
}
|
||||
|
||||
if len(c.Args()) > 2 {
|
||||
addresses := c.Args()[2:]
|
||||
for _, item := range addresses {
|
||||
if item == "dynamic" {
|
||||
continue
|
||||
}
|
||||
validAddress(item)
|
||||
}
|
||||
newDevice.Addresses = addresses
|
||||
}
|
||||
|
||||
cfg := getConfig(c)
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID == id {
|
||||
die("Device " + nid + " already exists")
|
||||
}
|
||||
}
|
||||
cfg.Devices = append(cfg.Devices, newDevice)
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func devicesRemove(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
if nid == getMyID(c) {
|
||||
die("Cannot remove yourself")
|
||||
}
|
||||
cfg := getConfig(c)
|
||||
for i, device := range cfg.Devices {
|
||||
if device.DeviceID == id {
|
||||
last := len(cfg.Devices) - 1
|
||||
cfg.Devices[i] = cfg.Devices[last]
|
||||
cfg.Devices = cfg.Devices[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
|
||||
func devicesGet(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
arg := c.Args()[1]
|
||||
cfg := getConfig(c)
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID != id {
|
||||
continue
|
||||
}
|
||||
switch strings.ToLower(arg) {
|
||||
case "name":
|
||||
fmt.Println(device.Name)
|
||||
case "address":
|
||||
fmt.Println(strings.Join(device.Addresses, "\n"))
|
||||
case "compression":
|
||||
fmt.Println(device.Compression.String())
|
||||
case "certname":
|
||||
fmt.Println(device.CertName)
|
||||
case "introducer":
|
||||
fmt.Println(device.Introducer)
|
||||
default:
|
||||
die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
|
||||
func devicesSet(c *cli.Context) {
|
||||
nid := c.Args()[0]
|
||||
id := parseDeviceID(nid)
|
||||
arg := c.Args()[1]
|
||||
config := getConfig(c)
|
||||
for i, device := range config.Devices {
|
||||
if device.DeviceID != id {
|
||||
continue
|
||||
}
|
||||
switch strings.ToLower(arg) {
|
||||
case "name":
|
||||
config.Devices[i].Name = strings.Join(c.Args()[2:], " ")
|
||||
case "address":
|
||||
for _, item := range c.Args()[2:] {
|
||||
if item == "dynamic" {
|
||||
continue
|
||||
}
|
||||
validAddress(item)
|
||||
}
|
||||
config.Devices[i].Addresses = c.Args()[2:]
|
||||
case "compression":
|
||||
err := config.Devices[i].Compression.UnmarshalText([]byte(c.Args()[2]))
|
||||
die(err)
|
||||
case "certname":
|
||||
config.Devices[i].CertName = strings.Join(c.Args()[2:], " ")
|
||||
case "introducer":
|
||||
config.Devices[i].Introducer = parseBool(c.Args()[2])
|
||||
default:
|
||||
die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
|
||||
}
|
||||
setConfig(c, config)
|
||||
return
|
||||
}
|
||||
die("Device " + nid + " not found")
|
||||
}
|
||||
67
cmd/stcli/cmd_errors.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "errors",
|
||||
HideHelp: true,
|
||||
Usage: "Error command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: errorsShow,
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "Push an error to active clients",
|
||||
Requires: &cli.Requires{"error message..."},
|
||||
Action: errorsPush,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Clear pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/error/clear"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func errorsShow(c *cli.Context) {
|
||||
response := httpGet(c, "system/error")
|
||||
var data map[string][]map[string]interface{}
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
writer := newTableWriter()
|
||||
for _, item := range data["errors"] {
|
||||
time := item["time"].(string)[:19]
|
||||
time = strings.Replace(time, "T", " ", 1)
|
||||
err := item["error"].(string)
|
||||
err = strings.TrimSpace(err)
|
||||
fmt.Fprintln(writer, time+":\t"+err)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) {
|
||||
err := strings.Join(c.Args(), " ")
|
||||
response := httpPost(c, "system/error", strings.TrimSpace(err))
|
||||
if response.StatusCode != 200 {
|
||||
err = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
}
|
||||
351
cmd/stcli/cmd_folders.go
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "folders",
|
||||
HideHelp: true,
|
||||
Usage: "Folder command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List available folders",
|
||||
Requires: &cli.Requires{},
|
||||
Action: foldersList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Add a new folder",
|
||||
Requires: &cli.Requires{"folder id", "directory"},
|
||||
Action: foldersAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Remove an existing folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersRemove,
|
||||
},
|
||||
{
|
||||
Name: "override",
|
||||
Usage: "Override changes from other nodes for a master folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersOverride,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property", "value..."},
|
||||
Action: foldersSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersUnset,
|
||||
},
|
||||
{
|
||||
Name: "devices",
|
||||
Usage: "Folder devices command group",
|
||||
HideHelp: true,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List of devices which the folder is shared with",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Share a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Unshare a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesRemove,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Unshare a folder with all devices",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesClear,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func foldersList(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
first := true
|
||||
writer := newTableWriter()
|
||||
for _, folder := range cfg.Folders {
|
||||
if !first {
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
fmt.Fprintln(writer, "ID:\t", folder.ID, "\t")
|
||||
fmt.Fprintln(writer, "Path:\t", folder.RawPath, "\t(directory)")
|
||||
fmt.Fprintln(writer, "Folder type:\t", folder.Type, "\t(type)")
|
||||
fmt.Fprintln(writer, "Ignore permissions:\t", folder.IgnorePerms, "\t(permissions)")
|
||||
fmt.Fprintln(writer, "Rescan interval in seconds:\t", folder.RescanIntervalS, "\t(rescan)")
|
||||
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Fprintln(writer, "Versioning:\t", folder.Versioning.Type, "\t(versioning)")
|
||||
for key, value := range folder.Versioning.Params {
|
||||
fmt.Fprintf(writer, "Versioning %s:\t %s \t(versioning-%s)\n", key, value, key)
|
||||
}
|
||||
}
|
||||
first = false
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func foldersAdd(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
abs, err := filepath.Abs(c.Args()[1])
|
||||
die(err)
|
||||
folder := config.FolderConfiguration{
|
||||
ID: c.Args()[0],
|
||||
RawPath: filepath.Clean(abs),
|
||||
}
|
||||
cfg.Folders = append(cfg.Folders, folder)
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func foldersRemove(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID == rid {
|
||||
last := len(cfg.Folders) - 1
|
||||
cfg.Folders[i] = cfg.Folders[last]
|
||||
cfg.Folders = cfg.Folders[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID == rid && folder.Type == config.FolderTypeSendOnly {
|
||||
response := httpPost(c, "db/override", "")
|
||||
if response.StatusCode != 200 {
|
||||
err := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found or folder not master")
|
||||
}
|
||||
|
||||
func foldersGet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
value, ok := folder.Versioning.Params[arg]
|
||||
if ok {
|
||||
fmt.Println(value)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
fmt.Println(folder.RawPath)
|
||||
case "type":
|
||||
fmt.Println(folder.Type)
|
||||
case "permissions":
|
||||
fmt.Println(folder.IgnorePerms)
|
||||
case "rescan":
|
||||
fmt.Println(folder.RescanIntervalS)
|
||||
case "versioning":
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Println(folder.Versioning.Type)
|
||||
}
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, type, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersSet(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
val := strings.Join(c.Args()[2:], " ")
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
cfg.Folders[i].Versioning.Params[arg[11:]] = val
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
cfg.Folders[i].RawPath = val
|
||||
case "type":
|
||||
var t config.FolderType
|
||||
if err := t.UnmarshalText([]byte(val)); err != nil {
|
||||
die("Invalid folder type: " + err.Error())
|
||||
}
|
||||
cfg.Folders[i].Type = t
|
||||
case "permissions":
|
||||
cfg.Folders[i].IgnorePerms = parseBool(val)
|
||||
case "rescan":
|
||||
cfg.Folders[i].RescanIntervalS = parseInt(val)
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = val
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, master, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersUnset(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
if _, ok := folder.Versioning.Params[arg]; ok {
|
||||
delete(cfg.Folders[i].Versioning.Params, arg)
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = ""
|
||||
cfg.Folders[i].Versioning.Params = make(map[string]string)
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesList(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
fmt.Println(device.DeviceID)
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesAdd(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
die("Device " + c.Args()[1] + " is already part of this folder")
|
||||
}
|
||||
}
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID == nid {
|
||||
cfg.Folders[i].Devices = append(folder.Devices, config.FolderDeviceConfiguration{
|
||||
DeviceID: device.DeviceID,
|
||||
})
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found in device list")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesRemove(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for ri, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for ni, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
last := len(folder.Devices) - 1
|
||||
cfg.Folders[ri].Devices[ni] = folder.Devices[last]
|
||||
cfg.Folders[ri].Devices = cfg.Folders[ri].Devices[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesClear(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
cfg.Folders[i].Devices = []config.FolderDeviceConfiguration{}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
78
cmd/stcli/cmd_general.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, []cli.Command{
|
||||
{
|
||||
Name: "id",
|
||||
Usage: "Get ID of the Syncthing client",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalID,
|
||||
},
|
||||
{
|
||||
Name: "status",
|
||||
Usage: "Configuration status, whether or not a restart is required for changes to take effect",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalStatus,
|
||||
},
|
||||
{
|
||||
Name: "restart",
|
||||
Usage: "Restart syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/restart"),
|
||||
},
|
||||
{
|
||||
Name: "shutdown",
|
||||
Usage: "Shutdown syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/shutdown"),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/reset"),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/upgrade"),
|
||||
},
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Syncthing client version",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalVersion,
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
func generalID(c *cli.Context) {
|
||||
fmt.Println(getMyID(c))
|
||||
}
|
||||
|
||||
func generalStatus(c *cli.Context) {
|
||||
response := httpGet(c, "system/config/insync")
|
||||
var status struct{ ConfigInSync bool }
|
||||
json.Unmarshal(responseToBArray(response), &status)
|
||||
if !status.ConfigInSync {
|
||||
die("Config out of sync")
|
||||
}
|
||||
fmt.Println("Config in sync")
|
||||
}
|
||||
|
||||
func generalVersion(c *cli.Context) {
|
||||
response := httpGet(c, "system/version")
|
||||
version := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &version)
|
||||
prettyPrintJSON(version)
|
||||
}
|
||||
127
cmd/stcli/cmd_gui.go
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "gui",
|
||||
HideHelp: true,
|
||||
Usage: "GUI command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all GUI configuration settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: guiDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting", "value"},
|
||||
Action: guiSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiUnset,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func guiDump(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
writer := newTableWriter()
|
||||
fmt.Fprintln(writer, "Enabled:\t", cfg.Enabled, "\t(enabled)")
|
||||
fmt.Fprintln(writer, "Use HTTPS:\t", cfg.UseTLS(), "\t(tls)")
|
||||
fmt.Fprintln(writer, "Listen Addresses:\t", cfg.Address(), "\t(address)")
|
||||
if cfg.User != "" {
|
||||
fmt.Fprintln(writer, "Authentication User:\t", cfg.User, "\t(username)")
|
||||
fmt.Fprintln(writer, "Authentication Password:\t", cfg.Password, "\t(password)")
|
||||
}
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Fprintln(writer, "API Key:\t", cfg.APIKey, "\t(apikey)")
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func guiGet(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
fmt.Println(cfg.Enabled)
|
||||
case "tls":
|
||||
fmt.Println(cfg.UseTLS())
|
||||
case "address":
|
||||
fmt.Println(cfg.Address())
|
||||
case "user":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.User)
|
||||
}
|
||||
case "password":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.Password)
|
||||
}
|
||||
case "apikey":
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Println(cfg.APIKey)
|
||||
}
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
}
|
||||
|
||||
func guiSet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
cfg.GUI.Enabled = parseBool(val)
|
||||
case "tls":
|
||||
cfg.GUI.RawUseTLS = parseBool(val)
|
||||
case "address":
|
||||
validAddress(val)
|
||||
cfg.GUI.RawAddress = val
|
||||
case "user":
|
||||
cfg.GUI.User = val
|
||||
case "password":
|
||||
cfg.GUI.Password = val
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = val
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func guiUnset(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "user":
|
||||
cfg.GUI.User = ""
|
||||
case "password":
|
||||
cfg.GUI.Password = ""
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = ""
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
173
cmd/stcli/cmd_options.go
Normal file
@@ -0,0 +1,173 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "options",
|
||||
HideHelp: true,
|
||||
Usage: "Options command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all Syncthing option settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: optionsDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: optionsGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting", "value..."},
|
||||
Action: optionsSet,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func optionsDump(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
writer := newTableWriter()
|
||||
|
||||
fmt.Fprintln(writer, "Sync protocol listen addresses:\t", strings.Join(cfg.ListenAddresses, " "), "\t(addresses)")
|
||||
fmt.Fprintln(writer, "Global discovery enabled:\t", cfg.GlobalAnnEnabled, "\t(globalannenabled)")
|
||||
fmt.Fprintln(writer, "Global discovery servers:\t", strings.Join(cfg.GlobalAnnServers, " "), "\t(globalannserver)")
|
||||
|
||||
fmt.Fprintln(writer, "Local discovery enabled:\t", cfg.LocalAnnEnabled, "\t(localannenabled)")
|
||||
fmt.Fprintln(writer, "Local discovery port:\t", cfg.LocalAnnPort, "\t(localannport)")
|
||||
|
||||
fmt.Fprintln(writer, "Outgoing rate limit in KiB/s:\t", cfg.MaxSendKbps, "\t(maxsend)")
|
||||
fmt.Fprintln(writer, "Incoming rate limit in KiB/s:\t", cfg.MaxRecvKbps, "\t(maxrecv)")
|
||||
fmt.Fprintln(writer, "Reconnect interval in seconds:\t", cfg.ReconnectIntervalS, "\t(reconnect)")
|
||||
fmt.Fprintln(writer, "Start browser:\t", cfg.StartBrowser, "\t(browser)")
|
||||
fmt.Fprintln(writer, "Enable UPnP:\t", cfg.NATEnabled, "\t(nat)")
|
||||
fmt.Fprintln(writer, "UPnP Lease in minutes:\t", cfg.NATLeaseM, "\t(natlease)")
|
||||
fmt.Fprintln(writer, "UPnP Renewal period in minutes:\t", cfg.NATRenewalM, "\t(natrenew)")
|
||||
fmt.Fprintln(writer, "Restart on Wake Up:\t", cfg.RestartOnWakeup, "\t(wake)")
|
||||
|
||||
reporting := "unrecognized value"
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
reporting = "false"
|
||||
case 0:
|
||||
reporting = "undecided/false"
|
||||
case 1:
|
||||
reporting = "true"
|
||||
}
|
||||
fmt.Fprintln(writer, "Anonymous usage reporting:\t", reporting, "\t(reporting)")
|
||||
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func optionsGet(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
fmt.Println(strings.Join(cfg.ListenAddresses, "\n"))
|
||||
case "globalannenabled":
|
||||
fmt.Println(cfg.GlobalAnnEnabled)
|
||||
case "globalannservers":
|
||||
fmt.Println(strings.Join(cfg.GlobalAnnServers, "\n"))
|
||||
case "localannenabled":
|
||||
fmt.Println(cfg.LocalAnnEnabled)
|
||||
case "localannport":
|
||||
fmt.Println(cfg.LocalAnnPort)
|
||||
case "maxsend":
|
||||
fmt.Println(cfg.MaxSendKbps)
|
||||
case "maxrecv":
|
||||
fmt.Println(cfg.MaxRecvKbps)
|
||||
case "reconnect":
|
||||
fmt.Println(cfg.ReconnectIntervalS)
|
||||
case "browser":
|
||||
fmt.Println(cfg.StartBrowser)
|
||||
case "nat":
|
||||
fmt.Println(cfg.NATEnabled)
|
||||
case "natlease":
|
||||
fmt.Println(cfg.NATLeaseM)
|
||||
case "natrenew":
|
||||
fmt.Println(cfg.NATRenewalM)
|
||||
case "reporting":
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
fmt.Println("false")
|
||||
case 0:
|
||||
fmt.Println("undecided/false")
|
||||
case 1:
|
||||
fmt.Println("true")
|
||||
default:
|
||||
fmt.Println("unknown")
|
||||
}
|
||||
case "wake":
|
||||
fmt.Println(cfg.RestartOnWakeup)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
}
|
||||
|
||||
func optionsSet(c *cli.Context) {
|
||||
config := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.ListenAddresses = c.Args().Tail()
|
||||
case "globalannenabled":
|
||||
config.Options.GlobalAnnEnabled = parseBool(val)
|
||||
case "globalannserver":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.GlobalAnnServers = c.Args().Tail()
|
||||
case "localannenabled":
|
||||
config.Options.LocalAnnEnabled = parseBool(val)
|
||||
case "localannport":
|
||||
config.Options.LocalAnnPort = parsePort(val)
|
||||
case "maxsend":
|
||||
config.Options.MaxSendKbps = parseUint(val)
|
||||
case "maxrecv":
|
||||
config.Options.MaxRecvKbps = parseUint(val)
|
||||
case "reconnect":
|
||||
config.Options.ReconnectIntervalS = parseUint(val)
|
||||
case "browser":
|
||||
config.Options.StartBrowser = parseBool(val)
|
||||
case "nat":
|
||||
config.Options.NATEnabled = parseBool(val)
|
||||
case "natlease":
|
||||
config.Options.NATLeaseM = parseUint(val)
|
||||
case "natrenew":
|
||||
config.Options.NATRenewalM = parseUint(val)
|
||||
case "reporting":
|
||||
switch strings.ToLower(val) {
|
||||
case "u", "undecided", "unset":
|
||||
config.Options.URAccepted = 0
|
||||
default:
|
||||
boolvalue := parseBool(val)
|
||||
if boolvalue {
|
||||
config.Options.URAccepted = 1
|
||||
} else {
|
||||
config.Options.URAccepted = -1
|
||||
}
|
||||
}
|
||||
case "wake":
|
||||
config.Options.RestartOnWakeup = parseBool(val)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
setConfig(c, config)
|
||||
}
|
||||
72
cmd/stcli/cmd_report.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "report",
|
||||
HideHelp: true,
|
||||
Usage: "Reporting command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Report system state",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportSystem,
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportConnections,
|
||||
},
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Usage report",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportUsage,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func reportSystem(c *cli.Context) {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
func reportConnections(c *cli.Context) {
|
||||
response := httpGet(c, "system/connections")
|
||||
data := make(map[string]map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
var overall map[string]interface{}
|
||||
for key, value := range data {
|
||||
if key == "total" {
|
||||
overall = value
|
||||
continue
|
||||
}
|
||||
value["Device ID"] = key
|
||||
prettyPrintJSON(value)
|
||||
fmt.Println()
|
||||
}
|
||||
if overall != nil {
|
||||
fmt.Println("=== Overall statistics ===")
|
||||
prettyPrintJSON(overall)
|
||||
}
|
||||
}
|
||||
|
||||
func reportUsage(c *cli.Context) {
|
||||
response := httpGet(c, "svc/report")
|
||||
report := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &report)
|
||||
prettyPrintJSON(report)
|
||||
}
|
||||
31
cmd/stcli/labels.go
Normal file
@@ -0,0 +1,31 @@
// Copyright (C) 2014 Audrius Butkevičius

package main

var jsonAttributeLabels = map[string]string{
    "folderMaxMiB": "Largest folder size in MiB",
    "folderMaxFiles": "Largest folder file count",
    "longVersion": "Long version",
    "totMiB": "Total size in MiB",
    "totFiles": "Total files",
    "uniqueID": "Unique ID",
    "numFolders": "Folder count",
    "numDevices": "Device count",
    "memoryUsageMiB": "Memory usage in MiB",
    "memorySize": "Total memory in MiB",
    "sha256Perf": "SHA256 Benchmark",
    "At": "Last contacted",
    "Completion": "Percent complete",
    "InBytesTotal": "Total bytes received",
    "OutBytesTotal": "Total bytes sent",
    "ClientVersion": "Client version",
    "alloc": "Memory allocated in bytes",
    "sys": "Memory using in bytes",
    "cpuPercent": "CPU load in percent",
    "extAnnounceOK": "External announcments working",
    "goroutines": "Number of Go routines",
    "myID": "Client ID",
    "tilde": "Tilde expands to",
    "arch": "Architecture",
    "os": "OS",
}
63
cmd/stcli/main.go
Normal file
@@ -0,0 +1,63 @@
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
    "sort"

    "github.com/AudriusButkevicius/cli"
)

type ByAlphabet []cli.Command

func (a ByAlphabet) Len() int { return len(a) }
func (a ByAlphabet) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByAlphabet) Less(i, j int) bool { return a[i].Name < a[j].Name }

var cliCommands []cli.Command

func main() {
    app := cli.NewApp()
    app.Name = "syncthing-cli"
    app.Author = "Audrius Butkevičius"
    app.Email = "audrius.butkevicius@gmail.com"
    app.Usage = "Syncthing command line interface"
    app.Version = "0.1"
    app.HideHelp = true

    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name: "endpoint, e",
            Value: "http://127.0.0.1:8384",
            Usage: "End point to connect to",
            EnvVar: "STENDPOINT",
        },
        cli.StringFlag{
            Name: "apikey, k",
            Value: "",
            Usage: "API Key",
            EnvVar: "STAPIKEY",
        },
        cli.StringFlag{
            Name: "username, u",
            Value: "",
            Usage: "Username",
            EnvVar: "STUSERNAME",
        },
        cli.StringFlag{
            Name: "password, p",
            Value: "",
            Usage: "Password",
            EnvVar: "STPASSWORD",
        },
        cli.BoolFlag{
            Name: "insecure, i",
            Usage: "Do not verify SSL certificate",
            EnvVar: "STINSECURE",
        },
    }

    sort.Sort(ByAlphabet(cliCommands))
    app.Commands = cliCommands
    app.RunAndExitOnError()
}
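For reference, a hypothetical invocation of the CLI assembled above, assuming the built binary is called `stcli`; the `-e`/`-k` global flags and the `devices list` and `folders add` commands are the ones defined in this commit, while the endpoint, API key, folder ID and path are placeholder values:

```bash
# Global flags come before the command group and its subcommand.
$ stcli -e http://127.0.0.1:8384 -k "$APIKEY" devices list
$ stcli -e http://127.0.0.1:8384 -k "$APIKEY" folders add documents ~/Documents
```

Since each flag also declares an `EnvVar`, exporting `STENDPOINT` and `STAPIKEY` lets the same commands run with the flags omitted.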
165
cmd/stcli/utils.go
Normal file
@@ -0,0 +1,165 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"unicode"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func responseToBArray(response *http.Response) []byte {
|
||||
defer response.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
die(err)
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func die(vals ...interface{}) {
|
||||
if len(vals) > 1 || vals[0] != nil {
|
||||
os.Stderr.WriteString(fmt.Sprintln(vals...))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func wrappedHTTPPost(url string) func(c *cli.Context) {
|
||||
return func(c *cli.Context) {
|
||||
httpPost(c, url, "")
|
||||
}
|
||||
}
|
||||
|
||||
func prettyPrintJSON(json map[string]interface{}) {
|
||||
writer := newTableWriter()
|
||||
remap := make(map[string]interface{})
|
||||
for k, v := range json {
|
||||
key, ok := jsonAttributeLabels[k]
|
||||
if !ok {
|
||||
key = firstUpper(k)
|
||||
}
|
||||
remap[key] = v
|
||||
}
|
||||
|
||||
jsonKeys := make([]string, 0, len(remap))
|
||||
for key := range remap {
|
||||
jsonKeys = append(jsonKeys, key)
|
||||
}
|
||||
sort.Strings(jsonKeys)
|
||||
for _, k := range jsonKeys {
|
||||
var value string
|
||||
rvalue := remap[k]
|
||||
switch rvalue.(type) {
|
||||
case int, int16, int32, int64, uint, uint16, uint32, uint64, float32, float64:
|
||||
value = fmt.Sprintf("%.0f", rvalue)
|
||||
default:
|
||||
value = fmt.Sprint(rvalue)
|
||||
}
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintln(writer, k+":\t"+value)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func firstUpper(str string) string {
|
||||
for i, v := range str {
|
||||
return string(unicode.ToUpper(v)) + str[i+1:]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func newTableWriter() *tabwriter.Writer {
|
||||
writer := new(tabwriter.Writer)
|
||||
writer.Init(os.Stdout, 0, 8, 0, '\t', 0)
|
||||
return writer
|
||||
}
|
||||
|
||||
func getMyID(c *cli.Context) string {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
return data["myID"].(string)
|
||||
}
|
||||
|
||||
func getConfig(c *cli.Context) config.Configuration {
|
||||
response := httpGet(c, "system/config")
|
||||
config := config.Configuration{}
|
||||
json.Unmarshal(responseToBArray(response), &config)
|
||||
return config
|
||||
}
|
||||
|
||||
func setConfig(c *cli.Context, cfg config.Configuration) {
|
||||
body, err := json.Marshal(cfg)
|
||||
die(err)
|
||||
response := httpPost(c, "system/config", string(body))
|
||||
if response.StatusCode != 200 {
|
||||
die("Unexpected status code", response.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func parseBool(input string) bool {
|
||||
val, err := strconv.ParseBool(input)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for a boolean")
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func parseInt(input string) int {
|
||||
val, err := strconv.ParseInt(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parseUint(input string) int {
|
||||
val, err := strconv.ParseUint(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an unsigned integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parsePort(input string) int {
|
||||
port := parseUint(input)
|
||||
if port < 1 || port > 65535 {
|
||||
die(input + " is not a valid port\nExpected value between 1 and 65535")
|
||||
}
|
||||
return port
|
||||
}
|
||||
|
||||
func validAddress(input string) {
|
||||
tokens := strings.Split(input, ":")
|
||||
if len(tokens) != 2 {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
matched, err := regexp.MatchString("^[a-zA-Z0-9]+([-a-zA-Z0-9.]+[-a-zA-Z0-9]+)?$", tokens[0])
|
||||
die(err)
|
||||
if !matched {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
parsePort(tokens[1])
|
||||
}
|
||||
|
||||
func parseDeviceID(input string) protocol.DeviceID {
|
||||
device, err := protocol.DeviceIDFromString(input)
|
||||
if err != nil {
|
||||
die(input + " is not a valid device id")
|
||||
}
|
||||
return device
|
||||
}
|
||||
@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

@@ -15,8 +15,6 @@ import (
"log"
"os"
"path/filepath"

"github.com/syncthing/syncthing/lib/symlinks"
)

func main() {
@@ -104,7 +102,7 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
mode: os.ModeSymlink,
}

tgt, _, err := symlinks.Read(path)
tgt, err := os.Readlink(path)
if err != nil {
return err
}

@@ -2,13 +2,13 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"bytes"
"crypto/rand"
"encoding/binary"
"flag"
"log"
"strings"
@@ -44,7 +44,7 @@ func main() {
flag.Parse()

if fake {
log.Println("My ID:", protocol.DeviceIDFromBytes(myID))
log.Println("My ID:", myID)
}

runbeacon(beacon.NewMulticast(mc), fake)
@@ -66,24 +66,25 @@ func recv(bc beacon.Interface) {
seen := make(map[string]bool)
for {
data, src := bc.Recv()
var ann discover.Announce
ann.UnmarshalXDR(data)
if m := binary.BigEndian.Uint32(data); m != discover.Magic {
log.Printf("Incorrect magic %x in announcement from %v", m, src)
continue
}

if bytes.Equal(ann.This.ID, myID) {
var ann discover.Announce
ann.Unmarshal(data[4:])

if ann.ID == myID {
// This is one of our own fake packets, don't print it.
continue
}

// Print announcement details for the first packet from a given
// device ID and source address, or if -all was given.
key := string(ann.This.ID) + src.String()
key := ann.ID.String() + src.String()
if all || !seen[key] {
log.Printf("Announcement from %v\n", src)
log.Printf(" %v at %s\n", protocol.DeviceIDFromBytes(ann.This.ID), strings.Join(addrStrs(ann.This), ", "))

for _, dev := range ann.Extra {
log.Printf(" %v at %s\n", protocol.DeviceIDFromBytes(dev.ID), strings.Join(addrStrs(dev), ", "))
}
log.Printf(" %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
seen[key] = true
}
}
@@ -92,15 +93,10 @@ func recv(bc beacon.Interface) {
// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
ann := discover.Announce{
Magic: discover.AnnouncementMagic,
This: discover.Device{
ID: myID,
Addresses: []discover.Address{
{URL: "tcp://fake.example.com:12345"},
},
},
ID: myID,
Addresses: []string{"tcp://fake.example.com:12345"},
}
bs, _ := ann.MarshalXDR()
bs, _ := ann.Marshal()

for {
bc.Send(bs)
@@ -108,19 +104,10 @@ func send(bc beacon.Interface) {
}
}

// returns the list of address URLs
func addrStrs(dev discover.Device) []string {
ss := make([]string, len(dev.Addresses))
for i, addr := range dev.Addresses {
ss[i] = addr.URL
}
return ss
}

// returns a random but recognizable device ID
func randomDeviceID() []byte {
var id [32]byte
func randomDeviceID() protocol.DeviceID {
var id protocol.DeviceID
copy(id[:], randomPrefix)
rand.Read(id[len(randomPrefix):])
return id[:]
return id
}

19
cmd/stdiscosrv/LICENSE
Normal file
@@ -0,0 +1,19 @@
Copyright (C) 2014-2015 The Discosrv Authors

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
38
cmd/stdiscosrv/README.md
Normal file
@@ -0,0 +1,38 @@
stdiscosrv
==========

This is the global discovery server for the `syncthing` project.

Usage
-----

The discovery server supports `ql` and `postgres` backends.
Specify the backend via `-db-backend` and the database DSN via `-db-dsn`.

By default it uses the in-memory `ql` backend. If you wish to persist the
information on disk between restarts with `ql`, specify a file DSN:

```bash
$ stdiscosrv -db-dsn="file:///var/run/stdiscosrv.db"
```

For `postgres`, you will need to create a database and a user with permission
to create tables in it, then start stdiscosrv as follows:

```bash
$ export STDISCOSRV_DB_DSN="postgres://user:password@localhost/databasename"
$ stdiscosrv -db-backend="postgres"
```

You can also pass the DSN as a command line option, but the value you pass in
will be visible in most process managers, potentially exposing the database
password to other users.

In all cases, the appropriate tables and indexes are created at first
startup. If the server doesn't exit with an error, you're fine.

See `stdiscosrv -help` for other options.
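The server can also sit behind an HTTPS-terminating reverse proxy. A minimal sketch, assuming the `-http` and `-listen` flags declared in `cmd/stdiscosrv/main.go` in this commit; the bind address and DSN are placeholder values:

```bash
# TLS is terminated by the proxy in front; stdiscosrv itself speaks plain HTTP.
$ export STDISCOSRV_DB_DSN="postgres://user:password@localhost/databasename"
$ stdiscosrv -db-backend="postgres" -http -listen="127.0.0.1:8080"
```

With `-http` set no certificate is needed on the stdiscosrv side; without it, the server loads or generates `cert.pem`/`key.pem` as shown in `main.go`.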
##### Third-party attribution

[cznic/lldb](https://github.com/cznic/lldb), Copyright (C) 2014 The lldb Authors.
75
cmd/stdiscosrv/clean.go
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
type cleansrv struct {
|
||||
intv time.Duration
|
||||
db *sql.DB
|
||||
prep map[string]*sql.Stmt
|
||||
}
|
||||
|
||||
func (s *cleansrv) Serve() {
|
||||
for {
|
||||
time.Sleep(next(s.intv))
|
||||
|
||||
err := s.cleanOldEntries()
|
||||
if err != nil {
|
||||
log.Println("Clean:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *cleansrv) Stop() {
|
||||
panic("stop unimplemented")
|
||||
}
|
||||
|
||||
func (s *cleansrv) cleanOldEntries() (err error) {
|
||||
var tx *sql.Tx
|
||||
tx, err = s.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
} else {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
res, err := tx.Stmt(s.prep["cleanAddress"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old addresses", rows)
|
||||
}
|
||||
|
||||
res, err = tx.Stmt(s.prep["cleanDevice"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old devices", rows)
|
||||
}
|
||||
|
||||
var devs, addrs int
|
||||
row := tx.Stmt(s.prep["countDevice"]).QueryRow()
|
||||
if err = row.Scan(&devs); err != nil {
|
||||
return err
|
||||
}
|
||||
row = tx.Stmt(s.prep["countAddress"]).QueryRow()
|
||||
if err = row.Scan(&addrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Database: %d devices, %d addresses", devs, addrs)
|
||||
return nil
|
||||
}
|
||||
32
cmd/stdiscosrv/db.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type setupFunc func(db *sql.DB) error
|
||||
type compileFunc func(db *sql.DB) (map[string]*sql.Stmt, error)
|
||||
|
||||
var (
|
||||
setupFuncs = make(map[string]setupFunc)
|
||||
compileFuncs = make(map[string]compileFunc)
|
||||
)
|
||||
|
||||
func register(name string, setup setupFunc, compile compileFunc) {
|
||||
setupFuncs[name] = setup
|
||||
compileFuncs[name] = compile
|
||||
}
|
||||
|
||||
func setup(backend string, db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
setup, ok := setupFuncs[backend]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unsupported backend")
|
||||
}
|
||||
if err := setup(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return compileFuncs[backend](db)
|
||||
}
|
||||
146
cmd/stdiscosrv/main.go
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
|
||||
const (
|
||||
minNegCache = 60 // seconds
|
||||
maxNegCache = 3600 // seconds
|
||||
maxDeviceAge = 7 * 86400 // one week, in seconds
|
||||
)
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`stdiscosrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
}
|
||||
|
||||
var (
|
||||
lruSize = 10240
|
||||
limitAvg = 5
|
||||
limitBurst = 20
|
||||
globalStats stats
|
||||
statsFile string
|
||||
backend = "ql"
|
||||
dsn = getEnvDefault("STDISCOSRV_DB_DSN", "memory://stdiscosrv")
|
||||
certFile = "cert.pem"
|
||||
keyFile = "key.pem"
|
||||
debug = false
|
||||
useHTTP = false
|
||||
)
|
||||
|
||||
func main() {
|
||||
const (
|
||||
cleanIntv = 1 * time.Hour
|
||||
statsIntv = 5 * time.Minute
|
||||
)
|
||||
|
||||
var listen string
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
|
||||
flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average package rate, per 10 s")
|
||||
flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
|
||||
flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
|
||||
flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
|
||||
flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
|
||||
flag.StringVar(&certFile, "cert", certFile, "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", keyFile, "Key file")
|
||||
flag.BoolVar(&debug, "debug", debug, "Debug")
|
||||
flag.BoolVar(&useHTTP, "http", useHTTP, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.Parse()
|
||||
|
||||
log.Println(LongVersion)
|
||||
|
||||
var cert tls.Certificate
|
||||
var err error
|
||||
if !useHTTP {
|
||||
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
}
|
||||
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
|
||||
|
||||
db, err := sql.Open(backend, dsn)
|
||||
if err != nil {
|
||||
log.Fatalln("sql.Open:", err)
|
||||
}
|
||||
prep, err := setup(backend, db)
|
||||
if err != nil {
|
||||
log.Fatalln("Setup:", err)
|
||||
}
|
||||
|
||||
main := suture.NewSimple("main")
|
||||
|
||||
main.Add(&querysrv{
|
||||
addr: listen,
|
||||
cert: cert,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
|
||||
main.Add(&cleansrv{
|
||||
intv: cleanIntv,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
|
||||
main.Add(&statssrv{
|
||||
intv: statsIntv,
|
||||
file: statsFile,
|
||||
db: db,
|
||||
})
|
||||
|
||||
globalStats.Reset()
|
||||
main.Serve()
|
||||
}
|
||||
|
||||
func getEnvDefault(key, def string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
func next(intv time.Duration) time.Duration {
|
||||
t0 := time.Now()
|
||||
t1 := t0.Add(intv).Truncate(intv)
|
||||
return t1.Sub(t0)
|
||||
}
|
||||
98
cmd/stdiscosrv/psql.go
Normal file
@@ -0,0 +1,98 @@
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

func init() {
	register("postgres", postgresSetup, postgresCompile)
}

func postgresSetup(db *sql.DB) error {
	var err error

	db.SetMaxIdleConns(4)
	db.SetMaxOpenConns(8)

	_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Devices (
		DeviceID CHAR(63) NOT NULL PRIMARY KEY,
		Seen TIMESTAMP NOT NULL
	)`)
	if err != nil {
		return err
	}

	var tmp string
	row := db.QueryRow(`SELECT 'DevicesDeviceIDIndex'::regclass`)
	if err = row.Scan(&tmp); err != nil {
		_, err = db.Exec(`CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)`)
	}
	if err != nil {
		return err
	}

	row = db.QueryRow(`SELECT 'DevicesSeenIndex'::regclass`)
	if err = row.Scan(&tmp); err != nil {
		_, err = db.Exec(`CREATE INDEX DevicesSeenIndex ON Devices (Seen)`)
	}
	if err != nil {
		return err
	}

	_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
		DeviceID CHAR(63) NOT NULL,
		Seen TIMESTAMP NOT NULL,
		Address VARCHAR(2048) NOT NULL
	)`)
	if err != nil {
		return err
	}

	row = db.QueryRow(`SELECT 'AddressesDeviceIDSeenIndex'::regclass`)
	if err = row.Scan(&tmp); err != nil {
		_, err = db.Exec(`CREATE INDEX AddressesDeviceIDSeenIndex ON Addresses (DeviceID, Seen)`)
	}
	if err != nil {
		return err
	}

	row = db.QueryRow(`SELECT 'AddressesDeviceIDAddressIndex'::regclass`)
	if err = row.Scan(&tmp); err != nil {
		_, err = db.Exec(`CREATE INDEX AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
	}
	if err != nil {
		return err
	}

	return nil
}

func postgresCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
	stmts := map[string]string{
		"cleanAddress":  "DELETE FROM Addresses WHERE Seen < now() - '2 hour'::INTERVAL",
		"cleanDevice":   fmt.Sprintf("DELETE FROM Devices WHERE Seen < now() - '%d hour'::INTERVAL", maxDeviceAge/3600),
		"countAddress":  "SELECT count(*) FROM Addresses",
		"countDevice":   "SELECT count(*) FROM Devices",
		"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
		"insertDevice":  "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
		"selectAddress": "SELECT Address FROM Addresses WHERE DeviceID=$1 AND Seen > now() - '1 hour'::INTERVAL ORDER BY random() LIMIT 16",
		"selectDevice":  "SELECT Seen FROM Devices WHERE DeviceID=$1",
		"updateAddress": "UPDATE Addresses SET Seen=now() WHERE DeviceID=$1 AND Address=$2",
		"updateDevice":  "UPDATE Devices SET Seen=now() WHERE DeviceID=$1",
	}

	res := make(map[string]*sql.Stmt, len(stmts))
	for key, stmt := range stmts {
		prep, err := db.Prepare(stmt)
		if err != nil {
			return nil, err
		}
		res[key] = prep
	}
	return res, nil
}
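postgresSetup checks for an existing index by casting its name to regclass and only runs CREATE INDEX when that probe fails, since CREATE INDEX IF NOT EXISTS only arrived in PostgreSQL 9.5. A minimal standalone sketch of the same idiom follows; the function, table, and index names are placeholders introduced here for illustration, not part of the original file:

    // ensureIndex creates index idx on tbl(col) unless a relation named idx
    // already exists. The ::regclass cast errors out when the name is unknown,
    // which is what signals that the index still needs to be created.
    // Identifiers are assumed to be trusted constants, not user input.
    func ensureIndex(db *sql.DB, idx, tbl, col string) error {
        var tmp string
        if err := db.QueryRow(fmt.Sprintf("SELECT '%s'::regclass", idx)).Scan(&tmp); err == nil {
            return nil // the index (or another relation by that name) exists
        }
        _, err := db.Exec(fmt.Sprintf("CREATE INDEX %s ON %s (%s)", idx, tbl, col))
        return err
    }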
81
cmd/stdiscosrv/ql.go
Normal file
@@ -0,0 +1,81 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/cznic/ql"
)

func init() {
	ql.RegisterDriver()
	register("ql", qlSetup, qlCompile)
}

func qlSetup(db *sql.DB) (err error) {
	tx, err := db.Begin()
	if err != nil {
		return
	}

	defer func() {
		if err == nil {
			err = tx.Commit()
		} else {
			tx.Rollback()
		}
	}()

	_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Devices (
		DeviceID STRING NOT NULL,
		Seen TIME NOT NULL
	)`)
	if err != nil {
		return
	}

	if _, err = tx.Exec(`CREATE INDEX IF NOT EXISTS DevicesDeviceIDIndex ON Devices (DeviceID)`); err != nil {
		return
	}

	_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
		DeviceID STRING NOT NULL,
		Seen TIME NOT NULL,
		Address STRING NOT NULL,
	)`)
	if err != nil {
		return
	}

	_, err = tx.Exec(`CREATE INDEX IF NOT EXISTS AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
	return
}

func qlCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
	stmts := map[string]string{
		"cleanAddress":  `DELETE FROM Addresses WHERE Seen < now() - duration("2h")`,
		"cleanDevice":   fmt.Sprintf(`DELETE FROM Devices WHERE Seen < now() - duration("%dh")`, maxDeviceAge/3600),
		"countAddress":  "SELECT count(*) FROM Addresses",
		"countDevice":   "SELECT count(*) FROM Devices",
		"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
		"insertDevice":  "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
		"selectAddress": `SELECT Address from Addresses WHERE DeviceID==$1 AND Seen > now() - duration("1h") LIMIT 16`,
		"selectDevice":  "SELECT Seen FROM Devices WHERE DeviceID==$1",
		"updateAddress": "UPDATE Addresses Seen=now() WHERE DeviceID==$1 AND Address==$2",
		"updateDevice":  "UPDATE Devices Seen=now() WHERE DeviceID==$1",
	}

	res := make(map[string]*sql.Stmt, len(stmts))
	for key, stmt := range stmts {
		prep, err := db.Prepare(stmt)
		if err != nil {
			log.Println("Failed to compile", stmt)
			return nil, err
		}
		res[key] = prep
	}
	return res, nil
}
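Both database backends wire themselves up by calling register(name, setup, compile) from init, and main selects one by name via the -db-backend flag. The registry itself lives in another file of this package, so the following is only an assumed sketch of its shape, not the actual declaration:

    // Assumed sketch of the backend registry used by the init functions
    // above; the real type and variable names may differ.
    type setupFunc func(*sql.DB) error
    type compileFunc func(*sql.DB) (map[string]*sql.Stmt, error)

    type dbBackend struct {
        setup   setupFunc
        compile compileFunc
    }

    var backends = map[string]dbBackend{}

    func register(name string, s setupFunc, c compileFunc) {
        backends[name] = dbBackend{setup: s, compile: c}
    }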
492
cmd/stdiscosrv/querysrv.go
Normal file
@@ -0,0 +1,492 @@
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).

package main

import (
	"bytes"
	"crypto/tls"
	"database/sql"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/golang/groupcache/lru"
	"github.com/syncthing/syncthing/lib/protocol"
	"golang.org/x/net/context"
	"golang.org/x/time/rate"
)

type querysrv struct {
	addr     string
	db       *sql.DB
	prep     map[string]*sql.Stmt
	limiter  *safeCache
	cert     tls.Certificate
	listener net.Listener
}

type announcement struct {
	Seen      time.Time `json:"seen"`
	Addresses []string  `json:"addresses"`
}

type safeCache struct {
	*lru.Cache
	mut sync.Mutex
}

func (s *safeCache) Get(key string) (val interface{}, ok bool) {
	s.mut.Lock()
	val, ok = s.Cache.Get(key)
	s.mut.Unlock()
	return
}

func (s *safeCache) Add(key string, val interface{}) {
	s.mut.Lock()
	s.Cache.Add(key, val)
	s.mut.Unlock()
}

type requestID int64

func (i requestID) String() string {
	return fmt.Sprintf("%016x", int64(i))
}

type contextKey int

const idKey contextKey = iota

func negCacheFor(lastSeen time.Time) int {
	since := time.Since(lastSeen).Seconds()
	if since >= maxDeviceAge {
		return maxNegCache
	}
	if since < 0 {
		// That's weird
		return minNegCache
	}

	// Return a value linearly scaled from minNegCache (at zero seconds ago)
	// to maxNegCache (at maxDeviceAge seconds ago).
	r := since / maxDeviceAge
	return int(minNegCache + r*(maxNegCache-minNegCache))
}
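negCacheFor turns "how long ago was this device last seen" into a cache TTL for negative answers: recently seen devices get a short TTL, long-gone ones a long TTL. A worked example with assumed constant values (the real minNegCache, maxNegCache, and maxDeviceAge are defined elsewhere in the package and may differ):

    // Assumed values for illustration only: minNegCache = 60, maxNegCache =
    // 3600, maxDeviceAge = 7*86400 (one week). For a device last seen 3.5
    // days ago, r = 302400.0/604800 = 0.5, so negCacheFor returns
    // 60 + 0.5*(3600-60) = 1830 seconds, which the GET handler below sends
    // as both Retry-After and Cache-Control max-age.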
func (s *querysrv) Serve() {
	s.limiter = &safeCache{
		Cache: lru.New(lruSize),
	}

	if useHTTP {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			log.Println("Listen:", err)
			return
		}
		s.listener = listener
	} else {
		tlsCfg := &tls.Config{
			Certificates:           []tls.Certificate{s.cert},
			ClientAuth:             tls.RequestClientCert,
			SessionTicketsDisabled: true,
			MinVersion:             tls.VersionTLS12,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
			},
		}

		tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
		if err != nil {
			log.Println("Listen:", err)
			return
		}
		s.listener = tlsListener
	}

	http.HandleFunc("/v2/", s.handler)
	http.HandleFunc("/ping", handlePing)

	srv := &http.Server{
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 10,
	}

	if err := srv.Serve(s.listener); err != nil {
		log.Println("Serve:", err)
	}
}

var topCtx = context.Background()

func (s *querysrv) handler(w http.ResponseWriter, req *http.Request) {
	reqID := requestID(rand.Int63())
	ctx := context.WithValue(topCtx, idKey, reqID)

	if debug {
		log.Println(reqID, req.Method, req.URL)
	}

	t0 := time.Now()
	defer func() {
		diff := time.Since(t0)
		var comment string
		if diff > time.Second {
			comment = "(very slow request)"
		} else if diff > 100*time.Millisecond {
			comment = "(slow request)"
		}
		if comment != "" || debug {
			log.Println(reqID, req.Method, req.URL, "completed in", diff, comment)
		}
	}()

	var remoteIP net.IP
	if useHTTP {
		remoteIP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
	} else {
		addr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr)
		if err != nil {
			log.Println("remoteAddr:", err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			return
		}
		remoteIP = addr.IP
	}

	if s.limit(remoteIP) {
		if debug {
			log.Println(remoteIP, "is limited")
		}
		w.Header().Set("Retry-After", "60")
		http.Error(w, "Too Many Requests", 429)
		return
	}

	switch req.Method {
	case "GET":
		s.handleGET(ctx, w, req)
	case "POST":
		s.handlePOST(ctx, remoteIP, w, req)
	default:
		globalStats.Error()
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
	}
}

func (s *querysrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	reqID := ctx.Value(idKey).(requestID)

	deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
	if err != nil {
		if debug {
			log.Println(reqID, "bad device param")
		}
		globalStats.Error()
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	var ann announcement

	ann.Seen, err = s.getDeviceSeen(deviceID)
	negCache := strconv.Itoa(negCacheFor(ann.Seen))
	w.Header().Set("Retry-After", negCache)
	w.Header().Set("Cache-Control", "public, max-age="+negCache)

	if err != nil {
		// The device is not in the database.
		globalStats.Query()
		http.Error(w, "Not Found", http.StatusNotFound)
		return
	}

	t0 := time.Now()
	ann.Addresses, err = s.getAddresses(ctx, deviceID)
	if err != nil {
		log.Println(reqID, "getAddresses:", err)
		globalStats.Error()
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	if debug {
		log.Println(reqID, "getAddresses in", time.Since(t0))
	}

	globalStats.Query()

	if len(ann.Addresses) == 0 {
		http.Error(w, "Not Found", http.StatusNotFound)
		return
	}

	globalStats.Answer()

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(ann)
}

func (s *querysrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.ResponseWriter, req *http.Request) {
	reqID := ctx.Value(idKey).(requestID)

	rawCert := certificateBytes(req)
	if rawCert == nil {
		if debug {
			log.Println(reqID, "no certificates")
		}
		globalStats.Error()
		http.Error(w, "Forbidden", http.StatusForbidden)
		return
	}

	var ann announcement
	if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
		if debug {
			log.Println(reqID, "decode:", err)
		}
		globalStats.Error()
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	deviceID := protocol.NewDeviceID(rawCert)

	// handleAnnounce returns *two* errors. The first indicates a problem with
	// something the client posted to us. We should return a 400 Bad Request
	// and not worry about it. The second indicates that the request was fine,
	// but something internal messed up. We should log it and respond with a
	// more apologetic 500 Internal Server Error.
	userErr, internalErr := s.handleAnnounce(ctx, remoteIP, deviceID, ann.Addresses)
	if userErr != nil {
		if debug {
			log.Println(reqID, "handleAnnounce:", userErr)
		}
		globalStats.Error()
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}
	if internalErr != nil {
		log.Println(reqID, "handleAnnounce:", internalErr)
		globalStats.Error()
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}

	globalStats.Announce()

	// TODO: Slowly increase this for stable clients
	w.Header().Set("Reannounce-After", "1800")

	// We could return the lookup result here, but it's kind of unnecessarily
	// expensive to go query the database again so we let the client decide to
	// do a lookup if they really care.
	w.WriteHeader(http.StatusNoContent)
}

func (s *querysrv) Stop() {
	s.listener.Close()
}

func (s *querysrv) handleAnnounce(ctx context.Context, remote net.IP, deviceID protocol.DeviceID, addresses []string) (userErr, internalErr error) {
	reqID := ctx.Value(idKey).(requestID)

	tx, err := s.db.Begin()
	if err != nil {
		internalErr = err
		return
	}

	defer func() {
		// Since we return from a bunch of different places, we handle
		// rollback in the defer.
		if internalErr != nil || userErr != nil {
			tx.Rollback()
		}
	}()

	for _, annAddr := range addresses {
		uri, err := url.Parse(annAddr)
		if err != nil {
			userErr = err
			return
		}

		host, port, err := net.SplitHostPort(uri.Host)
		if err != nil {
			userErr = err
			return
		}

		ip := net.ParseIP(host)
		if host == "" || ip.IsUnspecified() {
			// Do not use IPv6 remote address if requested scheme is tcp4
			if uri.Scheme == "tcp4" && remote.To4() == nil {
				continue
			}

			// Do not use IPv4 remote address if requested scheme is tcp6
			if uri.Scheme == "tcp6" && remote.To4() != nil {
				continue
			}

			host = remote.String()
		}

		uri.Host = net.JoinHostPort(host, port)

		if err := s.updateAddress(ctx, tx, deviceID, uri.String()); err != nil {
			internalErr = err
			return
		}
	}

	if err := s.updateDevice(ctx, tx, deviceID); err != nil {
		internalErr = err
		return
	}

	t0 := time.Now()
	internalErr = tx.Commit()
	if debug {
		log.Println(reqID, "commit in", time.Since(t0))
	}
	return
}
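The host-rewriting loop in handleAnnounce lets a client announce a wildcard listen address and have the discovery server substitute the IP it actually saw the request come from, while dropping addresses whose scheme cannot match the observed address family. A concrete example, with made-up addresses for illustration:

    // Illustrative only: a client connecting from 203.0.113.7 that announces
    // "tcp://0.0.0.0:22000" gets it stored as "tcp://203.0.113.7:22000". A
    // "tcp4://0.0.0.0:22000" announce arriving over IPv6 is skipped, because
    // the observed remote address could never be dialled over IPv4.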
func (s *querysrv) limit(remote net.IP) bool {
	key := remote.String()

	bkt, ok := s.limiter.Get(key)
	if ok {
		bkt := bkt.(*rate.Limiter)
		if !bkt.Allow() {
			// Rate limit exceeded; ignore packet
			return true
		}
	} else {
		// limitAvg is in packets per ten seconds.
		s.limiter.Add(key, rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst))
	}

	return false
}

func (s *querysrv) updateDevice(ctx context.Context, tx *sql.Tx, device protocol.DeviceID) error {
	reqID := ctx.Value(idKey).(requestID)
	t0 := time.Now()
	res, err := tx.Stmt(s.prep["updateDevice"]).Exec(device.String())
	if err != nil {
		return err
	}
	if debug {
		log.Println(reqID, "updateDevice in", time.Since(t0))
	}

	if rows, _ := res.RowsAffected(); rows == 0 {
		t0 = time.Now()
		_, err := tx.Stmt(s.prep["insertDevice"]).Exec(device.String())
		if err != nil {
			return err
		}
		if debug {
			log.Println(reqID, "insertDevice in", time.Since(t0))
		}
	}

	return nil
}

func (s *querysrv) updateAddress(ctx context.Context, tx *sql.Tx, device protocol.DeviceID, uri string) error {
	res, err := tx.Stmt(s.prep["updateAddress"]).Exec(device.String(), uri)
	if err != nil {
		return err
	}

	if rows, _ := res.RowsAffected(); rows == 0 {
		_, err := tx.Stmt(s.prep["insertAddress"]).Exec(device.String(), uri)
		if err != nil {
			return err
		}
	}

	return nil
}

func (s *querysrv) getAddresses(ctx context.Context, device protocol.DeviceID) ([]string, error) {
	rows, err := s.prep["selectAddress"].Query(device.String())
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var res []string
	for rows.Next() {
		var addr string

		err := rows.Scan(&addr)
		if err != nil {
			log.Println("Scan:", err)
			continue
		}
		res = append(res, addr)
	}

	return res, nil
}

func (s *querysrv) getDeviceSeen(device protocol.DeviceID) (time.Time, error) {
	row := s.prep["selectDevice"].QueryRow(device.String())
	var seen time.Time
	if err := row.Scan(&seen); err != nil {
		return time.Time{}, err
	}
	return seen.In(time.UTC), nil
}

func handlePing(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(204)
}

func certificateBytes(req *http.Request) []byte {
	if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
		return req.TLS.PeerCertificates[0].Raw
	}

	if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
		bs := []byte(hdr)
		// The certificate is in PEM format but with spaces for newlines. We
		// need to reinstate the newlines for the PEM decoder. But we need to
		// leave the spaces in the BEGIN and END lines - the first and last
		// space - alone.
		firstSpace := bytes.Index(bs, []byte(" "))
		lastSpace := bytes.LastIndex(bs, []byte(" "))
		for i := firstSpace + 1; i < lastSpace; i++ {
			if bs[i] == ' ' {
				bs[i] = '\n'
			}
		}
		block, _ := pem.Decode(bs)
		if block == nil {
			// Decoding failed
			return nil
		}
		return block.Bytes
	}

	return nil
}
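updateDevice and updateAddress both rely on an UPDATE-then-INSERT upsert, keyed off RowsAffected, so the same prepared statements work against both the ql and PostgreSQL backends without any backend-specific upsert syntax. A generic sketch of that idiom, with placeholder statement names introduced here for illustration:

    // Sketch of the upsert pattern used above: try the UPDATE first, and only
    // INSERT when nothing matched. Assumes update and insert take the same
    // argument list, as the statements in this file do.
    func upsert(tx *sql.Tx, update, insert *sql.Stmt, args ...interface{}) error {
        res, err := tx.Stmt(update).Exec(args...)
        if err != nil {
            return err
        }
        if rows, _ := res.RowsAffected(); rows == 0 {
            // Nothing matched the UPDATE, so create the row instead.
            _, err = tx.Stmt(insert).Exec(args...)
        }
        return err
    }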
141
cmd/stdiscosrv/stats.go
Normal file
@@ -0,0 +1,141 @@
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).

package main

import (
	"bytes"
	"database/sql"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"sync/atomic"
	"time"
)

type stats struct {
	// Incremented atomically
	announces int64
	queries   int64
	answers   int64
	errors    int64
}

func (s *stats) Announce() {
	atomic.AddInt64(&s.announces, 1)
}

func (s *stats) Query() {
	atomic.AddInt64(&s.queries, 1)
}

func (s *stats) Answer() {
	atomic.AddInt64(&s.answers, 1)
}

func (s *stats) Error() {
	atomic.AddInt64(&s.errors, 1)
}

// Reset returns a copy of the current stats and resets the counters to
// zero.
func (s *stats) Reset() stats {
	// Create a copy of the stats using atomic reads
	copy := stats{
		announces: atomic.LoadInt64(&s.announces),
		queries:   atomic.LoadInt64(&s.queries),
		answers:   atomic.LoadInt64(&s.answers),
		errors:    atomic.LoadInt64(&s.errors),
	}

	// Reset the stats by subtracting the values that we copied
	atomic.AddInt64(&s.announces, -copy.announces)
	atomic.AddInt64(&s.queries, -copy.queries)
	atomic.AddInt64(&s.answers, -copy.answers)
	atomic.AddInt64(&s.errors, -copy.errors)

	return copy
}

type statssrv struct {
	intv time.Duration
	file string
	db   *sql.DB
}

func (s *statssrv) Serve() {
	lastReset := time.Now()
	for {
		time.Sleep(next(s.intv))

		stats := globalStats.Reset()
		d := time.Since(lastReset).Seconds()
		lastReset = time.Now()

		log.Printf("Stats: %.02f announces/s, %.02f queries/s, %.02f answers/s, %.02f errors/s",
			float64(stats.announces)/d, float64(stats.queries)/d, float64(stats.answers)/d, float64(stats.errors)/d)

		if s.file != "" {
			s.writeToFile(stats, d)
		}
	}
}

func (s *statssrv) Stop() {
	panic("stop unimplemented")
}

func (s *statssrv) writeToFile(stats stats, secs float64) {
	newLine := []byte("\n")

	var addrs int
	row := s.db.QueryRow("SELECT COUNT(*) FROM Addresses")
	if err := row.Scan(&addrs); err != nil {
		log.Println("stats query:", err)
		return
	}

	fd, err := os.OpenFile(s.file, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		log.Println("stats file:", err)
		return
	}
	defer func() {
		err = fd.Close()
		if err != nil {
			log.Println("stats file:", err)
		}
	}()

	bs, err := ioutil.ReadAll(fd)
	if err != nil {
		log.Println("stats file:", err)
		return
	}
	lines := bytes.Split(bytes.TrimSpace(bs), newLine)
	if len(lines) > 12 {
		lines = lines[len(lines)-12:]
	}

	latest := fmt.Sprintf("%v: %6d addresses, %8.02f announces/s, %8.02f queries/s, %8.02f answers/s, %8.02f errors/s\n",
		time.Now().UTC().Format(time.RFC3339), addrs,
		float64(stats.announces)/secs, float64(stats.queries)/secs, float64(stats.answers)/secs, float64(stats.errors)/secs)
	lines = append(lines, []byte(latest))

	_, err = fd.Seek(0, 0)
	if err != nil {
		log.Println("stats file:", err)
		return
	}
	err = fd.Truncate(0)
	if err != nil {
		log.Println("stats file:", err)
		return
	}

	_, err = fd.Write(bytes.Join(lines, newLine))
	if err != nil {
		log.Println("stats file:", err)
		return
	}
}
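Reset hands back a consistent snapshot while new increments keep landing on the live counters, which is what lets statssrv report per-second rates without any locking. A small illustrative calculation (numbers are made up):

    // Illustrative only: if Reset() returns announces == 900 over a roughly
    // 300 s interval, the logged rate is 900/300 = 3.00 announces/s. Any
    // announce that arrives while Reset runs is not lost; it is simply
    // counted toward the next interval's snapshot.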
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,11 +2,12 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
@@ -40,7 +41,8 @@ func main() {
|
||||
log.Println("Lstat:")
|
||||
log.Printf(" Size: %d bytes", fi.Size())
|
||||
log.Printf(" Mode: 0%o", fi.Mode())
|
||||
log.Printf(" Time: %v (%d)", fi.ModTime(), fi.ModTime().Unix())
|
||||
log.Printf(" Time: %v", fi.ModTime())
|
||||
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
|
||||
log.Println()
|
||||
|
||||
if !fi.Mode().IsDir() && !fi.Mode().IsRegular() {
|
||||
@@ -52,7 +54,8 @@ func main() {
|
||||
log.Println("Stat:")
|
||||
log.Printf(" Size: %d bytes", fi.Size())
|
||||
log.Printf(" Mode: 0%o", fi.Mode())
|
||||
log.Printf(" Time: %v (%d)", fi.ModTime(), fi.ModTime().Unix())
|
||||
log.Printf(" Time: %v", fi.ModTime())
|
||||
log.Printf(" %d.%09d", fi.ModTime().Unix(), fi.ModTime().Nanosecond())
|
||||
log.Println()
|
||||
}
|
||||
|
||||
@@ -68,7 +71,7 @@ func main() {
|
||||
if *standardBlocks || blockSize < protocol.BlockSize {
|
||||
blockSize = protocol.BlockSize
|
||||
}
|
||||
bs, err := scanner.Blocks(fd, blockSize, fi.Size(), nil)
|
||||
bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil, true)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -66,7 +66,7 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
}()
|
||||
}
|
||||
|
||||
for _ = range servers {
|
||||
for range servers {
|
||||
res := <-resc
|
||||
|
||||
u, _ := url.Parse(res.server)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -66,7 +66,7 @@ func generateFiles(dir string, files, maxexp int, srcname string) error {
|
||||
}
|
||||
|
||||
func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
src := io.LimitReader(&inifiteReader{fd}, int64(s))
|
||||
src := io.LimitReader(&inifiteReader{fd}, s)
|
||||
dst, err := os.Create(p1)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -85,12 +85,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
|
||||
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
|
||||
err = os.Chtimes(p1, t, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return os.Chtimes(p1, t, t)
|
||||
}
|
||||
|
||||
func randomName() string {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -10,51 +10,71 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
func dump(ldb *leveldb.DB) {
|
||||
func dump(ldb *db.Instance) {
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.UnmarshalXDR(it.Value())
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
fmt.Printf("[mtime]\n %x\n %x\n", it.Key(), it.Value())
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
val := it.Value()
|
||||
var real, virt time.Time
|
||||
real.UnmarshalBinary(val[:len(val)/2])
|
||||
virt.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
val := it.Value()
|
||||
if len(val) == 0 {
|
||||
fmt.Printf("[deviceidx] K:%d V:<nil>\n", key)
|
||||
} else {
|
||||
dev := protocol.DeviceIDFromBytes(val)
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, dev)
|
||||
}
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
|
||||
@@ -2,17 +2,16 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
)
|
||||
|
||||
type SizedElement struct {
|
||||
@@ -38,33 +37,31 @@ func (h *ElementHeap) Pop() interface{} {
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb *leveldb.DB) {
|
||||
func dumpsize(ldb *db.Instance) {
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
var dev protocol.DeviceID
|
||||
var ele SizedElement
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
devBytes := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
copy(dev[:], devBytes)
|
||||
ele.key = fmt.Sprintf("DEVICE:%s:%s:%s", dev, folder, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
ele.key = fmt.Sprintf("DEVICE:%d:%d:%s", folder, device, name)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
name := nulString(key[1+64:])
|
||||
ele.key = fmt.Sprintf("GLOBAL:%s:%s", folder, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
ele.key = fmt.Sprintf("GLOBAL:%d:%s", folder, name)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := nulString(key[1 : 1+64])
|
||||
hash := key[1+64 : 1+64+32]
|
||||
name := nulString(key[1+64+32:])
|
||||
ele.key = fmt.Sprintf("BLOCK:%s:%x:%s", folder, hash, name)
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
ele.key = fmt.Sprintf("BLOCK:%d:%x:%s", folder, hash, name)
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
ele.key = fmt.Sprintf("DEVICESTATS:%s", key[1:])
|
||||
@@ -75,6 +72,14 @@ func dumpsize(ldb *leveldb.DB) {
|
||||
case db.KeyTypeVirtualMtime:
|
||||
ele.key = fmt.Sprintf("MTIME:%s", key[1:])
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
id := binary.BigEndian.Uint32(key[1:])
|
||||
ele.key = fmt.Sprintf("FOLDERIDX:%d", id)
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
id := binary.BigEndian.Uint32(key[1:])
|
||||
ele.key = fmt.Sprintf("DEVICEIDX:%d", id)
|
||||
|
||||
default:
|
||||
ele.key = fmt.Sprintf("UNKNOWN:%x", key)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -13,8 +13,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -28,16 +27,12 @@ func main() {
|
||||
|
||||
path := flag.Arg(0)
|
||||
if path == "" {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.11.0.db")
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
fmt.Println("Path:", path)
|
||||
|
||||
ldb, err := leveldb.OpenFile(path, &opt.Options{
|
||||
ErrorIfMissing: true,
|
||||
Strict: opt.StrictAll,
|
||||
OpenFilesCacheCapacity: 100,
|
||||
})
|
||||
ldb, err := db.Open(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
22
cmd/strelaypoolsrv/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 The Syncthing Project
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
24
cmd/strelaypoolsrv/README.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# relaypoolsrv
|
||||
|
||||
This is the relay pool server for the `syncthing` project, which allows
|
||||
community-hosted [relaysrv](https://github.com/syncthing/relaysrv) instances to join
|
||||
the public pool.
|
||||
|
||||
Servers that join the pool are then advertised to users of `syncthing` as
|
||||
potential connection points for those who are unable to connect directly due
|
||||
to NAT or firewall issues.
|
||||
|
||||
There is very little reason why you'd want to run this yourself, as
|
||||
`relaypoolsrv` is just used for announcement and lookup of public relay
|
||||
servers. If you are looking to setup a private or a public relay, please
|
||||
check the documentation for
|
||||
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
|
||||
to join the default public pool.
|
||||
|
||||
See `relaypoolsrv -help` for configuration options.
|
||||
|
||||
##### Third-party attributions
|
||||
|
||||
[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).
|
||||
|
||||
[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors. Portions Copyright (C) 2011 Blake Mizerany.
|
||||
1
cmd/strelaypoolsrv/auto/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
gui.go
|
||||
411
cmd/strelaypoolsrv/gui/index.html
Normal file
@@ -0,0 +1,411 @@
|
||||
<!DOCTYPE html>
|
||||
|
||||
<html lang="en" ng-app="syncthing" ng-controller="relayDataController">
|
||||
<head>
|
||||
<meta charset="utf-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
|
||||
<meta name="description" content=""/>
|
||||
<meta name="author" content=""/>
|
||||
|
||||
<title>Relay stats</title>
|
||||
<link href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
|
||||
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/font-awesome/4.6.1/css/font-awesome.min.css"/>
|
||||
|
||||
<style>
|
||||
#map {
|
||||
height: 600px;
|
||||
}
|
||||
.ng-cloak {
|
||||
display: none;
|
||||
}
|
||||
table {
|
||||
font-size: 11px !important;
|
||||
width: 100%;
|
||||
border: 1px;
|
||||
|
||||
}
|
||||
td {
|
||||
padding: 0px !important;
|
||||
}
|
||||
tfoot td {
|
||||
font-weight: bold;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body class="ng-cloak">
|
||||
<div class="container">
|
||||
<h1>Relay Pool Data</h1>
|
||||
<div ng-if="relays === undefined" class="text-center">
|
||||
<img src="//cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
|
||||
<p>Please wait while we gather data</p>
|
||||
</div>
|
||||
<div>
|
||||
<div ng-show="relays !== undefined" class="ng-hide">
|
||||
<p>
|
||||
Currently {{ relays.length }} relays online ({{ totals.goMaxProcs }} cores in total).
|
||||
</p>
|
||||
</div>
|
||||
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
|
||||
<p>The circle size represents how many bytes the relay transferred relative to other relays</p>
|
||||
</div>
|
||||
<div>
|
||||
<table class="table table-striped table-condensed table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th rowspan="2">Address</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numActiveSessions'; sortReverse = !sortReverse">
|
||||
Sessions
|
||||
<span ng-show="sortType == 'status.numActiveSessions' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numActiveSessions' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numConnections'; sortReverse = !sortReverse">
|
||||
Connections
|
||||
<span ng-show="sortType == 'status.numConnections' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numConnections' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.bytesProxied'; sortReverse = !sortReverse">
|
||||
Data relayed
|
||||
<span ng-show="sortType == 'status.bytesProxied' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.bytesProxied' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th colspan="6" class="text-center">Transfer rate in the last period</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.uptimeSeconds'; sortReverse = !sortReverse">
|
||||
Uptime hours
|
||||
<span ng-show="sortType == 'status.uptimeSeconds' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.uptimeSeconds' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
|
||||
Provided by
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[0]'; sortReverse = !sortReverse">
|
||||
10s
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[1]'; sortReverse = !sortReverse">
|
||||
1m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[2]'; sortReverse = !sortReverse">
|
||||
5m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[3]'; sortReverse = !sortReverse">
|
||||
15m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[4]'; sortReverse = !sortReverse">
|
||||
30m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[5]'; sortReverse = !sortReverse">
|
||||
60m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr ng-repeat="relay in relays | orderBy:sortType:sortReverse:sortCompare" ng-mouseover="relay.showMarker()" ng-mouseleave="relay.hideMarker()">
|
||||
<td>{{ relay.address }}</td>
|
||||
<td ng-if="relay.status === undefined" colspan="11" class="text-center">Looking up...</td>
|
||||
<td ng-if-start="relay.status !== undefined">{{ relay.status.numActiveSessions }}</td>
|
||||
<td>{{ relay.status.numConnections }}</td>
|
||||
<td>{{ relay.status.bytesProxied | bytes }}</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td ng-if="relay.status.uptimeSeconds != undefined">{{ relay.status.uptimeSeconds/60/60 | number:0 }}</td>
|
||||
<td ng-if="relay.status.uptimeSeconds == undefined"></td>
|
||||
<td title="{{ relay.status.options['provided-by'] || '' }}" ng-if-end>
|
||||
{{ relay.status.options['provided-by'] || '' | limitTo:50 }}
|
||||
<span ng-if="(relay.status.options['provided-by'] || '').length > 50">…</span>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
<tfoot>
|
||||
<tr>
|
||||
<td>Totals</td>
|
||||
<td>{{ totals.numActiveSessions }}</td>
|
||||
<td>{{ totals.numConnections }}</td>
|
||||
<td>{{ totals.bytesProxied | bytes }}</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td>{{ totals.uptimeSeconds/60/60 | number:0 }} hours</td>
|
||||
<td>{{ relays.length }} relays</td>
|
||||
</tr>
|
||||
</tfoot>
|
||||
</table>
|
||||
</div>
|
||||
<hr>
|
||||
<p>
|
||||
This product includes GeoLite2 data created by MaxMind, available from
|
||||
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
|
||||
<script type="text/javascript" src="//code.jquery.com/jquery-2.1.4.min.js"></script>
|
||||
<script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
|
||||
<script type="text/javascript" src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<script type="text/javascript" src="//maps.googleapis.com/maps/api/js"></script>
|
||||
</body>
|
||||
|
||||
<script>
|
||||
angular.module('syncthing', [
|
||||
])
|
||||
.config(function($httpProvider) {
|
||||
$httpProvider.defaults.timeout = 5000;
|
||||
})
|
||||
.filter('bytes', function() {
|
||||
return function(bytes, precision) {
|
||||
if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
|
||||
if (typeof precision === 'undefined') precision = 1;
|
||||
|
||||
var units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'],
|
||||
number = Math.floor(Math.log(bytes) / Math.log(1024));
|
||||
|
||||
var value = (bytes / Math.pow(1000, Math.floor(number)));
|
||||
if (!isFinite(value)) {
|
||||
value = 0;
|
||||
precision = 0;
|
||||
}
|
||||
if (!isFinite(number)) {
|
||||
units = 'bytes';
|
||||
} else {
|
||||
units = units[number];
|
||||
}
|
||||
return value.toFixed(precision) + ' ' + units;
|
||||
}
|
||||
})
|
||||
.controller('relayDataController', ['$scope', '$rootScope', '$http', '$q', '$compile', '$timeout', function($scope, $rootScope, $http, $q, $compile, $timeout) {
|
||||
$scope.totals = {
|
||||
bytesProxied: 0,
|
||||
goMaxProcs: 0,
|
||||
kbps10s1m5m15m30m60m: [0, 0, 0, 0, 0, 0],
|
||||
numActiveSessions: 0,
|
||||
numConnections: 0,
|
||||
numPendingSessionKeys: 0,
|
||||
numProxies: 0,
|
||||
uptimeSeconds: 0,
|
||||
};
|
||||
$scope.map = new google.maps.Map(document.getElementById('map'), {
|
||||
zoom: 1,
|
||||
mapTypeId: google.maps.MapTypeId.ROADMAP
|
||||
});
|
||||
$scope.mapBounds = new google.maps.LatLngBounds();
|
||||
$scope.tooltipTemplate = $('#infoTemplate').html();
|
||||
$scope.usedLocations = {};
|
||||
$scope.sortType = 'status.numActiveSessions';
|
||||
$scope.sortReverse = true;
|
||||
$scope.sortCompare = function(a, b) {
|
||||
if (a.value == b.value) {
|
||||
return 0;
|
||||
}
|
||||
if (a.type == "undefined") {
|
||||
return -1;
|
||||
}
|
||||
if (b.type == "undefined") {
|
||||
return 1;
|
||||
}
|
||||
return a.value > b.value ? 1 : -1;
|
||||
}
|
||||
|
||||
$http.get("/endpoint").then(function(response) {
|
||||
$scope.relays = response.data.relays;
|
||||
var promises = [];
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
|
||||
relay.uri = constructURI(relay.url);
|
||||
relay.address = relay.url.split('/')[2];
|
||||
|
||||
addMarkerToMap(relay);
|
||||
|
||||
promises.push(getRelayStatus(relay));
|
||||
});
|
||||
|
||||
// Can only add circles once we know the totals for transfers, which means
|
||||
// we need to resolve all statuses.
|
||||
$q.all(promises).then(function() {
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
if (relay.status) {
|
||||
addCircleToMap(relay);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
$scope.map.fitBounds($scope.mapBounds);
|
||||
if ($scope.relays.length == 1) {
|
||||
$scope.map.setZoom(13);
|
||||
}
|
||||
});
|
||||
|
||||
function addMarkerToMap(relay) {
|
||||
var loc = relay.location.latitude + "," + relay.location.longitude;
|
||||
|
||||
// Deal with overlapping markers
|
||||
while (loc in $scope.usedLocations) {
|
||||
var locParts = loc.split(',');
|
||||
locParts = [parseFloat(locParts[0]), parseFloat(locParts[1])];
|
||||
locParts[Math.round(Math.random())] += 0.5 * (Math.random() >= 0.5 ? 1 : -1);
|
||||
loc = locParts.join(',');
|
||||
}
|
||||
|
||||
$scope.usedLocations[loc] = true;
|
||||
|
||||
var locParts = loc.split(',');
|
||||
|
||||
relay.marker = new google.maps.Marker({
|
||||
map: $scope.map,
|
||||
position: new google.maps.LatLng(locParts[0], locParts[1]),
|
||||
title: relay.url,
|
||||
});
|
||||
|
||||
var scope = $rootScope.$new(true);
|
||||
scope.relay = relay;
|
||||
|
||||
relay.marker.info = new google.maps.InfoWindow({
|
||||
content: $compile($scope.tooltipTemplate)(scope)[0],
|
||||
});
|
||||
|
||||
relay.showMarker = function() {
|
||||
relay.marker.info.open($scope.map, relay.marker);
|
||||
}
|
||||
|
||||
relay.hideMarker = function() {
|
||||
relay.marker.info.close();
|
||||
}
|
||||
|
||||
relay.marker.addListener('mouseover', relay.showMarker);
|
||||
relay.marker.addListener('mouseout', relay.hideMarker);
|
||||
|
||||
$scope.mapBounds.extend(relay.marker.position);
|
||||
}
|
||||
|
||||
function addCircleToMap(relay) {
|
||||
relay.marker.circle = new google.maps.Circle({
|
||||
strokeColor: '#FF0000',
|
||||
strokeOpacity: 0.8,
|
||||
strokeWeight: 2,
|
||||
fillColor: '#FF0000',
|
||||
fillOpacity: 0.35,
|
||||
map: $scope.map,
|
||||
center: relay.marker.position,
|
||||
radius: ((relay.status.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000
|
||||
});
|
||||
}
|
||||
|
||||
function getRelayStatus(relay) {
|
||||
// Normal timeout doesn't deal with relays which accept the TCP connection
|
||||
// but don't respond (some firewalls do that), so deal with it this way.
|
||||
var timeoutRequest = $q.defer();
|
||||
var resolveStatus = $q.defer();
|
||||
|
||||
|
||||
$http.get("http://" + relay.uri.hostname + ':' + ((relay.uri.args.statusAddr && relay.uri.args.statusAddr.split(':')[1]) || "22070") + "/status", { timeout: timeoutRequest.promise }).then(function (response) {
|
||||
relay.status = response.data;
|
||||
resolveStatus.resolve();
|
||||
angular.forEach($scope.totals, function(value, key) {
|
||||
if (typeof $scope.totals[key] == 'number') {
|
||||
$scope.totals[key] += response.data[key];
|
||||
} else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
|
||||
angular.forEach($scope.totals[key], function(value, index) {
|
||||
$scope.totals[key][index] += response.data[key][index];
|
||||
});
|
||||
}
|
||||
});
|
||||
}, function() {
|
||||
relay.status = null;
|
||||
resolveStatus.resolve();
|
||||
});
|
||||
|
||||
$timeout(function() {
|
||||
timeoutRequest.resolve();
|
||||
}, 5000);
|
||||
|
||||
return resolveStatus.promise;
|
||||
}
|
||||
|
||||
function constructURI(url) {
|
||||
var uri = document.createElement('a');
|
||||
|
||||
// HAX, otherwise doesn't work
|
||||
uri.href = url.replace('relay://', 'http://');
|
||||
|
||||
// Convert query string to object
|
||||
uri.args = {};
|
||||
angular.forEach(uri.search.replace(/^\?/, '').split('&'), function(query) {
|
||||
var split = query.split('=');
|
||||
uri.args[split[0]] = split[1];
|
||||
});
|
||||
|
||||
return uri;
|
||||
}
|
||||
}]);
|
||||
</script>
|
||||
|
||||
<script type="text/template" id="infoTemplate">
|
||||
<div>
|
||||
<p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.status.options['provided-by']">provided by <u>{{ relay.status.options['provided-by'] }}</u></span></p>
|
||||
<div ng-if="relay.status">
|
||||
<span ng-if="relay.status.startTime">Start time: {{ relay.status.startTime | date:"medium" }}</br></span>
|
||||
<span ng-if="relay.status.bytesProxied != undefined">Proxied: {{ relay.status.bytesProxied | bytes }}</br></span>
|
||||
<span ng-if="relay.status.numActiveSessions != undefined">Sessions: {{ relay.status.numActiveSessions }}</br></span>
|
||||
<span ng-if="relay.status.numConnections != undefined">Clients: {{ relay.status.numConnections }}</br></span>
|
||||
<span ng-if="relay.status.options.pools">Pools: {{ relay.status.options.pools.join(', ') }}</br></span>
|
||||
<span ng-if="relay.status.options['global-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['global-rate'] > 0">Global rate limit: {{ relay.status.options['global-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['global-rate'] == 0">Global rate limit: unlimited</span>
|
||||
<br/>
|
||||
</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['per-session-rate'] > 0">Session rate limit: {{ relay.status.options['per-session-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
|
||||
<br/>
|
||||
</span>
|
||||
</div>
|
||||
<div ng-if="!relay.status">
|
||||
Data unavailable.
|
||||
</div>
|
||||
</div>
|
||||
</script>
|
||||
</html>
|
||||
537
cmd/strelaypoolsrv/main.go
Normal file
@@ -0,0 +1,537 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
//go:generate go run ../../script/genassets.go gui >auto/gui.go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type location struct {
|
||||
Latitude float64 `json:"latitude"`
|
||||
Longitude float64 `json:"longitude"`
|
||||
}
|
||||
|
||||
type relay struct {
|
||||
URL string `json:"url"`
|
||||
Location location `json:"location"`
|
||||
uri *url.URL
|
||||
}
|
||||
|
||||
func (r relay) String() string {
|
||||
return r.URL
|
||||
}
|
||||
|
||||
type request struct {
|
||||
relay relay
|
||||
uri *url.URL
|
||||
result chan result
|
||||
}
|
||||
|
||||
type result struct {
|
||||
err error
|
||||
eviction time.Duration
|
||||
}
|
||||
|
||||
var (
|
||||
testCert tls.Certificate
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 1
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 1
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
|
||||
getMut = sync.NewRWMutex()
|
||||
getLRUCache *lru.Cache
|
||||
|
||||
postMut = sync.NewRWMutex()
|
||||
postLRUCache *lru.Cache
|
||||
|
||||
requests = make(chan request, 10)
|
||||
|
||||
mut = sync.NewRWMutex()
|
||||
knownRelays = make([]relay, 0)
|
||||
permanentRelays = make([]relay, 0)
|
||||
evictionTimers = make(map[string]*time.Timer)
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", 2, "Allowed average get request rate, per 10 s")
|
||||
flag.IntVar(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", 2, "Allowed average post request rate, per minute")
|
||||
flag.IntVar(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
getLimit = 10 * time.Second / time.Duration(getLimitAvg)
|
||||
postLimit = time.Minute / time.Duration(postLimitAvg)
|
||||
|
||||
getLRUCache = lru.New(getLRUSize)
|
||||
postLRUCache = lru.New(postLRUSize)
|
||||
|
||||
var listener net.Listener
|
||||
var err error
|
||||
|
||||
if permRelaysFile != "" {
|
||||
loadPermanentRelays(permRelaysFile)
|
||||
}
|
||||
|
||||
testCert = createTestCertificate()
|
||||
|
||||
go requestProcessor()
|
||||
|
||||
if dir != "" {
|
||||
if debug {
|
||||
log.Println("Starting TLS listener on", listen)
|
||||
}
|
||||
certFile, keyFile := filepath.Join(dir, "http-cert.pem"), filepath.Join(dir, "http-key.pem")
|
||||
var cert tls.Certificate
|
||||
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load HTTP X509 key pair:", err)
|
||||
}
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
MinVersion: tls.VersionTLS10, // No SSLv3
|
||||
CipherSuites: []uint16{
|
||||
// No RC4
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
listener, err = tls.Listen(proto, listen, tlsCfg)
|
||||
} else {
|
||||
if debug {
|
||||
log.Println("Starting plain listener on", listen)
|
||||
}
|
||||
listener, err = net.Listen(proto, listen)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatalln("listen:", err)
|
||||
}
|
||||
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.HandleFunc("/endpoint", handleRequest)
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
ReadTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
err = srv.Serve(listener)
|
||||
if err != nil {
|
||||
log.Fatalln("serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
assets := auto.Assets()
|
||||
path := r.URL.Path[1:]
|
||||
if path == "" {
|
||||
path = "index.html"
|
||||
}
|
||||
|
||||
bs, ok := assets[path]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
mtype := mimeTypeForFile(path)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// Decompress the asset, since the client did not send an Accept-Encoding: gzip header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if ipHeader != "" {
|
||||
r.RemoteAddr = r.Header.Get(ipHeader)
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
return
|
||||
}
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unhandled HTTP method", r.Method)
|
||||
}
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetRequest(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
mut.RLock()
|
||||
relays := append(permanentRelays, knownRelays...)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
for i := range relays {
|
||||
j := rand.Intn(i + 1)
|
||||
relays[i], relays[j] = relays[j], relays[i]
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(map[string][]relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
var newRelay relay
|
||||
err := json.NewDecoder(r.Body).Decode(&newRelay)
|
||||
r.Body.Close()
|
||||
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to parse payload")
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
uri, err := url.Parse(newRelay.URL)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to parse URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
// Get the IP address of the client
|
||||
rhost, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split remote address", r.RemoteAddr)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
// The relay URL did not specify a usable IP address; fall back to the IP address of the submitting client.
|
||||
if ip == nil || ip.IsUnspecified() {
|
||||
uri.Host = net.JoinHostPort(rhost, port)
|
||||
newRelay.URL = uri.String()
|
||||
} else if host != rhost {
|
||||
if debug {
|
||||
log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
|
||||
}
|
||||
http.Error(w, "IP address does not match client IP", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
newRelay.uri = uri
|
||||
newRelay.Location = getLocation(uri.Host)
|
||||
|
||||
for _, current := range permanentRelays {
|
||||
if current.uri.Host == newRelay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
|
||||
}
|
||||
http.Error(w, "Invalid request", 500)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
reschan := make(chan result)
|
||||
|
||||
select {
|
||||
case requests <- request{newRelay, uri, reschan}:
|
||||
result := <-reschan
|
||||
if result.err != nil {
|
||||
http.Error(w, result.err.Error(), 500)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string]time.Duration{
|
||||
"evictionIn": result.eviction,
|
||||
})
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Dropping request")
|
||||
}
|
||||
w.WriteHeader(429)
|
||||
}
|
||||
}
|
||||
|
||||
func requestProcessor() {
|
||||
for request := range requests {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
if !client.TestRelay(request.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3) {
|
||||
if debug {
|
||||
log.Println("Test for relay", request.relay, "failed")
|
||||
}
|
||||
request.result <- result{fmt.Errorf("test failed"), 0}
|
||||
continue
|
||||
}
|
||||
|
||||
mut.Lock()
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
if ok {
|
||||
if debug {
|
||||
log.Println("Stopping existing timer for", request.relay)
|
||||
}
|
||||
timer.Stop()
|
||||
}
|
||||
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == request.relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Relay", request.relay, "already exists")
|
||||
}
|
||||
|
||||
// Evict the old entry anyway, as configuration might have changed.
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
|
||||
goto found
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("Adding new relay", request.relay)
|
||||
}
|
||||
|
||||
found:
|
||||
|
||||
knownRelays = append(knownRelays, request.relay)
|
||||
|
||||
evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))
|
||||
mut.Unlock()
|
||||
request.result <- result{nil, evictionTime}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func evict(relay relay) func() {
|
||||
return func() {
|
||||
mut.Lock()
|
||||
defer mut.Unlock()
|
||||
if debug {
|
||||
log.Println("Evicting", relay)
|
||||
}
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Evicted", relay)
|
||||
}
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
}
|
||||
}
|
||||
delete(evictionTimers, relay.uri.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration, burst int) bool {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
lock.RLock()
|
||||
bkt, ok := cache.Get(host)
|
||||
lock.RUnlock()
|
||||
if ok {
|
||||
bkt := bkt.(*rate.Limiter)
|
||||
if !bkt.Allow() {
|
||||
// Rate limit
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
lock.Lock()
|
||||
cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
|
||||
lock.Unlock()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func loadPermanentRelays(file string) {
|
||||
content, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, line := range strings.Split(string(content), "\n") {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
uri, err := url.Parse(line)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Skipping permanent relay", line, "due to parse error", err)
|
||||
}
|
||||
continue
|
||||
|
||||
}
|
||||
|
||||
permanentRelays = append(permanentRelays, relay{
|
||||
URL: line,
|
||||
Location: getLocation(uri.Host),
|
||||
uri: uri,
|
||||
})
|
||||
if debug {
|
||||
log.Println("Adding permanent relay", line)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createTestCertificate() tls.Certificate {
|
||||
tmpDir, err := ioutil.TempDir("", "relaypoolsrv")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
|
||||
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to create test X509 key pair:", err)
|
||||
}
|
||||
|
||||
return cert
|
||||
}
|
||||
|
||||
func getLocation(host string) location {
|
||||
db, err := geoip2.Open(geoipPath)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
addr, err := net.ResolveTCPAddr("tcp", host)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
|
||||
city, err := db.City(addr.IP)
|
||||
if err != nil {
|
||||
return location{}
|
||||
}
|
||||
|
||||
return location{
|
||||
Latitude: city.Location.Latitude,
|
||||
Longitude: city.Location.Longitude,
|
||||
}
|
||||
}
|
||||
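For context, the GET handler above answers with a shuffled `{"relays": [...]}` document. A hedged sketch of a consumer of that endpoint, reusing the JSON field names defined in this file (the pool URL is the default one mentioned elsewhere in this change):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// Mirrors the relay/location JSON shapes defined in strelaypoolsrv.
type location struct {
	Latitude  float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
}

type relay struct {
	URL      string   `json:"url"`
	Location location `json:"location"`
}

func main() {
	resp, err := http.Get("https://relays.syncthing.net/endpoint")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var doc struct {
		Relays []relay `json:"relays"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		log.Fatal(err)
	}
	for _, r := range doc.Relays {
		fmt.Printf("%s (%.2f, %.2f)\n", r.URL, r.Location.Latitude, r.Location.Longitude)
	}
}
```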
22
cmd/strelaysrv/LICENSE
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 The Syncthing Project
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
131
cmd/strelaysrv/README.md
Normal file
@@ -0,0 +1,131 @@
|
||||
strelaysrv
|
||||
==========
|
||||
|
||||
This is the relay server for the `syncthing` project.
|
||||
|
||||
:exclamation:Warnings:exclamation: - Read or regret
|
||||
-----
|
||||
|
||||
By default, all relay servers join the default public relay pool, which means the relay server will be available for public use and **will consume your bandwidth** helping others connect.
|
||||
|
||||
If you wish to disable this behaviour, please specify the `-pools=""` argument.
|
||||
|
||||
Please note that `strelaysrv` is only usable by `syncthing` **version v0.12 and onwards**.
|
||||
|
||||
To run `strelaysrv` you need port 22067 to be reachable from the internet, which may mean forwarding the port and/or allowing it through your firewall.
|
||||
|
||||
Furthermore, by default `strelaysrv` also exposes a /status HTTP endpoint on port 22070, which the pool servers use to read metrics from the `strelaysrv`, such as current transfer rates and how many clients are connected. If you wish this information to be available, you may need to forward this port and allow it through your firewall as well. This is not mandatory for `strelaysrv` to function; it is used only to gather metrics and present them on the overview page of the pool server.
|
||||
|
||||
At the time of writing, the endpoint output looks as follows:
|
||||
|
||||
```
|
||||
{
|
||||
"bytesProxied": 0,
|
||||
"goArch": "amd64",
|
||||
"goMaxProcs": 1,
|
||||
"goNumRoutine": 13,
|
||||
"goOS": "linux",
|
||||
"goVersion": "go1.6",
|
||||
"kbps10s1m5m15m30m60m": [
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"numActiveSessions": 0,
|
||||
"numConnections": 0,
|
||||
"numPendingSessionKeys": 2,
|
||||
"numProxies": 0,
|
||||
"options": {
|
||||
"global-rate": 0,
|
||||
"message-timeout": 60,
|
||||
"network-timeout": 120,
|
||||
"per-session-rate": 0,
|
||||
"ping-interval": 60,
|
||||
"pools": [
|
||||
"https://relays.syncthing.net/endpoint"
|
||||
],
|
||||
"provided-by": ""
|
||||
},
|
||||
"startTime": "2016-03-06T12:53:07.090847749-05:00",
|
||||
"uptimeSeconds": 17
|
||||
}
|
||||
```
|
||||
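The status document above can also be consumed programmatically. A small sketch, assuming a relay whose status endpoint is reachable at a documentation address and using a few of the field names shown above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// 192.0.2.1 is a documentation address; substitute your relay's address.
	resp, err := http.Get("http://192.0.2.1:22070/status")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var status struct {
		BytesProxied      int64 `json:"bytesProxied"`
		NumActiveSessions int   `json:"numActiveSessions"`
		NumConnections    int   `json:"numConnections"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("proxied %d bytes, %d sessions, %d clients\n",
		status.BytesProxied, status.NumActiveSessions, status.NumConnections)
}
```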
|
||||
If you wish to disable the /status endpoint, provide `-status-srv=""` as one of the arguments when starting `strelaysrv`.
|
||||
|
||||
Running for public use
|
||||
----
|
||||
Make sure you have a public IP with port 22067 open, or have forwarded port 22067 if you are behind a NAT.
|
||||
|
||||
Run `strelaysrv` with no arguments (or with `-debug` if you want more output); that should be enough for the server to join the public relay pool.
|
||||
You should see a message saying:
|
||||
```
|
||||
2015/09/21 22:45:46 pool.go:60: Joined https://relays.syncthing.net/endpoint rejoining in 48m0s
|
||||
```
|
||||
|
||||
See `strelaysrv -help` for other options, such as rate limits, timeout intervals, etc.
|
||||
|
||||
Running for private use
|
||||
-----
|
||||
|
||||
Once you've started `strelaysrv`, it will generate a key pair and print a URI:
|
||||
```bash
|
||||
relay://:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
|
||||
```
|
||||
|
||||
This URI contains a partial address of the relay server, as well as its options, which may in the future be taken into account when choosing the most suitable relay.
|
||||
|
||||
Because the `-listen` option was not used, `strelaysrv` does not know its external IP; you should therefore replace the host part of the URI with the public IP address at which `strelaysrv` will be available:
|
||||
|
||||
```bash
|
||||
relay://192.0.2.1:22067/?id=EZQOIDM-6DDD4ZI-DJ65NSM-4OQWRAT-EIKSMJO-OZ552BO-WQZEGYY-STS5RQM&pingInterval=1m0s&networkTimeout=2m0s&sessionLimitBps=0&globalLimitBps=0&statusAddr=:22070
|
||||
```
|
||||
|
||||
If you do not care about certificate pinning (which improves security) or about passing verbose settings to the clients, you can shorten the URI to just the host part:
|
||||
|
||||
```bash
|
||||
relay://192.0.2.1:22067
|
||||
```
|
||||
|
||||
This URI can then be used in `syncthing` clients as one of the relay servers, by adding it to the "Sync Protocol Listen Address" field under Actions and Settings.
|
||||
|
||||
See `strelaysrv -help` for other options, such as rate limits, timeout intervals, etc.
|
||||
|
||||
Other items available in this repo
|
||||
----
|
||||
##### testutil
|
||||
A test utility which can be used to test the connectivity of a relay server.
|
||||
You need to generate two x509 key pairs (key.pem and cert.pem), one for the client and one for the server, in separate directories.
|
||||
Afterwards, start the client:
|
||||
```bash
|
||||
./testutil -relay="relay://192.0.2.1:22067" -keys=certs/client/ -join
|
||||
```
|
||||
|
||||
This prints out the client ID:
|
||||
```
|
||||
2015/09/21 23:00:52 main.go:42: ID: BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
|
||||
```
|
||||
|
||||
In another terminal, run the following:
|
||||
|
||||
```bash
|
||||
./testutil -relay="relay://192.0.2.1:22067" -keys=certs/server/ -connect=BG2C5ZA-W7XPFDO-LH222Z6-65F3HJX-ADFTGRT-3SBFIGM-KV26O2Q-E5RMRQ2
|
||||
```
|
||||
|
||||
This should give you an interactive prompt, where whatever you type in one terminal gets relayed to the other terminal.
|
||||
|
||||
Relay related libraries used by this repo
|
||||
----
|
||||
##### Relay protocol definition.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/protocol)
|
||||
|
||||
|
||||
##### Relay client
|
||||
|
||||
Only used by the testutil.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/client)
|
||||
17
cmd/strelaysrv/etc/linux-systemd/strelaysrv.service
Normal file
@@ -0,0 +1,17 @@
|
||||
[Unit]
|
||||
Description=Syncthing relay server
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
User=strelaysrv
|
||||
Group=strelaysrv
|
||||
ExecStart=/usr/bin/strelaysrv
|
||||
WorkingDirectory=/var/lib/strelaysrv
|
||||
|
||||
PrivateTmp=true
|
||||
ProtectSystem=full
|
||||
ProtectHome=true
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
355
cmd/strelaysrv/listener.go
Normal file
@@ -0,0 +1,355 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"log"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
outboxesMut = sync.RWMutex{}
|
||||
outboxes = make(map[syncthingprotocol.DeviceID]chan interface{})
|
||||
numConnections int64
|
||||
)
|
||||
|
||||
func listener(proto, addr string, config *tls.Config) {
|
||||
tcpListener, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
listener := tlsutil.DowngradingListener{
|
||||
Listener: tcpListener,
|
||||
}
|
||||
|
||||
for {
|
||||
conn, isTLS, err := listener.AcceptNoWrapTLS()
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Listener failed to accept connection from", conn.RemoteAddr(), ". Possibly a TCP Ping.")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
setTCPOptions(conn)
|
||||
|
||||
if debug {
|
||||
log.Println("Listener accepted connection from", conn.RemoteAddr(), "tls", isTLS)
|
||||
}
|
||||
|
||||
if isTLS {
|
||||
go protocolConnectionHandler(conn, config)
|
||||
} else {
|
||||
go sessionConnectionHandler(conn)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
conn := tls.Server(tcpConn, config)
|
||||
if err := conn.SetDeadline(time.Now().Add(messageTimeout)); err != nil {
|
||||
if debug {
|
||||
log.Println("Weird error setting deadline:", err, "on", conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
err := conn.Handshake()
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Protocol connection TLS handshake:", conn.RemoteAddr(), err)
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
state := conn.ConnectionState()
|
||||
if (!state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol != protocol.ProtocolName) && debug {
|
||||
log.Println("Protocol negotiation error")
|
||||
}
|
||||
|
||||
certs := state.PeerCertificates
|
||||
if len(certs) != 1 {
|
||||
if debug {
|
||||
log.Println("Certificate list error")
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
conn.SetDeadline(time.Time{})
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(certs[0].Raw)
|
||||
|
||||
messages := make(chan interface{})
|
||||
errors := make(chan error, 1)
|
||||
outbox := make(chan interface{})
|
||||
|
||||
// Read messages from the connection and send them on the messages
|
||||
// channel. When there is an error, send it on the error channel and
|
||||
// return. Applies also when the connection gets closed, so the pattern
|
||||
// below is to close the connection on error, then wait for the error
|
||||
// signal from messageReader to exit.
|
||||
go messageReader(conn, messages, errors)
|
||||
|
||||
pingTicker := time.NewTicker(pingInterval)
|
||||
timeoutTicker := time.NewTimer(networkTimeout)
|
||||
joined := false
|
||||
|
||||
for {
|
||||
select {
|
||||
case message := <-messages:
|
||||
timeoutTicker.Reset(networkTimeout)
|
||||
if debug {
|
||||
log.Printf("Message %T from %s", message, id)
|
||||
}
|
||||
|
||||
switch msg := message.(type) {
|
||||
case protocol.JoinRelayRequest:
|
||||
if atomic.LoadInt32(&overLimit) > 0 {
|
||||
protocol.WriteMessage(conn, protocol.RelayFull{})
|
||||
if debug {
|
||||
log.Println("Refusing join request from", id, "due to being over limits")
|
||||
}
|
||||
conn.Close()
|
||||
limitCheckTimer.Reset(time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
outboxesMut.RLock()
|
||||
_, ok := outboxes[id]
|
||||
outboxesMut.RUnlock()
|
||||
if ok {
|
||||
protocol.WriteMessage(conn, protocol.ResponseAlreadyConnected)
|
||||
if debug {
|
||||
log.Println("Already have a peer with the same ID", id, conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
outboxesMut.Lock()
|
||||
outboxes[id] = outbox
|
||||
outboxesMut.Unlock()
|
||||
joined = true
|
||||
|
||||
protocol.WriteMessage(conn, protocol.ResponseSuccess)
|
||||
|
||||
case protocol.ConnectRequest:
|
||||
requestedPeer := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
outboxesMut.RLock()
|
||||
peerOutbox, ok := outboxes[requestedPeer]
|
||||
outboxesMut.RUnlock()
|
||||
if !ok {
|
||||
if debug {
|
||||
log.Println(id, "is looking for", requestedPeer, "which does not exist")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
// requestedPeer is the server, id is the client
|
||||
ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
|
||||
|
||||
go ses.Serve()
|
||||
|
||||
clientInvitation := ses.GetClientInvitationMessage()
|
||||
serverInvitation := ses.GetServerInvitationMessage()
|
||||
|
||||
if err := protocol.WriteMessage(conn, clientInvitation); err != nil {
|
||||
if debug {
|
||||
log.Printf("Error sending invitation from %s to client: %s", id, err)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case peerOutbox <- serverInvitation:
|
||||
if debug {
|
||||
log.Println("Sent invitation from", id, "to", requestedPeer)
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
if debug {
|
||||
log.Println("Could not send invitation from", id, "to", requestedPeer, "as peer disconnected")
|
||||
}
|
||||
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
case protocol.Ping:
|
||||
if err := protocol.WriteMessage(conn, protocol.Pong{}); err != nil {
|
||||
if debug {
|
||||
log.Println("Error writing pong:", err)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
case protocol.Pong:
|
||||
// Nothing
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Printf("Unknown message %s: %T", id, message)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseUnexpectedMessage)
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
case err := <-errors:
|
||||
if debug {
|
||||
log.Printf("Closing connection %s: %s", id, err)
|
||||
}
|
||||
|
||||
// Potentially closing a second time.
|
||||
conn.Close()
|
||||
|
||||
if joined {
|
||||
// Only delete the outbox if the client is joined, as it might be
|
||||
// a lookup request coming from the same client.
|
||||
outboxesMut.Lock()
|
||||
delete(outboxes, id)
|
||||
outboxesMut.Unlock()
|
||||
// Also, kill all sessions related to this node, as it probably
// went offline. This helps the other end realize faster that the
// client is no longer there. It also helps resolve
// 'already connected' errors when one of the sides restarts and
// connects to the other peer before that peer has even realized
// that the node went away.
|
||||
dropSessions(id)
|
||||
}
|
||||
return
|
||||
|
||||
case <-pingTicker.C:
|
||||
if !joined {
|
||||
if debug {
|
||||
log.Println(id, "didn't join within", pingInterval)
|
||||
}
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := protocol.WriteMessage(conn, protocol.Ping{}); err != nil {
|
||||
if debug {
|
||||
log.Println(id, err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&overLimit) > 0 && !hasSessions(id) {
|
||||
if debug {
|
||||
log.Println("Dropping", id, "as it has no sessions and we are over our limits")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.RelayFull{})
|
||||
conn.Close()
|
||||
|
||||
limitCheckTimer.Reset(time.Second)
|
||||
}
|
||||
|
||||
case <-timeoutTicker.C:
|
||||
// We should receive an error from the reader loop, which will cause
|
||||
// us to quit this loop.
|
||||
if debug {
|
||||
log.Printf("%s timed out", id)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
case msg := <-outbox:
|
||||
if debug {
|
||||
log.Printf("Sending message %T to %s", msg, id)
|
||||
}
|
||||
if err := protocol.WriteMessage(conn, msg); err != nil {
|
||||
if debug {
|
||||
log.Println(id, err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sessionConnectionHandler(conn net.Conn) {
|
||||
if err := conn.SetDeadline(time.Now().Add(messageTimeout)); err != nil {
|
||||
if debug {
|
||||
log.Println("Weird error setting deadline:", err, "on", conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
message, err := protocol.ReadMessage(conn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch msg := message.(type) {
|
||||
case protocol.JoinSessionRequest:
|
||||
ses := findSession(string(msg.Key))
|
||||
if debug {
|
||||
log.Println(conn.RemoteAddr(), "session lookup", ses, hex.EncodeToString(msg.Key)[:5])
|
||||
}
|
||||
|
||||
if ses == nil {
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if !ses.AddConnection(conn) {
|
||||
if debug {
|
||||
log.Println("Failed to add", conn.RemoteAddr(), "to session", ses)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseAlreadyConnected)
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if err := protocol.WriteMessage(conn, protocol.ResponseSuccess); err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to send session join response to ", conn.RemoteAddr(), "for", ses)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := conn.SetDeadline(time.Time{}); err != nil {
|
||||
if debug {
|
||||
log.Println("Weird error setting deadline:", err, "on", conn.RemoteAddr())
|
||||
}
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
if debug {
|
||||
log.Println("Unexpected message from", conn.RemoteAddr(), message)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseUnexpectedMessage)
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) {
|
||||
atomic.AddInt64(&numConnections, 1)
|
||||
defer atomic.AddInt64(&numConnections, -1)
|
||||
|
||||
for {
|
||||
msg, err := protocol.ReadMessage(conn)
|
||||
if err != nil {
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
messages <- msg
|
||||
}
|
||||
}
|
||||
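The protocol handler above relies on a common Go pattern: a reader goroutine forwards decoded messages on one channel and reports its terminal error on another, so the select loop can close the connection and then wait for the reader to signal that it has exited. A stripped-down, self-contained sketch of that pattern, using plain text lines instead of relay protocol messages:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
	"time"
)

// lineReader plays the role of messageReader: it forwards each decoded item
// on the messages channel and reports its terminal condition exactly once on
// the errors channel before returning (nil here means clean EOF).
func lineReader(s *bufio.Scanner, messages chan<- string, errors chan<- error) {
	for s.Scan() {
		messages <- s.Text()
	}
	errors <- s.Err()
}

func main() {
	src := bufio.NewScanner(strings.NewReader("ping\npong\n"))
	messages := make(chan string)
	errors := make(chan error, 1)
	go lineReader(src, messages, errors)

	timeout := time.NewTimer(time.Second)
	for {
		select {
		case msg := <-messages:
			fmt.Println("got message:", msg)
		case err := <-errors:
			fmt.Println("reader finished, err =", err)
			return
		case <-timeout.C:
			fmt.Println("timed out waiting for messages")
			return
		}
	}
}
```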
301
cmd/strelaysrv/main.go
Normal file
@@ -0,0 +1,301 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/nat"
|
||||
_ "github.com/syncthing/syncthing/lib/pmp"
|
||||
_ "github.com/syncthing/syncthing/lib/upnp"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`strelaysrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
}
|
||||
|
||||
var (
|
||||
listen string
|
||||
debug bool
|
||||
|
||||
sessionAddress []byte
|
||||
sessionPort uint16
|
||||
|
||||
networkTimeout = 2 * time.Minute
|
||||
pingInterval = time.Minute
|
||||
messageTimeout = time.Minute
|
||||
|
||||
limitCheckTimer *time.Timer
|
||||
|
||||
sessionLimitBps int
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
|
||||
statusAddr string
|
||||
poolAddrs string
|
||||
pools []string
|
||||
providedBy string
|
||||
defaultPoolAddrs = "https://relays.syncthing.net/endpoint"
|
||||
|
||||
natEnabled bool
|
||||
natLease int
|
||||
natRenewal int
|
||||
natTimeout int
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.Lshortfile | log.LstdFlags)
|
||||
|
||||
var dir, extAddress, proto string
|
||||
|
||||
flag.StringVar(&listen, "listen", ":22067", "Protocol listen address")
|
||||
flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
|
||||
flag.DurationVar(&networkTimeout, "network-timeout", networkTimeout, "Timeout for network operations between the client and the relay.\n\tIf no data is received between the client and the relay in this period of time, the connection is terminated.\n\tFurthermore, if no data is sent between either clients being relayed within this period of time, the session is also terminated.")
|
||||
flag.DurationVar(&pingInterval, "ping-interval", pingInterval, "How often pings are sent")
|
||||
flag.DurationVar(&messageTimeout, "message-timeout", messageTimeout, "Maximum amount of time we wait for relevant messages to arrive")
|
||||
flag.IntVar(&sessionLimitBps, "per-session-rate", sessionLimitBps, "Per session rate limit, in bytes/s")
|
||||
flag.IntVar(&globalLimitBps, "global-rate", globalLimitBps, "Global rate limit, in bytes/s")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
|
||||
flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
|
||||
flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
|
||||
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
flag.BoolVar(&natEnabled, "nat", false, "Use UPnP/NAT-PMP to acquire external port mapping")
|
||||
flag.IntVar(&natLease, "nat-lease", 60, "NAT lease length in minutes")
|
||||
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
|
||||
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
|
||||
flag.Parse()
|
||||
|
||||
if extAddress == "" {
|
||||
extAddress = listen
|
||||
}
|
||||
|
||||
if len(providedBy) > 30 {
|
||||
log.Fatal("Provided-by cannot be longer than 30 characters")
|
||||
}
|
||||
|
||||
addr, err := net.ResolveTCPAddr(proto, extAddress)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
laddr, err := net.ResolveTCPAddr(proto, listen)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if laddr.IP != nil && !laddr.IP.IsUnspecified() {
|
||||
laddr.Port = 0
|
||||
transport, ok := http.DefaultTransport.(*http.Transport)
|
||||
if ok {
|
||||
transport.Dial = (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
LocalAddr: laddr,
|
||||
}).Dial
|
||||
}
|
||||
}
|
||||
|
||||
log.Println(LongVersion)
|
||||
|
||||
maxDescriptors, err := osutil.MaximizeOpenFileLimit()
|
||||
if maxDescriptors > 0 {
|
||||
// Assume that 20% of FDs are leaked/unaccounted for.
|
||||
descriptorLimit = int64(maxDescriptors*80) / 100
|
||||
log.Println("Connection limit", descriptorLimit)
|
||||
|
||||
go monitorLimits()
|
||||
} else if err != nil && runtime.GOOS != "windows" {
|
||||
log.Println("Assuming no connection limit, due to error retrieving rlimits:", err)
|
||||
}
|
||||
|
||||
sessionAddress = addr.IP[:]
|
||||
sessionPort = uint16(addr.Port)
|
||||
|
||||
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
}
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
NextProtos: []string{protocol.ProtocolName},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
InsecureSkipVerify: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
|
||||
if debug {
|
||||
log.Println("ID:", id)
|
||||
}
|
||||
|
||||
wrapper := config.Wrap("config", config.New(id))
|
||||
wrapper.SetOptions(config.OptionsConfiguration{
|
||||
NATLeaseM: natLease,
|
||||
NATRenewalM: natRenewal,
|
||||
NATTimeoutS: natTimeout,
|
||||
})
|
||||
natSvc := nat.NewService(id, wrapper)
|
||||
mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}
|
||||
|
||||
if natEnabled {
|
||||
go natSvc.Serve()
|
||||
found := make(chan struct{})
|
||||
mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
|
||||
select {
|
||||
case found <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
})
|
||||
|
||||
// Need to wait a few extra seconds, since NAT library waits exactly natTimeout seconds on all interfaces.
|
||||
timeout := time.Duration(natTimeout+2) * time.Second
|
||||
log.Printf("Waiting %s to acquire NAT mapping", timeout)
|
||||
|
||||
select {
|
||||
case <-found:
|
||||
log.Printf("Found NAT mapping: %s", mapping.ExternalAddresses())
|
||||
case <-time.After(timeout):
|
||||
log.Println("Timeout out waiting for NAT mapping.")
|
||||
}
|
||||
}
|
||||
|
||||
if sessionLimitBps > 0 {
|
||||
sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
|
||||
}
|
||||
|
||||
if statusAddr != "" {
|
||||
go statusService(statusAddr)
|
||||
}
|
||||
|
||||
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to construct URI", err)
|
||||
}
|
||||
|
||||
log.Println("URI:", uri.String())
|
||||
|
||||
if poolAddrs == defaultPoolAddrs {
|
||||
log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
||||
log.Println("!! Joining default relay pools, this relay will be available for public use. !!")
|
||||
log.Println(`!! Use the -pools="" command line option to make the relay private. !!`)
|
||||
log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
||||
}
|
||||
|
||||
pools = strings.Split(poolAddrs, ",")
|
||||
for _, pool := range pools {
|
||||
pool = strings.TrimSpace(pool)
|
||||
if len(pool) > 0 {
|
||||
go poolHandler(pool, uri, mapping)
|
||||
}
|
||||
}
|
||||
|
||||
go listener(proto, listen, tlsCfg)
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigs
|
||||
|
||||
// Gracefully close all connections, hoping that clients will be faster
|
||||
// to realize that the relay is now gone.
|
||||
|
||||
sessionMut.RLock()
|
||||
for _, session := range activeSessions {
|
||||
session.CloseConns()
|
||||
}
|
||||
|
||||
for _, session := range pendingSessions {
|
||||
session.CloseConns()
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
|
||||
outboxesMut.RLock()
|
||||
for _, outbox := range outboxes {
|
||||
close(outbox)
|
||||
}
|
||||
outboxesMut.RUnlock()
|
||||
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
func monitorLimits() {
|
||||
limitCheckTimer = time.NewTimer(time.Minute)
|
||||
for range limitCheckTimer.C {
|
||||
if atomic.LoadInt64(&numConnections)+atomic.LoadInt64(&numProxies) > descriptorLimit {
|
||||
atomic.StoreInt32(&overLimit, 1)
|
||||
log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.")
|
||||
} else if atomic.CompareAndSwapInt32(&overLimit, 1, 0) {
|
||||
log.Println("Dropped below our connection limits. Accepting new connections.")
|
||||
}
|
||||
limitCheckTimer.Reset(time.Minute)
|
||||
}
|
||||
}
|
||||
|
||||
type mapping struct {
|
||||
*nat.Mapping
|
||||
}
|
||||
|
||||
func (m *mapping) Address() nat.Address {
|
||||
ext := m.ExternalAddresses()
|
||||
if len(ext) > 0 {
|
||||
return ext[0]
|
||||
}
|
||||
return m.Mapping.Address()
|
||||
}
|
||||
66
cmd/strelaysrv/pool.go
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
func poolHandler(pool string, uri *url.URL, mapping mapping) {
|
||||
if debug {
|
||||
log.Println("Joining", pool)
|
||||
}
|
||||
for {
|
||||
uriCopy := *uri
|
||||
uriCopy.Host = mapping.Address().String()
|
||||
|
||||
var b bytes.Buffer
|
||||
json.NewEncoder(&b).Encode(struct {
|
||||
URL string `json:"url"`
|
||||
}{
|
||||
uriCopy.String(),
|
||||
})
|
||||
|
||||
resp, err := http.Post(pool, "application/json", &b)
|
||||
if err != nil {
|
||||
log.Println("Error joining pool", pool, err)
|
||||
} else if resp.StatusCode == 500 {
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Println("Failed to join", pool, "due to an internal server error. Could not read response:", err)
|
||||
} else {
|
||||
log.Println("Failed to join", pool, "due to an internal server error:", string(bs))
|
||||
}
|
||||
resp.Body.Close()
|
||||
} else if resp.StatusCode == 429 {
|
||||
log.Println(pool, "under load, will retry in a minute")
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
} else if resp.StatusCode == 401 {
|
||||
log.Println(pool, "failed to join due to IP address not matching external address. Aborting")
|
||||
return
|
||||
} else if resp.StatusCode == 200 {
|
||||
var x struct {
|
||||
EvictionIn time.Duration `json:"evictionIn"`
|
||||
}
|
||||
err := json.NewDecoder(resp.Body).Decode(&x)
|
||||
if err == nil {
|
||||
rejoin := x.EvictionIn - (x.EvictionIn / 5)
|
||||
log.Println("Joined", pool, "rejoining in", rejoin)
|
||||
time.Sleep(rejoin)
|
||||
continue
|
||||
} else {
|
||||
log.Println("Failed to deserialize response", err)
|
||||
}
|
||||
} else {
|
||||
log.Println(pool, "unknown response type from server", resp.StatusCode)
|
||||
}
|
||||
time.Sleep(time.Hour)
|
||||
}
|
||||
}
|
||||
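The 200 branch above decodes the pool server's `evictionIn` duration and schedules the next announcement at 80% of it. A self-contained sketch of that decode and arithmetic; the JSON body is invented, and durations marshal as integer nanoseconds:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"time"
)

func main() {
	// One hour, expressed as nanoseconds the way time.Duration marshals.
	body := `{"evictionIn": 3600000000000}`

	var x struct {
		EvictionIn time.Duration `json:"evictionIn"`
	}
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&x); err != nil {
		log.Fatal(err)
	}

	// Rejoin at 80% of the eviction interval, leaving a margin before the
	// pool server would forget about this relay.
	rejoin := x.EvictionIn - (x.EvictionIn / 5)
	fmt.Println("eviction in", x.EvictionIn, "- rejoining in", rejoin) // 1h0m0s, 48m0s
}
```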
353
cmd/strelaysrv/session.go
Normal file
@@ -0,0 +1,353 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
sessionMut = sync.RWMutex{}
|
||||
activeSessions = make([]*session, 0)
|
||||
pendingSessions = make(map[string]*session, 0)
|
||||
numProxies int64
|
||||
bytesProxied int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
|
||||
serverkey := make([]byte, 32)
|
||||
_, err := rand.Read(serverkey)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
clientkey := make([]byte, 32)
|
||||
_, err = rand.Read(clientkey)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ses := &session{
|
||||
serverkey: serverkey,
|
||||
serverid: serverid,
|
||||
clientkey: clientkey,
|
||||
clientid: clientid,
|
||||
rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
|
||||
connsChan: make(chan net.Conn),
|
||||
conns: make([]net.Conn, 0, 2),
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("New session", ses)
|
||||
}
|
||||
|
||||
sessionMut.Lock()
|
||||
pendingSessions[string(ses.serverkey)] = ses
|
||||
pendingSessions[string(ses.clientkey)] = ses
|
||||
sessionMut.Unlock()
|
||||
|
||||
return ses
|
||||
}
|
||||
|
||||
func findSession(key string) *session {
|
||||
sessionMut.Lock()
|
||||
defer sessionMut.Unlock()
|
||||
ses, ok := pendingSessions[key]
|
||||
if !ok {
|
||||
return nil
|
||||
|
||||
}
|
||||
delete(pendingSessions, key)
|
||||
return ses
|
||||
}
|
||||
|
||||
func dropSessions(id syncthingprotocol.DeviceID) {
|
||||
sessionMut.RLock()
|
||||
for _, session := range activeSessions {
|
||||
if session.HasParticipant(id) {
|
||||
if debug {
|
||||
log.Println("Dropping session", session, "involving", id)
|
||||
}
|
||||
session.CloseConns()
|
||||
}
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
}
|
||||
|
||||
func hasSessions(id syncthingprotocol.DeviceID) bool {
|
||||
sessionMut.RLock()
|
||||
has := false
|
||||
for _, session := range activeSessions {
|
||||
if session.HasParticipant(id) {
|
||||
has = true
|
||||
break
|
||||
}
|
||||
}
|
||||
sessionMut.RUnlock()
|
||||
return has
|
||||
}
|
||||
|
||||
type session struct {
|
||||
mut sync.Mutex
|
||||
|
||||
serverkey []byte
|
||||
serverid syncthingprotocol.DeviceID
|
||||
|
||||
clientkey []byte
|
||||
clientid syncthingprotocol.DeviceID
|
||||
|
||||
rateLimit func(bytes int)
|
||||
|
||||
connsChan chan net.Conn
|
||||
conns []net.Conn
|
||||
}
|
||||
|
||||
func (s *session) AddConnection(conn net.Conn) bool {
|
||||
if debug {
|
||||
log.Println("New connection for", s, "from", conn.RemoteAddr())
|
||||
}
|
||||
|
||||
select {
|
||||
case s.connsChan <- conn:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *session) Serve() {
|
||||
timedout := time.After(messageTimeout)
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "serving")
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case conn := <-s.connsChan:
|
||||
s.mut.Lock()
|
||||
s.conns = append(s.conns, conn)
|
||||
s.mut.Unlock()
|
||||
// We're the only ones mutating s.conns, hence we are free to read it.
|
||||
if len(s.conns) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
close(s.connsChan)
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "starting between", s.conns[0].RemoteAddr(), "and", s.conns[1].RemoteAddr())
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
var err0 error
|
||||
go func() {
|
||||
err0 = s.proxy(s.conns[0], s.conns[1])
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
var err1 error
|
||||
go func() {
|
||||
err1 = s.proxy(s.conns[1], s.conns[0])
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
sessionMut.Lock()
|
||||
activeSessions = append(activeSessions, s)
|
||||
sessionMut.Unlock()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "ended, outcomes:", err0, "and", err1)
|
||||
}
|
||||
goto done
|
||||
|
||||
case <-timedout:
|
||||
if debug {
|
||||
log.Println("Session", s, "timed out")
|
||||
}
|
||||
goto done
|
||||
}
|
||||
}
|
||||
done:
|
||||
// We can end up here in 3 cases:
|
||||
// 1. Timeout joining, in which case there are potentially entries in pendingSessions
|
||||
// 2. General session end/timeout, in which case there are entries in activeSessions
|
||||
// 3. Protocol handler calls dropSessions as one of its clients disconnects.
|
||||
|
||||
sessionMut.Lock()
|
||||
delete(pendingSessions, string(s.serverkey))
|
||||
delete(pendingSessions, string(s.clientkey))
|
||||
|
||||
for i, session := range activeSessions {
|
||||
if session == s {
|
||||
l := len(activeSessions) - 1
|
||||
activeSessions[i] = activeSessions[l]
|
||||
activeSessions[l] = nil
|
||||
activeSessions = activeSessions[:l]
|
||||
}
|
||||
}
|
||||
sessionMut.Unlock()
|
||||
|
||||
// If we are here because of case 2 or 3, we are potentially closing some or
|
||||
// all connections a second time.
|
||||
s.CloseConns()
|
||||
|
||||
if debug {
|
||||
log.Println("Session", s, "stopping")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) GetClientInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.serverid[:],
|
||||
Key: s.clientkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) GetServerInvitationMessage() protocol.SessionInvitation {
|
||||
return protocol.SessionInvitation{
|
||||
From: s.clientid[:],
|
||||
Key: s.serverkey,
|
||||
Address: sessionAddress,
|
||||
Port: sessionPort,
|
||||
ServerSocket: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) HasParticipant(id syncthingprotocol.DeviceID) bool {
|
||||
return s.clientid == id || s.serverid == id
|
||||
}
|
||||
|
||||
func (s *session) CloseConns() {
|
||||
s.mut.Lock()
|
||||
for _, conn := range s.conns {
|
||||
conn.Close()
|
||||
}
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
func (s *session) proxy(c1, c2 net.Conn) error {
|
||||
if debug {
|
||||
log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr())
|
||||
}
|
||||
|
||||
atomic.AddInt64(&numProxies, 1)
|
||||
defer atomic.AddInt64(&numProxies, -1)
|
||||
|
||||
buf := make([]byte, 65536)
|
||||
for {
|
||||
c1.SetReadDeadline(time.Now().Add(networkTimeout))
|
||||
n, err := c1.Read(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.AddInt64(&bytesProxied, int64(n))
|
||||
|
||||
if debug {
|
||||
log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr())
|
||||
}
|
||||
|
||||
if s.rateLimit != nil {
|
||||
s.rateLimit(n)
|
||||
}
|
||||
|
||||
c2.SetWriteDeadline(time.Now().Add(networkTimeout))
|
||||
_, err = c2.Write(buf[:n])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *session) String() string {
|
||||
return fmt.Sprintf("<%s/%s>", hex.EncodeToString(s.clientkey)[:5], hex.EncodeToString(s.serverkey)[:5])
|
||||
}
|
||||
|
||||
func makeRateLimitFunc(sessionRateLimit, globalRateLimit *rate.Limiter) func(int) {
|
||||
// This may be a case of super duper premature optimization... We build an
|
||||
// optimized function to do the rate limiting here based on what we need
|
||||
// to do and then use it in the loop.
|
||||
|
||||
if sessionRateLimit == nil && globalRateLimit == nil {
|
||||
// No limiting needed. We could equally well return a func(int){} and
// not do a nil check where we use it, but I think the nil check there
|
||||
// makes it clear that there will be no limiting if none is
|
||||
// configured...
|
||||
return nil
|
||||
}
|
||||
|
||||
if sessionRateLimit == nil {
|
||||
// We only have a global limiter
|
||||
return func(bytes int) {
|
||||
take(bytes, globalRateLimit)
|
||||
}
|
||||
}
|
||||
|
||||
if globalRateLimit == nil {
|
||||
// We only have a session limiter
|
||||
return func(bytes int) {
|
||||
take(bytes, sessionRateLimit)
|
||||
}
|
||||
}
|
||||
|
||||
// We have both. Queue the bytes on both the global and session specific
|
||||
// rate limiters.
|
||||
return func(bytes int) {
|
||||
take(bytes, sessionRateLimit, globalRateLimit)
|
||||
}
|
||||
}
|
||||
|
||||
// take is a utility function to consume tokens from a set of rate.Limiters.
|
||||
// Tokens are consumed in parallel on all limiters, respecting their
|
||||
// individual burst sizes.
|
||||
func take(tokens int, ls ...*rate.Limiter) {
|
||||
// minBurst is the smallest burst size supported by all limiters.
|
||||
minBurst := int(math.MaxInt32)
|
||||
for _, l := range ls {
|
||||
if burst := l.Burst(); burst < minBurst {
|
||||
minBurst = burst
|
||||
}
|
||||
}
|
||||
|
||||
for tokens > 0 {
|
||||
// chunk is how many tokens we can consume at a time
|
||||
chunk := tokens
|
||||
if chunk > minBurst {
|
||||
chunk = minBurst
|
||||
}
|
||||
|
||||
// maxDelay is the longest delay mandated by any of the limiters for
|
||||
// the chosen chunk size.
|
||||
var maxDelay time.Duration
|
||||
for _, l := range ls {
|
||||
res := l.ReserveN(time.Now(), chunk)
|
||||
if del := res.Delay(); del > maxDelay {
|
||||
maxDelay = del
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(maxDelay)
|
||||
tokens -= chunk
|
||||
}
|
||||
}
|
||||
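take() above splits large byte counts into chunks no bigger than the smallest burst, so that no single reservation can exceed any limiter's capacity. A self-contained sketch of the same technique with hypothetical limits (1 MiB/s per session, 4 MiB/s globally, 2x bursts as in strelaysrv's main()):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// consume drains tokens from every limiter in chunks no larger than the
// smallest burst, sleeping for the longest delay any limiter demands.
func consume(tokens int, ls ...*rate.Limiter) {
	minBurst := ls[0].Burst()
	for _, l := range ls[1:] {
		if l.Burst() < minBurst {
			minBurst = l.Burst()
		}
	}
	for tokens > 0 {
		chunk := tokens
		if chunk > minBurst {
			chunk = minBurst
		}
		var maxDelay time.Duration
		for _, l := range ls {
			if d := l.ReserveN(time.Now(), chunk).Delay(); d > maxDelay {
				maxDelay = d
			}
		}
		time.Sleep(maxDelay)
		tokens -= chunk
	}
}

func main() {
	session := rate.NewLimiter(rate.Limit(1<<20), 2<<20) // 1 MiB/s, 2 MiB burst
	global := rate.NewLimiter(rate.Limit(4<<20), 8<<20)  // 4 MiB/s, 8 MiB burst

	start := time.Now()
	consume(3<<20, session, global) // pretend we proxied 3 MiB
	fmt.Println("took", time.Since(start).Round(100*time.Millisecond))
}
```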
119
cmd/strelaysrv/status.go
Normal file
@@ -0,0 +1,119 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors.

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"runtime"
	"sync/atomic"
	"time"
)

var rc *rateCalculator

func statusService(addr string) {
	rc = newRateCalculator(360, 10*time.Second, &bytesProxied)

	handler := http.NewServeMux()
	handler.HandleFunc("/status", getStatus)

	srv := http.Server{
		Addr:        addr,
		Handler:     handler,
		ReadTimeout: 15 * time.Second,
	}
	srv.SetKeepAlivesEnabled(false)
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}

func getStatus(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	status := make(map[string]interface{})

	sessionMut.Lock()
	// This can potentially be double the number of pending sessions, as each session has two keys, one for each side.
	status["startTime"] = rc.startTime
	status["uptimeSeconds"] = time.Since(rc.startTime) / time.Second
	status["numPendingSessionKeys"] = len(pendingSessions)
	status["numActiveSessions"] = len(activeSessions)
	sessionMut.Unlock()
	status["numConnections"] = atomic.LoadInt64(&numConnections)
	status["numProxies"] = atomic.LoadInt64(&numProxies)
	status["bytesProxied"] = atomic.LoadInt64(&bytesProxied)
	status["goVersion"] = runtime.Version()
	status["goOS"] = runtime.GOOS
	status["goArch"] = runtime.GOARCH
	status["goMaxProcs"] = runtime.GOMAXPROCS(-1)
	status["goNumRoutine"] = runtime.NumGoroutine()
	status["kbps10s1m5m15m30m60m"] = []int64{
		rc.rate(1) * 8 / 1000, // each interval is 10s
		rc.rate(60/10) * 8 / 1000,
		rc.rate(5*60/10) * 8 / 1000,
		rc.rate(15*60/10) * 8 / 1000,
		rc.rate(30*60/10) * 8 / 1000,
		rc.rate(60*60/10) * 8 / 1000,
	}
	status["options"] = map[string]interface{}{
		"network-timeout":  networkTimeout / time.Second,
		"ping-interval":    pingInterval / time.Second,
		"message-timeout":  messageTimeout / time.Second,
		"per-session-rate": sessionLimitBps,
		"global-rate":      globalLimitBps,
		"pools":            pools,
		"provided-by":      providedBy,
	}

	bs, err := json.MarshalIndent(status, "", " ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}

type rateCalculator struct {
	rates     []int64
	prev      int64
	counter   *int64
	startTime time.Time
}

func newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {
	r := &rateCalculator{
		rates:     make([]int64, keepIntervals),
		counter:   counter,
		startTime: time.Now(),
	}

	go r.updateRates(interval)

	return r
}

func (r *rateCalculator) updateRates(interval time.Duration) {
	for {
		now := time.Now()
		next := now.Truncate(interval).Add(interval)
		time.Sleep(next.Sub(now))

		cur := atomic.LoadInt64(r.counter)
		rate := int64(float64(cur-r.prev) / interval.Seconds())
		copy(r.rates[1:], r.rates)
		r.rates[0] = rate
		r.prev = cur
	}
}

func (r *rateCalculator) rate(periods int) int64 {
	var tot int64
	for i := 0; i < periods; i++ {
		tot += r.rates[i]
	}
	return tot / int64(periods)
}
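A note on the kbps10s1m5m15m30m60m values above: rc.rate(n) averages the newest n ten-second buckets and returns bytes per second, so each window is simply expressed as a bucket count (60/10 buckets for one minute, and so on), and the *8/1000 factor converts bytes per second into kbit/s. A tiny sketch of that arithmetic, with an invented traffic figure:

package main

import "fmt"

func main() {
	// Each bucket covers 10 s, so an n-minute window spans n*60/10 buckets.
	for _, mins := range []int{1, 5, 15, 30, 60} {
		fmt.Printf("%2d min window = %3d buckets\n", mins, mins*60/10)
	}

	// Invented example: the buckets in a window average 125000 bytes/s.
	const avgBytesPerSec = 125000
	fmt.Println(avgBytesPerSec*8/1000, "kbit/s") // 1000 kbit/s
}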
152
cmd/strelaysrv/testutil/main.go
Normal file
@@ -0,0 +1,152 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
|
||||
var connect, relay, dir string
|
||||
var join, test bool
|
||||
|
||||
flag.StringVar(&connect, "connect", "", "Device ID to connect to")
|
||||
flag.BoolVar(&join, "join", false, "Join relay")
|
||||
flag.BoolVar(&test, "test", false, "Generic relay test")
|
||||
flag.StringVar(&relay, "relay", "relay://127.0.0.1:22067", "Relay address")
|
||||
flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem are stored")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to load X509 key pair:", err)
|
||||
}
|
||||
|
||||
id := syncthingprotocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("ID:", id)
|
||||
|
||||
uri, err := url.Parse(relay)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stdin := make(chan string)
|
||||
|
||||
go stdinReader(stdin)
|
||||
|
||||
if join {
|
||||
log.Println("Creating client")
|
||||
relay, err := client.NewClient(uri, []tls.Certificate{cert}, nil, 10*time.Second)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println("Created client")
|
||||
|
||||
go relay.Serve()
|
||||
|
||||
recv := make(chan protocol.SessionInvitation)
|
||||
|
||||
go func() {
|
||||
log.Println("Starting invitation receiver")
|
||||
for invite := range relay.Invitations() {
|
||||
select {
|
||||
case recv <- invite:
|
||||
log.Println("Received invitation", invite)
|
||||
default:
|
||||
log.Println("Discarding invitation", invite)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
conn, err := client.JoinSession(<-recv)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to join", err)
|
||||
}
|
||||
log.Println("Joined", conn.RemoteAddr(), conn.LocalAddr())
|
||||
connectToStdio(stdin, conn)
|
||||
log.Println("Finished", conn.RemoteAddr(), conn.LocalAddr())
|
||||
}
|
||||
} else if connect != "" {
|
||||
id, err := syncthingprotocol.DeviceIDFromString(connect)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
invite, err := client.GetInvitationFromRelay(uri, id, []tls.Certificate{cert}, 10*time.Second)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Println("Received invitation", invite)
|
||||
conn, err := client.JoinSession(invite)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to join", err)
|
||||
}
|
||||
log.Println("Joined", conn.RemoteAddr(), conn.LocalAddr())
|
||||
connectToStdio(stdin, conn)
|
||||
log.Println("Finished", conn.RemoteAddr(), conn.LocalAddr())
|
||||
} else if test {
|
||||
if client.TestRelay(uri, []tls.Certificate{cert}, time.Second, 2*time.Second, 4) {
|
||||
log.Println("OK")
|
||||
} else {
|
||||
log.Println("FAIL")
|
||||
}
|
||||
} else {
|
||||
log.Fatal("Requires either join or connect")
|
||||
}
|
||||
}
|
||||
|
||||
func stdinReader(c chan<- string) {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
c <- scanner.Text()
|
||||
c <- "\n"
|
||||
}
|
||||
}
|
||||
|
||||
func connectToStdio(stdin <-chan string, conn net.Conn) {
|
||||
go func() {
|
||||
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
conn.SetReadDeadline(time.Now().Add(time.Millisecond))
|
||||
n, err := conn.Read(buf[0:])
|
||||
if err != nil {
|
||||
nerr, ok := err.(net.Error)
|
||||
if !ok || !nerr.Timeout() {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
os.Stdout.Write(buf[:n])
|
||||
|
||||
select {
|
||||
case msg := <-stdin:
|
||||
_, err := conn.Write([]byte(msg))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
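The invitation receiver in the join branch above relies on a select with a default case, so a busy consumer never blocks the Invitations() loop; surplus invitations are simply discarded. A minimal, self-contained sketch of that non-blocking hand-off (the channel and values are illustrative):

package main

import "fmt"

func main() {
	recv := make(chan int) // unbuffered, like the recv channel above

	for invite := 1; invite <= 3; invite++ {
		select {
		case recv <- invite:
			fmt.Println("handed off invitation", invite)
		default:
			// Nobody is ready to receive right now; drop instead of blocking.
			fmt.Println("discarding invitation", invite)
		}
	}
}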
|
||||
28
cmd/strelaysrv/utils.go
Normal file
@@ -0,0 +1,28 @@
// Copyright (C) 2015 Audrius Butkevicius and Contributors.

package main

import (
	"errors"
	"net"
)

func setTCPOptions(conn net.Conn) error {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return errors.New("Not a TCP connection")
	}
	if err := tcpConn.SetLinger(0); err != nil {
		return err
	}
	if err := tcpConn.SetNoDelay(true); err != nil {
		return err
	}
	if err := tcpConn.SetKeepAlivePeriod(networkTimeout); err != nil {
		return err
	}
	if err := tcpConn.SetKeepAlive(true); err != nil {
		return err
	}
	return nil
}
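A helper like setTCPOptions would typically be applied to every connection as it is accepted. The standalone sketch below shows that call site; the listen address, keep-alive period and handler are illustrative stand-ins rather than values taken from the relay.

package main

import (
	"log"
	"net"
	"time"
)

// Same shape as the helper above, with a hard-coded keep-alive period so the
// sketch compiles on its own (the real code uses networkTimeout).
func setTCPOptions(conn net.Conn) error {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return nil
	}
	tcpConn.SetLinger(0)
	tcpConn.SetNoDelay(true)
	tcpConn.SetKeepAlivePeriod(2 * time.Minute)
	return tcpConn.SetKeepAlive(true)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:22067")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println(err)
			continue
		}
		if err := setTCPOptions(conn); err != nil {
			log.Println("setting TCP options:", err)
		}
		go conn.Close() // a real server would hand the connection to protocol handling here
	}
}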
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -130,7 +130,7 @@ func printProgress(prefix string, count *int64) {
|
||||
expectedIterations := float64(int(1) << uint(wantBits))
|
||||
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
|
||||
|
||||
for _ = range time.NewTicker(15 * time.Second).C {
|
||||
for range time.NewTicker(15 * time.Second).C {
|
||||
tried := atomic.LoadInt64(count)
|
||||
elapsed := time.Since(started)
|
||||
rate := float64(tried) / elapsed.Seconds()
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -20,13 +20,13 @@ func TestAuditService(t *testing.T) {
|
||||
service := newAuditService(buf)
|
||||
|
||||
// Event sent before start, will not be logged
|
||||
events.Default.Log(events.Ping, "the first event")
|
||||
events.Default.Log(events.ConfigSaved, "the first event")
|
||||
|
||||
go service.Serve()
|
||||
service.WaitForStart()
|
||||
|
||||
// Event that should end up in the audit log
|
||||
events.Default.Log(events.Ping, "the second event")
|
||||
events.Default.Log(events.ConfigSaved, "the second event")
|
||||
|
||||
// We need to give the events time to arrive, since the channels are buffered etc.
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
@@ -35,7 +35,7 @@ func TestAuditService(t *testing.T) {
|
||||
service.WaitForStop()
|
||||
|
||||
// This event should not be logged, since we have stopped.
|
||||
events.Default.Log(events.Ping, "the third event")
|
||||
events.Default.Log(events.ConfigSaved, "the third event")
|
||||
|
||||
result := string(buf.Bytes())
|
||||
t.Log(result)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
13
cmd/syncthing/buildtag_noupgrade.go
Normal file
@@ -0,0 +1,13 @@
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//+build noupgrade

package main

func init() {
	BuildTags = append(BuildTags, "noupgrade")
}
13
cmd/syncthing/buildtag_race.go
Normal file
@@ -0,0 +1,13 @@
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//+build race

package main

func init() {
	BuildTags = append(BuildTags, "race")
}
59
cmd/syncthing/cpuusage.go
Normal file
@@ -0,0 +1,59 @@
// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"math"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

const cpuTickRate = 5 * time.Second

type cpuService struct {
	avg  metrics.EWMA
	stop chan struct{}
}

func newCPUService() *cpuService {
	return &cpuService{
		// 10 second average. Magic alpha value comes from looking at EWMA package
		// definitions of EWMA1, EWMA5. The tick rate *must* be five seconds (hard
		// coded in the EWMA package).
		avg:  metrics.NewEWMA(1 - math.Exp(-float64(cpuTickRate)/float64(time.Second)/10.0)),
		stop: make(chan struct{}),
	}
}

func (s *cpuService) Serve() {
	// Initialize prevUsage to an actual value returned by cpuUsage
	// instead of zero, because at least Windows returns a huge negative
	// number here that then slowly increments...
	prevUsage := cpuUsage()
	ticker := time.NewTicker(cpuTickRate)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			curUsage := cpuUsage()
			s.avg.Update(int64((curUsage - prevUsage) / time.Millisecond))
			prevUsage = curUsage
			s.avg.Tick()
		case <-s.stop:
			return
		}
	}
}

func (s *cpuService) Stop() {
	close(s.stop)
}

func (s *cpuService) Rate() float64 {
	return s.avg.Rate()
}
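The alpha passed to metrics.NewEWMA above is the usual exponential-moving-average smoothing factor, alpha = 1 - e^(-tick/window), with a five-second tick and a ten-second window. Spelled out with the same constants:

package main

import (
	"fmt"
	"math"
)

func main() {
	const tickSeconds = 5.0    // cpuTickRate
	const windowSeconds = 10.0 // the "10 second average" mentioned above

	// The same expression as in newCPUService: 1 - e^(-tick/window).
	alpha := 1 - math.Exp(-tickSeconds/windowSeconds)
	fmt.Printf("alpha = %.4f\n", alpha) // approximately 0.3935
}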
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build solaris
|
||||
|
||||
@@ -59,40 +59,20 @@ type prusage_t struct {
|
||||
|
||||
}
|
||||
|
||||
func solarisPrusage(pid int, rusage *prusage_t) error {
|
||||
fd, err := os.Open(fmt.Sprintf("/proc/%d/usage", pid))
|
||||
var procFile = fmt.Sprintf("/proc/%d/usage", os.Getpid())
|
||||
|
||||
func cpuUsage() time.Duration {
|
||||
fd, err := os.Open(procFile)
|
||||
if err != nil {
|
||||
return err
|
||||
return 0
|
||||
}
|
||||
|
||||
var rusage prusage_t
|
||||
err = binary.Read(fd, binary.LittleEndian, rusage)
|
||||
fd.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
func init() {
|
||||
go trackCPUUsage()
|
||||
}
|
||||
|
||||
func trackCPUUsage() {
|
||||
var prevUsage int64
|
||||
var prevTime = time.Now().UnixNano()
|
||||
var rusage prusage_t
|
||||
var pid = os.Getpid()
|
||||
for _ = range time.NewTicker(time.Second).C {
|
||||
err := solarisPrusage(pid, &rusage)
|
||||
if err != nil {
|
||||
l.Warnln("getting prusage:", err)
|
||||
continue
|
||||
}
|
||||
curTime := time.Now().UnixNano()
|
||||
timeDiff := curTime - prevTime
|
||||
curUsage := rusage.Pr_utime.Nano() + rusage.Pr_stime.Nano()
|
||||
usageDiff := curUsage - prevUsage
|
||||
cpuUsageLock.Lock()
|
||||
copy(cpuUsagePercent[1:], cpuUsagePercent[0:])
|
||||
cpuUsagePercent[0] = 100 * float64(usageDiff) / float64(timeDiff)
|
||||
cpuUsageLock.Unlock()
|
||||
prevTime = curTime
|
||||
prevUsage = curUsage
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return time.Duration(rusage.Pr_utime.Nano() + rusage.Pr_stime.Nano())
|
||||
}
|
||||
18
cmd/syncthing/cpuusage_unix.go
Normal file
@@ -0,0 +1,18 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//+build !windows,!solaris

package main

import "syscall"
import "time"

func cpuUsage() time.Duration {
	var rusage syscall.Rusage
	syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
	return time.Duration(rusage.Utime.Nano() + rusage.Stime.Nano())
}
27
cmd/syncthing/cpuusage_windows.go
Normal file
@@ -0,0 +1,27 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//+build windows

package main

import "syscall"
import "time"

func cpuUsage() time.Duration {
	handle, err := syscall.GetCurrentProcess()
	if err != nil {
		return 0
	}
	defer syscall.CloseHandle(handle)

	var ctime, etime, ktime, utime syscall.Filetime
	if err := syscall.GetProcessTimes(handle, &ctime, &etime, &ktime, &utime); err != nil {
		return 0
	}

	return time.Duration(ktime.Nanoseconds() + utime.Nanoseconds())
}
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -37,6 +37,16 @@ func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, n
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Allow requests carrying a valid API key
|
||||
if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) {
|
||||
// Set the access-control-allow-origin header for CORS requests
|
||||
// since a valid API key has been provided
|
||||
w.Header().Add("Access-Control-Allow-Origin", "*")
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(r.URL.Path, "/rest/debug") {
|
||||
// Debugging functions are only available when explicitly
|
||||
// enabled, and can be accessed without a CSRF token
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
@@ -106,7 +116,7 @@ func saveCsrfTokens() {
|
||||
// nothing relevant we can do about them anyway...
|
||||
|
||||
name := locations[locCsrfTokens]
|
||||
f, err := osutil.CreateAtomic(name, 0600)
|
||||
f, err := osutil.CreateAtomic(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
176
cmd/syncthing/gui_statics.go
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/auto"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
type staticsServer struct {
|
||||
assetDir string
|
||||
assets map[string][]byte
|
||||
availableThemes []string
|
||||
|
||||
mut sync.RWMutex
|
||||
theme string
|
||||
}
|
||||
|
||||
func newStaticsServer(theme, assetDir string) *staticsServer {
|
||||
s := &staticsServer{
|
||||
assetDir: assetDir,
|
||||
assets: auto.Assets(),
|
||||
mut: sync.NewRWMutex(),
|
||||
theme: theme,
|
||||
}
|
||||
|
||||
seen := make(map[string]struct{})
|
||||
// Load themes from compiled in assets.
|
||||
for file := range auto.Assets() {
|
||||
theme := strings.Split(file, "/")[0]
|
||||
if _, ok := seen[theme]; !ok {
|
||||
seen[theme] = struct{}{}
|
||||
s.availableThemes = append(s.availableThemes, theme)
|
||||
}
|
||||
}
|
||||
if assetDir != "" {
|
||||
// Load any extra themes from the asset override dir.
|
||||
for _, dir := range dirNames(assetDir) {
|
||||
if _, ok := seen[dir]; !ok {
|
||||
seen[dir] = struct{}{}
|
||||
s.availableThemes = append(s.availableThemes, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *staticsServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.URL.Path {
|
||||
case "/themes.json":
|
||||
s.serveThemes(w, r)
|
||||
default:
|
||||
s.serveAsset(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
|
||||
file := r.URL.Path
|
||||
|
||||
if file[0] == '/' {
|
||||
file = file[1:]
|
||||
}
|
||||
|
||||
if len(file) == 0 {
|
||||
file = "index.html"
|
||||
}
|
||||
|
||||
s.mut.RLock()
|
||||
theme := s.theme
|
||||
s.mut.RUnlock()
|
||||
|
||||
// Check for an override for the current theme.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, theme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in asset for the current theme.
|
||||
bs, ok := s.assets[theme+"/"+file]
|
||||
if !ok {
|
||||
// Check for an overridden default asset.
|
||||
if s.assetDir != "" {
|
||||
p := filepath.Join(s.assetDir, config.DefaultTheme, filepath.FromSlash(file))
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
http.ServeFile(w, r, p)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a compiled in default asset.
|
||||
bs, ok = s.assets[config.DefaultTheme+"/"+file]
|
||||
if !ok {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
mtype := s.mimeTypeForFile(file)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// Decompress the asset if the browser did not send an Accept-Encoding: gzip header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {
|
||||
sendJSON(w, map[string][]string{
|
||||
"themes": s.availableThemes,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *staticsServer) mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *staticsServer) setTheme(theme string) {
|
||||
s.mut.Lock()
|
||||
s.theme = theme
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
func (s *staticsServer) String() string {
|
||||
return fmt.Sprintf("staticsServer@%p", s)
|
||||
}
|
||||
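For orientation, serveAsset above falls back through four layers: the override directory for the active theme, the compiled-in assets for the active theme, the override directory for the default theme, and finally the compiled-in default before giving up with a 404. The sketch below condenses that order; lookupAsset and its map-based inputs are illustrative only, not the real staticsServer API.

package main

import "fmt"

// lookupAsset mimics the fallback order only. onDisk stands in for the
// os.Stat checks against the asset override directory, compiled for the
// embedded auto.Assets() map.
func lookupAsset(file, theme, def string, onDisk, compiled map[string][]byte) ([]byte, string) {
	if bs, ok := onDisk[theme+"/"+file]; ok {
		return bs, "override dir, active theme"
	}
	if bs, ok := compiled[theme+"/"+file]; ok {
		return bs, "compiled-in, active theme"
	}
	if bs, ok := onDisk[def+"/"+file]; ok {
		return bs, "override dir, default theme"
	}
	if bs, ok := compiled[def+"/"+file]; ok {
		return bs, "compiled-in default"
	}
	return nil, "not found (404)"
}

func main() {
	onDisk := map[string][]byte{"dark/index.html": []byte("custom dark page")}
	compiled := map[string][]byte{"default/index.html": []byte("stock page")}

	_, src := lookupAsset("index.html", "dark", "default", onDisk, compiled)
	fmt.Println(src) // override dir, active theme

	_, src = lookupAsset("index.html", "light", "default", onDisk, compiled)
	fmt.Println(src) // compiled-in default
}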
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -11,16 +11,19 @@ import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/thejerf/suture"
|
||||
@@ -68,11 +71,8 @@ func TestStopAfterBrokenConfig(t *testing.T) {
|
||||
}
|
||||
w := config.Wrap("/dev/null", cfg)
|
||||
|
||||
srv, err := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srv.started = make(chan struct{})
|
||||
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil, nil, nil)
|
||||
srv.started = make(chan string)
|
||||
|
||||
sup := suture.NewSimple("test")
|
||||
sup.Add(srv)
|
||||
@@ -90,8 +90,8 @@ func TestStopAfterBrokenConfig(t *testing.T) {
|
||||
RawUseTLS: false,
|
||||
},
|
||||
}
|
||||
if srv.CommitConfiguration(cfg, newCfg) {
|
||||
t.Fatal("Config commit should have failed")
|
||||
if err := srv.VerifyConfiguration(cfg, newCfg); err == nil {
|
||||
t.Fatal("Verify config should have failed")
|
||||
}
|
||||
|
||||
// Nonetheless, it should be fine to Stop() it without panic.
|
||||
@@ -119,7 +119,7 @@ func TestAssetsDir(t *testing.T) {
|
||||
gw.Close()
|
||||
foo := buf.Bytes()
|
||||
|
||||
e := embeddedStatic{
|
||||
e := &staticsServer{
|
||||
theme: "foo",
|
||||
mut: sync.NewRWMutex(),
|
||||
assetDir: "testdata",
|
||||
@@ -462,7 +462,6 @@ func TestHTTPLogin(t *testing.T) {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Unexpected non-200 return code %d for authed request (ISO-8859-1)", resp.StatusCode)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func startHTTP(cfg *mockedConfig) (string, error) {
|
||||
@@ -471,34 +470,37 @@ func startHTTP(cfg *mockedConfig) (string, error) {
|
||||
httpsKeyFile := "../../test/h1/https-key.pem"
|
||||
assetDir := "../../gui"
|
||||
eventSub := new(mockedEventSub)
|
||||
diskEventSub := new(mockedEventSub)
|
||||
discoverer := new(mockedCachingMux)
|
||||
connections := new(mockedConnections)
|
||||
errorLog := new(mockedLoggerRecorder)
|
||||
systemLog := new(mockedLoggerRecorder)
|
||||
cpu := new(mockedCPUService)
|
||||
addrChan := make(chan string)
|
||||
|
||||
// Instantiate the API service
|
||||
svc, err := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model,
|
||||
eventSub, discoverer, connections, errorLog, systemLog)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Make sure the API service is listening, and get the URL to use.
|
||||
addr := svc.listener.Addr()
|
||||
if addr == nil {
|
||||
return "", fmt.Errorf("Nil listening address from API service")
|
||||
}
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", addr.String())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Weird address from API service: %v", err)
|
||||
}
|
||||
baseURL := fmt.Sprintf("http://127.0.0.1:%d", tcpAddr.Port)
|
||||
svc := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model,
|
||||
eventSub, diskEventSub, discoverer, connections, errorLog, systemLog, cpu)
|
||||
svc.started = addrChan
|
||||
|
||||
// Actually start the API service
|
||||
supervisor := suture.NewSimple("API test")
|
||||
supervisor.Add(svc)
|
||||
supervisor.ServeBackground()
|
||||
|
||||
// Make sure the API service is listening, and get the URL to use.
|
||||
addr := <-addrChan
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Weird address from API service: %v", err)
|
||||
}
|
||||
|
||||
host, _, _ := net.SplitHostPort(cfg.gui.RawAddress)
|
||||
if host == "" || host == "0.0.0.0" {
|
||||
host = "127.0.0.1"
|
||||
}
|
||||
baseURL := fmt.Sprintf("http://%s", net.JoinHostPort(host, strconv.Itoa(tcpAddr.Port)))
|
||||
|
||||
return baseURL, nil
|
||||
}
|
||||
|
||||
@@ -507,6 +509,9 @@ func TestCSRFRequired(t *testing.T) {
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error from getting base URL:", err)
|
||||
}
|
||||
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
@@ -620,3 +625,335 @@ func TestRandomString(t *testing.T) {
|
||||
t.Errorf("Expected 27 random characters, got %q of length %d", res["random"], len(res["random"]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPostOK(t *testing.T) {
|
||||
cfg := bytes.NewBuffer([]byte(`{
|
||||
"version": 15,
|
||||
"folders": [
|
||||
{"id": "foo"}
|
||||
]
|
||||
}`))
|
||||
|
||||
resp, err := testConfigPost(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Expected 200 OK, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPostDupFolder(t *testing.T) {
|
||||
cfg := bytes.NewBuffer([]byte(`{
|
||||
"version": 15,
|
||||
"folders": [
|
||||
{"id": "foo"},
|
||||
{"id": "foo"}
|
||||
]
|
||||
}`))
|
||||
|
||||
resp, err := testConfigPost(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusBadRequest {
|
||||
t.Error("Expected 400 Bad Request, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigPost(data io.Reader) (*http.Response, error) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("POST", baseURL+"/rest/system/config", data)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
return cli.Do(req)
|
||||
}
|
||||
|
||||
func TestHostCheck(t *testing.T) {
|
||||
// An API service bound to localhost should reject non-localhost Host headers
|
||||
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.RawAddress = "127.0.0.1:0"
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// A normal HTTP get to the localhost-bound service should succeed
|
||||
|
||||
resp, err := http.Get(baseURL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Regular HTTP get: expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// A request with a suspicious Host header should fail
|
||||
|
||||
req, _ := http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "example.com"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusForbidden {
|
||||
t.Error("Suspicious Host header: expected 403 Forbidden, not", resp.Status)
|
||||
}
|
||||
|
||||
// A request with an explicit "localhost:8384" Host header should pass
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "localhost:8384"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Explicit localhost:8384: expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// A request with an explicit "localhost" Host header (no port) should pass
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "localhost"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Explicit localhost: expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// A server with InsecureSkipHostCheck set behaves differently
|
||||
|
||||
cfg = new(mockedConfig)
|
||||
cfg.gui.RawAddress = "127.0.0.1:0"
|
||||
cfg.gui.InsecureSkipHostCheck = true
|
||||
baseURL, err = startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// A request with a suspicious Host header should be allowed
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "example.com"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Incorrect host header, check disabled: expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// A server bound to a wildcard address also doesn't do the check
|
||||
|
||||
cfg = new(mockedConfig)
|
||||
cfg.gui.RawAddress = "0.0.0.0:0"
|
||||
cfg.gui.InsecureSkipHostCheck = true
|
||||
baseURL, err = startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// A request with a suspicious Host header should be allowed
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "example.com"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Incorrect host header, wildcard bound: expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// This should all work over IPv6 as well
|
||||
|
||||
cfg = new(mockedConfig)
|
||||
cfg.gui.RawAddress = "[::1]:0"
|
||||
baseURL, err = startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// A normal HTTP get to the localhost-bound service should succeed
|
||||
|
||||
resp, err = http.Get(baseURL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Regular HTTP get (IPv6): expected 200 OK, not", resp.Status)
|
||||
}
|
||||
|
||||
// A request with a suspicious Host header should fail
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "example.com"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusForbidden {
|
||||
t.Error("Suspicious Host header (IPv6): expected 403 Forbidden, not", resp.Status)
|
||||
}
|
||||
|
||||
// A request with an explicit "localhost:8384" Host header should pass
|
||||
|
||||
req, _ = http.NewRequest("GET", baseURL, nil)
|
||||
req.Host = "localhost:8384"
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Error("Explicit localhost:8384 (IPv6): expected 200 OK, not", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressIsLocalhost(t *testing.T) {
|
||||
testcases := []struct {
|
||||
address string
|
||||
result bool
|
||||
}{
|
||||
// These are all valid localhost addresses
|
||||
{"localhost", true},
|
||||
{"LOCALHOST", true},
|
||||
{"::1", true},
|
||||
{"127.0.0.1", true},
|
||||
{"localhost:8080", true},
|
||||
{"LOCALHOST:8000", true},
|
||||
{"[::1]:8080", true},
|
||||
{"127.0.0.1:8080", true},
|
||||
|
||||
// These are all non-localhost addresses
|
||||
{"example.com", false},
|
||||
{"example.com:8080", false},
|
||||
{"192.0.2.10", false},
|
||||
{"192.0.2.10:8080", false},
|
||||
{"0.0.0.0", false},
|
||||
{"0.0.0.0:8080", false},
|
||||
{"::", false},
|
||||
{"[::]:8080", false},
|
||||
{":8080", false},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
result := addressIsLocalhost(tc.address)
|
||||
if result != tc.result {
|
||||
t.Errorf("addressIsLocalhost(%q)=%v, expected %v", tc.address, result, tc.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccessControlAllowOriginHeader(t *testing.T) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("GET", baseURL+"/rest/system/status", nil)
|
||||
req.Header.Set("X-API-Key", testAPIKey)
|
||||
resp, err := cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatal("GET on /rest/system/status should succeed, not", resp.Status)
|
||||
}
|
||||
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
|
||||
t.Fatal("GET on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOptionsRequest(t *testing.T) {
|
||||
const testAPIKey = "foobarbaz"
|
||||
cfg := new(mockedConfig)
|
||||
cfg.gui.APIKey = testAPIKey
|
||||
baseURL, err := startHTTP(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cli := &http.Client{
|
||||
Timeout: time.Second,
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("OPTIONS", baseURL+"/rest/system/status", nil)
|
||||
resp, err := cli.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
t.Fatal("OPTIONS on /rest/system/status should succeed, not", resp.Status)
|
||||
}
|
||||
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
|
||||
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
|
||||
}
|
||||
if resp.Header.Get("Access-Control-Allow-Methods") != "GET, POST" {
|
||||
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Methods: GET, POST' header")
|
||||
}
|
||||
if resp.Header.Get("Access-Control-Allow-Headers") != "Content-Type, X-API-Key" {
|
||||
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Headers: Content-Type, X-API-KEY' header")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventMasks(t *testing.T) {
|
||||
cfg := new(mockedConfig)
|
||||
defSub := new(mockedEventSub)
|
||||
diskSub := new(mockedEventSub)
|
||||
svc := newAPIService(protocol.LocalDeviceID, cfg, "", "", "", nil, defSub, diskSub, nil, nil, nil, nil, nil)
|
||||
|
||||
if mask := svc.getEventMask(""); mask != defaultEventMask {
|
||||
t.Errorf("incorrect default mask %x != %x", int64(mask), int64(defaultEventMask))
|
||||
}
|
||||
|
||||
expected := events.FolderSummary | events.LocalChangeDetected
|
||||
if mask := svc.getEventMask("FolderSummary,LocalChangeDetected"); mask != expected {
|
||||
t.Errorf("incorrect parsed mask %x != %x", int64(mask), int64(expected))
|
||||
}
|
||||
|
||||
expected = 0
|
||||
if mask := svc.getEventMask("WeirdEvent,something else that doesn't exist"); mask != expected {
|
||||
t.Errorf("incorrect parsed mask %x != %x", int64(mask), int64(expected))
|
||||
}
|
||||
|
||||
if res := svc.getEventSub(defaultEventMask); res != defSub {
|
||||
t.Errorf("should have returned the given default event sub")
|
||||
}
|
||||
if res := svc.getEventSub(diskEventMask); res != diskSub {
|
||||
t.Errorf("should have returned the given disk event sub")
|
||||
}
|
||||
if res := svc.getEventSub(events.LocalIndexUpdated); res == nil || res == defSub || res == diskSub {
|
||||
t.Errorf("should have returned a valid, non-default event sub")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build !windows,!solaris
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
go trackCPUUsage()
|
||||
}
|
||||
|
||||
func trackCPUUsage() {
|
||||
var prevUsage int64
|
||||
var prevTime = time.Now().UnixNano()
|
||||
var rusage syscall.Rusage
|
||||
for _ = range time.NewTicker(time.Second).C {
|
||||
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
|
||||
curTime := time.Now().UnixNano()
|
||||
timeDiff := curTime - prevTime
|
||||
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
|
||||
usageDiff := curUsage - prevUsage
|
||||
cpuUsageLock.Lock()
|
||||
copy(cpuUsagePercent[1:], cpuUsagePercent[0:])
|
||||
cpuUsagePercent[0] = 100 * float64(usageDiff) / float64(timeDiff)
|
||||
cpuUsageLock.Unlock()
|
||||
prevTime = curTime
|
||||
prevUsage = curUsage
|
||||
}
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
go trackCPUUsage()
|
||||
}
|
||||
|
||||
func trackCPUUsage() {
|
||||
handle, err := syscall.GetCurrentProcess()
|
||||
if err != nil {
|
||||
l.Warnln("Cannot track CPU usage:", err)
|
||||
return
|
||||
}
|
||||
|
||||
var ctime, etime, ktime, utime syscall.Filetime
|
||||
err = syscall.GetProcessTimes(handle, &ctime, &etime, &ktime, &utime)
|
||||
if err != nil {
|
||||
l.Warnln("Cannot track CPU usage:", err)
|
||||
return
|
||||
}
|
||||
|
||||
prevTime := ctime.Nanoseconds()
|
||||
prevUsage := ktime.Nanoseconds() + utime.Nanoseconds() // Always overflows
|
||||
|
||||
for _ = range time.NewTicker(time.Second).C {
|
||||
err := syscall.GetProcessTimes(handle, &ctime, &etime, &ktime, &utime)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
curTime := time.Now().UnixNano()
|
||||
timeDiff := curTime - prevTime
|
||||
// This is sometimes 0, no clue why.
|
||||
if timeDiff == 0 {
|
||||
continue
|
||||
}
|
||||
curUsage := ktime.Nanoseconds() + utime.Nanoseconds()
|
||||
usageDiff := curUsage - prevUsage
|
||||
cpuUsageLock.Lock()
|
||||
copy(cpuUsagePercent[1:], cpuUsagePercent[0:])
|
||||
cpuUsagePercent[0] = 100 * float64(usageDiff) / float64(timeDiff)
|
||||
cpuUsageLock.Unlock()
|
||||
prevTime = curTime
|
||||
prevUsage = curUsage
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -48,7 +48,7 @@ var locations = map[locationEnum]string{
|
||||
locKeyFile: "${config}/key.pem",
|
||||
locHTTPSCertFile: "${config}/https-cert.pem",
|
||||
locHTTPSKeyFile: "${config}/https-key.pem",
|
||||
locDatabase: "${config}/index-v0.13.0.db",
|
||||
locDatabase: "${config}/index-v0.14.0.db",
|
||||
locLogFile: "${config}/syncthing.log", // -logfile on Windows
|
||||
locCsrfTokens: "${config}/csrftokens.txt",
|
||||
locPanicLog: "${config}/panic-${timestamp}.log",
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -12,13 +12,15 @@ import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
@@ -40,23 +42,28 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/symlinks"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"github.com/syncthing/syncthing/lib/weakhash"
|
||||
|
||||
"github.com/thejerf/suture"
|
||||
|
||||
_ "net/http/pprof" // Need to import this to support STPROFILER.
|
||||
)
|
||||
|
||||
var (
|
||||
Version = "unknown-dev"
|
||||
Codename = "Copper Cockroach"
|
||||
Codename = "Dysprosium Dragonfly"
|
||||
BuildStamp = "0"
|
||||
BuildDate time.Time
|
||||
BuildHost = "unknown"
|
||||
BuildUser = "unknown"
|
||||
IsRelease bool
|
||||
IsCandidate bool
|
||||
IsBeta bool
|
||||
LongVersion string
|
||||
BuildTags []string
|
||||
allowedVersionExp = regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z0-9]+)*(\.\d+)*(\+\d+-g[0-9a-f]+)?(-[^\s]+)?$`)
|
||||
)
|
||||
|
||||
@@ -73,7 +80,7 @@ const (
|
||||
tlsDefaultCommonName = "syncthing"
|
||||
httpsRSABits = 2048
|
||||
bepRSABits = 0 // 384 bit ECDSA used instead
|
||||
pingEventInterval = time.Minute
|
||||
defaultEventTimeout = time.Minute
|
||||
maxSystemErrors = 5
|
||||
initialSystemLog = 10
|
||||
maxSystemLog = 250
|
||||
@@ -93,15 +100,26 @@ func init() {
|
||||
l.Fatalf("Invalid version string %q;\n\tdoes not match regexp %v", Version, allowedVersionExp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a clean release build. A release is something like "v0.1.2",
|
||||
// with an optional suffix of letters and dot separated numbers like
|
||||
// "-beta3.47". If there's more stuff, like a plus sign and a commit hash
|
||||
// and so on, then it's not a release. If there's a dash anywhere in
|
||||
// there, it's some kind of beta or prerelease version.
|
||||
func setBuildMetadata() {
|
||||
// Check for a clean release build. A release is something like
|
||||
// "v0.1.2", with an optional suffix of letters and dot separated
|
||||
// numbers like "-beta3.47". If there's more stuff, like a plus sign and
|
||||
// a commit hash and so on, then it's not a release. If it has a dash in
|
||||
// it, it's some sort of beta, release candidate or special build. If it
|
||||
// has "-rc." in it, like "v0.14.35-rc.42", then it's a candidate build.
|
||||
//
|
||||
// So, every build that is not a stable release build has IsBeta = true.
|
||||
// This is used to enable some extra debugging (the deadlock detector).
|
||||
//
|
||||
// Release candidate builds are also "betas" from this point of view and
|
||||
// will have that debugging enabled. In addition, some features are
|
||||
// forced for release candidates - auto upgrade, and usage reporting.
|
||||
|
||||
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z]+[\d\.]+)?$`)
|
||||
IsRelease = exp.MatchString(Version)
|
||||
IsCandidate = strings.Contains(Version, "-rc.")
|
||||
IsBeta = strings.Contains(Version, "-")
|
||||
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
@@ -109,6 +127,10 @@ func init() {
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`syncthing %s "%s" (%s %s-%s) %s@%s %s`, Version, Codename, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
|
||||
if len(BuildTags) > 0 {
|
||||
LongVersion = fmt.Sprintf("%s [%s]", LongVersion, strings.Join(BuildTags, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -136,11 +158,11 @@ show time only (2).
|
||||
Development Settings
|
||||
--------------------
|
||||
|
||||
The following environment variables modify syncthing's behavior in ways that
|
||||
The following environment variables modify Syncthing's behavior in ways that
|
||||
are mostly useful for developers. Use with care.
|
||||
|
||||
STNODEFAULTFOLDER Don't create a default folder when starting for the first
|
||||
time. This variable will be ignored anytime after the first
|
||||
time. This variable will be ignored anytime after the first
|
||||
run.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in
|
||||
@@ -149,8 +171,8 @@ are mostly useful for developers. Use with care.
|
||||
STTRACE A comma separated string of facilities to trace. The valid
|
||||
facility strings listed below.
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
|
||||
profiler with HTTP access.
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start
|
||||
the profiler with HTTP access.
|
||||
|
||||
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
|
||||
|
||||
@@ -163,14 +185,33 @@ are mostly useful for developers. Use with care.
|
||||
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
|
||||
supported on Windows.
|
||||
|
||||
STDEADLOCK Used for debugging internal deadlocks. Use only under
|
||||
direction of a developer.
|
||||
|
||||
STDEADLOCKTIMEOUT Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STDEADLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STNORESTART Equivalent to the -no-restart argument. Disable the
|
||||
Syncthing monitor process which handles restarts for some
|
||||
configuration changes, upgrades, crashes and also log file
|
||||
writing (stdout is still written).
|
||||
|
||||
STNOUPGRADE Disable automatic upgrades.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
are "standard" for the Go standard library implementation,
|
||||
"minio" for the github.com/minio/sha256-simd implementation,
|
||||
and blank (the default) for auto detection.
|
||||
|
||||
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
|
||||
available CPU cores.
|
||||
|
||||
GOGC Percentage of heap growth at which to trigger GC. Default is
|
||||
100. Lower numbers keep peak memory usage down, at the price
|
||||
of CPU usage (ie. performance).
|
||||
of CPU usage (i.e. performance).
|
||||
|
||||
|
||||
Debugging Facilities
|
||||
@@ -183,14 +224,15 @@ The following are valid values for the STTRACE variable:
|
||||
|
||||
// Environment options
|
||||
var (
|
||||
noUpgrade = os.Getenv("STNOUPGRADE") != ""
|
||||
innerProcess = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
|
||||
noDefaultFolder = os.Getenv("STNODEFAULTFOLDER") != ""
|
||||
noUpgradeFromEnv = os.Getenv("STNOUPGRADE") != ""
|
||||
innerProcess = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
|
||||
noDefaultFolder = os.Getenv("STNODEFAULTFOLDER") != ""
|
||||
)
|
||||
|
||||
type RuntimeOptions struct {
|
||||
confDir string
|
||||
reset bool
|
||||
resetDatabase bool
|
||||
resetDeltaIdxs bool
|
||||
showVersion bool
|
||||
showPaths bool
|
||||
doUpgrade bool
|
||||
@@ -201,8 +243,10 @@ type RuntimeOptions struct {
|
||||
hideConsole bool
|
||||
logFile string
|
||||
auditEnabled bool
|
||||
auditFile string
|
||||
verbose bool
|
||||
paused bool
|
||||
unpaused bool
|
||||
guiAddress string
|
||||
guiAPIKey string
|
||||
generateDir string
|
||||
@@ -225,7 +269,7 @@ func defaultRuntimeOptions() RuntimeOptions {
|
||||
}
|
||||
|
||||
if os.Getenv("STTRACE") != "" {
|
||||
options.logFlags = log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile
|
||||
options.logFlags = logger.DebugFlags
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
@@ -249,8 +293,9 @@ func parseCommandLineOptions() RuntimeOptions {
|
||||
flag.IntVar(&options.logFlags, "logflags", options.logFlags, "Select information in log line prefix (see below)")
|
||||
flag.BoolVar(&options.noBrowser, "no-browser", false, "Do not start browser")
|
||||
flag.BoolVar(&options.browserOnly, "browser-only", false, "Open GUI in browser")
|
||||
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Do not restart; just exit")
|
||||
flag.BoolVar(&options.reset, "reset", false, "Reset the database")
|
||||
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Disable monitor process, managed restarts and log file writing")
|
||||
flag.BoolVar(&options.resetDatabase, "reset-database", false, "Reset the database, forcing a full rescan and resync")
|
||||
flag.BoolVar(&options.resetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
|
||||
flag.BoolVar(&options.doUpgrade, "upgrade", false, "Perform upgrade")
|
||||
flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
|
||||
flag.BoolVar(&options.showVersion, "version", false, "Show version")
|
||||
@@ -258,8 +303,10 @@ func parseCommandLineOptions() RuntimeOptions {
|
||||
flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
|
||||
flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
|
||||
flag.BoolVar(&options.verbose, "verbose", false, "Print verbose log output")
|
||||
flag.BoolVar(&options.paused, "paused", false, "Start with all devices paused")
|
||||
flag.BoolVar(&options.paused, "paused", false, "Start with all devices and folders paused")
|
||||
flag.BoolVar(&options.unpaused, "unpaused", false, "Start with all devices and folders unpaused")
|
||||
flag.StringVar(&options.logFile, "logfile", options.logFile, "Log file name (use \"-\" for stdout)")
|
||||
flag.StringVar(&options.auditFile, "auditfile", options.auditFile, "Specify audit file (use \"-\" for stdout, \"--\" for stderr)")
|
||||
if runtime.GOOS == "windows" {
|
||||
// Allow user to hide the console window
|
||||
flag.BoolVar(&options.hideConsole, "no-console", false, "Hide console window")
|
||||
@@ -269,10 +316,17 @@ func parseCommandLineOptions() RuntimeOptions {
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, longUsage)
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) > 0 {
|
||||
flag.Usage()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func main() {
|
||||
setBuildMetadata()
|
||||
|
||||
options := parseCommandLineOptions()
|
||||
l.SetFlags(options.logFlags)
|
||||
|
||||
@@ -285,12 +339,27 @@ func main() {
|
||||
os.Setenv("STGUIAPIKEY", options.guiAPIKey)
|
||||
}
|
||||
|
||||
// Check for options which are not compatible with each other. We have
|
||||
// to check logfile before it's set to the default below - we only want
|
||||
// to complain if they set -logfile explicitly, not if it's set to its
|
||||
// default location
|
||||
if options.noRestart && (options.logFile != "" && options.logFile != "-") {
|
||||
l.Fatalln("-logfile may not be used with -no-restart or STNORESTART")
|
||||
}
|
||||
|
||||
if options.hideConsole {
|
||||
osutil.HideConsole()
|
||||
}
|
||||
|
||||
if options.confDir != "" {
|
||||
// Not set as default above because the string can be really long.
|
||||
if !filepath.IsAbs(options.confDir) {
|
||||
var err error
|
||||
options.confDir, err = filepath.Abs(options.confDir)
|
||||
if err != nil {
|
||||
l.Fatalln(err)
|
||||
}
|
||||
}
|
||||
baseDirs["config"] = options.confDir
|
||||
}
|
||||
|
||||
@@ -353,12 +422,12 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
if options.reset {
|
||||
if options.resetDatabase {
|
||||
resetDB()
|
||||
return
|
||||
}
|
||||
|
||||
if options.noRestart {
|
||||
if innerProcess || options.noRestart {
|
||||
syncthingMain(options)
|
||||
} else {
|
||||
monitorMain(options)
|
||||
@@ -438,8 +507,8 @@ func debugFacilities() string {

func checkUpgrade() upgrade.Release {
cfg, _ := loadConfig()
releasesURL := cfg.Options().ReleasesURL
release, err := upgrade.LatestRelease(releasesURL, Version)
opts := cfg.Options()
release, err := upgrade.LatestRelease(opts.ReleasesURL, Version, opts.UpgradeToPreReleases)
if err != nil {
l.Fatalln("Upgrade:", err)
}
@@ -476,8 +545,13 @@ func performUpgrade(release upgrade.Release) {

func upgradeViaRest() error {
cfg, _ := loadConfig()
target := cfg.GUI().URL()
r, _ := http.NewRequest("POST", target+"/rest/system/upgrade", nil)
u, err := url.Parse(cfg.GUI().URL())
if err != nil {
return err
}
u.Path = path.Join(u.Path, "rest/system/upgrade")
target := u.String()
r, _ := http.NewRequest("POST", target, nil)
r.Header.Set("X-API-Key", cfg.GUI().APIKey)

tr := &http.Transport{
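For context, a minimal sketch (not part of this changeset) of why upgradeViaRest now parses the GUI URL and joins the path with path.Join instead of concatenating strings: it avoids doubled or missing slashes when the base URL already carries a path prefix. The base URL below is hypothetical.

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	base := "https://localhost:8384/prefix/" // hypothetical GUI URL with a path prefix
	u, err := url.Parse(base)
	if err != nil {
		panic(err)
	}
	u.Path = path.Join(u.Path, "rest/system/upgrade")
	fmt.Println(u.String()) // https://localhost:8384/prefix/rest/system/upgrade
}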
@@ -522,7 +596,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
l.SetPrefix("[start] ")

if runtimeOptions.auditEnabled {
startAuditing(mainService)
startAuditing(mainService, runtimeOptions.auditFile)
}

if runtimeOptions.verbose {
@@ -532,9 +606,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)

// Event subscription for the API; must start early to catch the early events. The LocalDiskUpdated
// event might overwhelm the event reciever in some situations so we will not subscribe to it here.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)
// Event subscription for the API; must start early to catch the early
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
defaultSub := events.NewBufferedSubscription(events.Default.Subscribe(defaultEventMask), eventSubBufferSize)
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(diskEventMask), eventSubBufferSize)

if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
@@ -545,7 +621,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// report the error if there is one.
osutil.MaximizeOpenFileLimit()

// Ensure that that we have a certificate and key.
// Ensure that we have a certificate and key.
cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
if err != nil {
l.Infof("Generating ECDSA key and certificate for %s...", tlsDefaultCommonName)
@@ -560,7 +636,13 @@ func syncthingMain(runtimeOptions RuntimeOptions) {

l.Infoln(LongVersion)
l.Infoln("My ID:", myID)
printHashRate()

sha256.SelectAlgo()
sha256.Report()
perfWithWeakHash := cpuBench(3, 150*time.Millisecond, true)
l.Infof("Hashing performance with weak hash is %.02f MB/s", perfWithWeakHash)
perfWithoutWeakHash := cpuBench(3, 150*time.Millisecond, false)
l.Infof("Hashing performance without weak hash is %.02f MB/s", perfWithoutWeakHash)

// Emit the Starting event, now that we know who we are.

@@ -597,6 +679,10 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
InsecureSkipVerify: true,
MinVersion: tls.VersionTLS12,
CipherSuites: []uint16{
0xCCA8, // TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, Go 1.8
0xCCA9, // TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, Go 1.8
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
@@ -606,13 +692,22 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
},
}

// If the read or write rate should be limited, set up a rate limiter for it.
// This will be used on connections created in the connect and listen routines.

opts := cfg.Options()

if !opts.SymlinksEnabled {
symlinks.Supported = false
if opts.WeakHashSelectionMethod == config.WeakHashAuto {
if perfWithoutWeakHash*0.8 > perfWithWeakHash {
l.Infof("Weak hash disabled, as it has an unacceptable performance impact.")
weakhash.Enabled = false
} else {
l.Infof("Weak hash enabled, as it has an acceptable performance impact.")
weakhash.Enabled = true
}
} else if opts.WeakHashSelectionMethod == config.WeakHashNever {
l.Infof("Disabling weak hash")
weakhash.Enabled = false
} else if opts.WeakHashSelectionMethod == config.WeakHashAlways {
l.Infof("Enabling weak hash")
weakhash.Enabled = true
}

if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
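For context, a minimal sketch (not part of this changeset) of the auto-selection policy introduced above: measure hashing throughput with and without the weak (rolling) hash and keep the weak hash only if the penalty stays under roughly 20%. The throughput numbers below are invented stand-ins for the cpuBench measurements.

package main

import "fmt"

func main() {
	perfWithWeakHash := 310.0    // MB/s, hypothetical measurement
	perfWithoutWeakHash := 420.0 // MB/s, hypothetical measurement

	weakHashEnabled := true
	if perfWithoutWeakHash*0.8 > perfWithWeakHash {
		// The weak hash costs more than ~20% of throughput; turn it off.
		weakHashEnabled = false
	}
	fmt.Println("weak hash enabled:", weakHashEnabled)
}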
@@ -640,6 +735,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}

if runtimeOptions.resetDeltaIdxs {
l.Infoln("Reinitializing delta index IDs")
ldb.DropDeltaIndexIDs()
}

protectedFiles := []string{
locations[locDatabase],
locations[locConfigFile],
@@ -656,8 +756,18 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}

m := model.NewModel(cfg, myID, myDeviceName(cfg), "syncthing", Version, ldb, protectedFiles)
cfg.Subscribe(m)
if cfg.RawCopy().OriginalVersion == 15 {
// The config version 15->16 migration is about handling ignores and
// delta indexes and requires that we drop existing indexes that
// have been incorrectly ignore filtered.
ldb.DropDeltaIndexIDs()
}
if cfg.RawCopy().OriginalVersion < 19 {
// Converts old symlink types to new in the entire database.
ldb.ConvertSymlinkTypes()
}

m := model.NewModel(cfg, myID, "syncthing", Version, ldb, protectedFiles)

if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
it, err := strconv.Atoi(t)
@@ -668,23 +778,19 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
m.StartDeadlockDetector(20 * time.Minute)
}

if runtimeOptions.paused {
for device := range cfg.Devices() {
m.PauseDevice(device)
}
if runtimeOptions.unpaused {
setPauseState(cfg, false)
} else if runtimeOptions.paused {
setPauseState(cfg, true)
}

// Clear out old indexes for other devices. Otherwise we'll start up and
// start needing a bunch of files which are nowhere to be found. This
// needs to be changed when we correctly do persistent indexes.
// Add and start folders
for _, folderCfg := range cfg.Folders() {
m.AddFolder(folderCfg)
for _, device := range folderCfg.DeviceIDs() {
if device == myID {
continue
}
m.Index(device, folderCfg.ID, nil, 0, nil)
if folderCfg.Paused {
folderCfg.CreateRoot()
continue
}
m.AddFolder(folderCfg)
m.StartFolder(folderCfg.ID)
}

@@ -735,7 +841,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {

// GUI

setupGUI(mainService, cfg, m, apiSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)
setupGUI(mainService, cfg, m, defaultSub, diskSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)

if runtimeOptions.cpuProfile {
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
@@ -751,20 +857,26 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
}
}

// Candidate builds always run with usage reporting.

if IsCandidate {
l.Infoln("Anonymous usage reporting is always enabled for candidate releases.")
opts.URAccepted = usageReportVersion
// Unique ID will be set and config saved below if necessary.
}

if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
l.Infoln("Anonymous usage report has changed; revoking acceptance")
opts.URAccepted = 0
opts.URUniqueID = ""
cfg.SetOptions(opts)
}
if opts.URAccepted >= usageReportVersion {
if opts.URUniqueID == "" {
// Previously the ID was generated from the node ID. We now need
// to generate a new one.
opts.URUniqueID = rand.String(8)
cfg.SetOptions(opts)
cfg.Save()
}

if opts.URAccepted >= usageReportVersion && opts.URUniqueID == "" {
// Generate and save a new unique ID if it is missing.
opts.URUniqueID = rand.String(8)
cfg.SetOptions(opts)
cfg.Save()
}

// The usageReportingManager registers itself to listen to configuration
@@ -776,20 +888,38 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
go standbyMonitor()
}

if opts.AutoUpgradeIntervalH > 0 {
if noUpgrade {
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
} else if IsRelease {
go autoUpgrade(cfg)
} else {
l.Infof("No automatic upgrades; %s is not a release version.", Version)
// Candidate builds should auto upgrade. Make sure the option is set,
// unless we are in a build where it's disabled or the STNOUPGRADE
// environment variable is set.

if IsCandidate && !upgrade.DisabledByCompilation && !noUpgradeFromEnv {
l.Infoln("Automatic upgrade is always enabled for candidate releases.")
if opts.AutoUpgradeIntervalH == 0 || opts.AutoUpgradeIntervalH > 24 {
opts.AutoUpgradeIntervalH = 12
// Set the option into the config as well, as the auto upgrade
// loop expects to read a valid interval from there.
cfg.SetOptions(opts)
cfg.Save()
}
// We don't tweak the user's choice of upgrading to pre-releases or
// not, as otherwise they cannot step off the candidate channel.
}

if opts.AutoUpgradeIntervalH > 0 {
if noUpgradeFromEnv {
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
} else {
go autoUpgrade(cfg)
}
}

if isSuperUser() {
l.Warnln("Syncthing should not run as a privileged or system user. Please consider using a normal user account.")
}

events.Default.Log(events.StartupComplete, map[string]string{
"myID": myID.String(),
})
go generatePingEvents()

cleanConfigDirectory()

@@ -806,15 +936,6 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
os.Exit(code)
}

func myDeviceName(cfg *config.Wrapper) string {
devices := cfg.Devices()
myName := devices[myID].Name
if myName == "" {
myName, _ = os.Hostname()
}
return myName
}

func setupSignalHandling() {
// Exit cleanly with "restarting" code on SIGHUP.

@@ -837,28 +958,11 @@ func setupSignalHandling() {
}()
}

// printHashRate prints the hashing performance in MB/s, formatting it with
// appropriate precision for the value, i.e. 182 MB/s, 18 MB/s, 1.8 MB/s, 0.18
// MB/s.
func printHashRate() {
hashRate := cpuBench(3, 100*time.Millisecond)

decimals := 0
if hashRate < 1 {
decimals = 2
} else if hashRate < 10 {
decimals = 1
}

l.Infof("Single thread hash performance is ~%.*f MB/s", decimals, hashRate)
}

func loadConfig() (*config.Wrapper, error) {
cfgFile := locations[locConfigFile]
cfg, err := config.Load(cfgFile, myID)

if err != nil {
l.Infoln("Error loading config file; using defaults for now")
myName, _ := os.Hostname()
newCfg := defaultConfig(myName)
cfg = config.Wrap(cfgFile, newCfg)
@@ -876,7 +980,7 @@ func loadOrCreateConfig() *config.Wrapper {
l.Fatalln("Config:", err)
}

if cfg.Raw().OriginalVersion != config.CurrentVersion {
if cfg.RawCopy().OriginalVersion != config.CurrentVersion {
err = archiveAndSaveConfig(cfg)
if err != nil {
l.Fatalln("Config archive:", err)
@@ -887,24 +991,57 @@ func loadOrCreateConfig() *config.Wrapper {
}

func archiveAndSaveConfig(cfg *config.Wrapper) error {
// To prevent previous config from being cleaned up, quickly touch it too
now := time.Now()
_ = os.Chtimes(cfg.ConfigPath(), now, now) // May return error on Android etc; no worries

archivePath := cfg.ConfigPath() + fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)
// Copy the existing config to an archive copy
archivePath := cfg.ConfigPath() + fmt.Sprintf(".v%d", cfg.RawCopy().OriginalVersion)
l.Infoln("Archiving a copy of old config file format at:", archivePath)
if err := osutil.Rename(cfg.ConfigPath(), archivePath); err != nil {
if err := copyFile(cfg.ConfigPath(), archivePath); err != nil {
return err
}

// Do a regular atomic config save
return cfg.Save()
}

func startAuditing(mainService *suture.Supervisor) {
auditFile := timestampedLoc(locAuditLog)
fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
func copyFile(src, dst string) error {
bs, err := ioutil.ReadFile(src)
if err != nil {
l.Fatalln("Audit:", err)
return err
}

if err := ioutil.WriteFile(dst, bs, 0600); err != nil {
// Attempt to clean up
os.Remove(dst)
return err
}

return nil
}

func startAuditing(mainService *suture.Supervisor, auditFile string) {

var fd io.Writer
var err error
var auditDest string
var auditFlags int

if auditFile == "-" {
fd = os.Stdout
auditDest = "stdout"
} else if auditFile == "--" {
fd = os.Stderr
auditDest = "stderr"
} else {
if auditFile == "" {
auditFile = timestampedLoc(locAuditLog)
auditFlags = os.O_WRONLY | os.O_CREATE | os.O_EXCL
} else {
auditFlags = os.O_WRONLY | os.O_CREATE | os.O_APPEND
}
fd, err = os.OpenFile(auditFile, auditFlags, 0600)
if err != nil {
l.Fatalln("Audit:", err)
}
auditDest = auditFile
}

auditService := newAuditService(fd)
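For context, a minimal sketch (not part of this changeset) of the destination handling the new startAuditing signature supports: "-" selects stdout, "--" selects stderr, an empty value falls back to a timestamped default file opened exclusively, and anything else is opened for append. The helper name and the default file name format below are illustrative, not Syncthing's.

package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

// openAuditDest is a hypothetical helper mirroring the selection logic above.
func openAuditDest(auditFile string) (io.Writer, string, error) {
	switch auditFile {
	case "-":
		return os.Stdout, "stdout", nil
	case "--":
		return os.Stderr, "stderr", nil
	}
	flags := os.O_WRONLY | os.O_CREATE | os.O_APPEND
	if auditFile == "" {
		// Default to a timestamped file that must not already exist.
		auditFile = fmt.Sprintf("audit-%s.log", time.Now().Format("20060102-150405"))
		flags = os.O_WRONLY | os.O_CREATE | os.O_EXCL
	}
	fd, err := os.OpenFile(auditFile, flags, 0600)
	if err != nil {
		return nil, "", err
	}
	return fd, auditFile, nil
}

func main() {
	w, dest, err := openAuditDest("-")
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(w, "audit event") // goes to stdout in this example
	fmt.Println("audit log in", dest)
}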
@@ -914,10 +1051,10 @@ func startAuditing(mainService *suture.Supervisor) {
// ensure we capture all events from the start.
auditService.WaitForStart()

l.Infoln("Audit log in", auditFile)
l.Infoln("Audit log in", auditDest)
}

func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
guiCfg := cfg.GUI()

if !guiCfg.Enabled {
@@ -928,16 +1065,17 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
l.Warnln("Insecure admin access is enabled.")
}

api, err := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, discoverer, connectionsService, errors, systemLog)
if err != nil {
l.Fatalln("Cannot start GUI:", err)
}
cpu := newCPUService()
mainService.Add(cpu)

api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, defaultSub, diskSub, discoverer, connectionsService, errors, systemLog, cpu)
cfg.Subscribe(api)
mainService.Add(api)

if cfg.Options().StartBrowser && !runtimeOptions.noBrowser && !runtimeOptions.stRestarting {
// Can potentially block if the utility we are invoking doesn't
// fork, and just execs, hence keep it in it's own routine.
// fork, and just execs, hence keep it in its own routine.
<-api.startedOnce
go openURL(guiCfg.URL())
}
}
@@ -947,11 +1085,10 @@ func defaultConfig(myName string) config.Configuration {

if !noDefaultFolder {
l.Infoln("Default folder created and/or linked to new config")
folderID := rand.String(5) + "-" + rand.String(5)
defaultFolder = config.NewFolderConfiguration(folderID, locations[locDefFolder])
defaultFolder.Label = "Default Folder (" + folderID + ")"
defaultFolder = config.NewFolderConfiguration("default", locations[locDefFolder])
defaultFolder.Label = "Default Folder"
defaultFolder.RescanIntervalS = 60
defaultFolder.MinDiskFreePct = 1
defaultFolder.MinDiskFree = config.Size{Value: 1, Unit: "%"}
defaultFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: myID}}
defaultFolder.AutoNormalize = true
defaultFolder.MaxConflicts = -1
@@ -990,13 +1127,6 @@ func defaultConfig(myName string) config.Configuration {
return newCfg
}

func generatePingEvents() {
for {
time.Sleep(pingEventInterval)
events.Default.Log(events.Ping, nil)
}
}

func resetDB() error {
return os.RemoveAll(locations[locDatabase])
}
@@ -1054,7 +1184,7 @@ func getFreePort(host string, ports ...int) (int, error) {
}

func standbyMonitor() {
restartDelay := time.Duration(60 * time.Second)
restartDelay := 60 * time.Second
now := time.Now()
for {
time.Sleep(10 * time.Second)
@@ -1087,7 +1217,15 @@ func autoUpgrade(cfg *config.Wrapper) {
case <-timer.C:
}

rel, err := upgrade.LatestRelease(cfg.Options().ReleasesURL, Version)
opts := cfg.Options()
checkInterval := time.Duration(opts.AutoUpgradeIntervalH) * time.Hour
if checkInterval < time.Hour {
// We shouldn't be here if AutoUpgradeIntervalH < 1, but for
// safety's sake.
checkInterval = time.Hour
}

rel, err := upgrade.LatestRelease(opts.ReleasesURL, Version, opts.UpgradeToPreReleases)
if err == upgrade.ErrUpgradeUnsupported {
events.Default.Unsubscribe(sub)
return
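For context, a minimal sketch (not part of this changeset) of the interval handling added to autoUpgrade: the configured interval is read once, clamped to at least an hour, and then reused for every timer reset. The configured value below is a hypothetical misconfiguration.

package main

import (
	"fmt"
	"time"
)

func main() {
	autoUpgradeIntervalH := 0 // hypothetical misconfigured value from the config
	checkInterval := time.Duration(autoUpgradeIntervalH) * time.Hour
	if checkInterval < time.Hour {
		// Guard against zero or negative intervals; never poll more than hourly.
		checkInterval = time.Hour
	}
	fmt.Println("next upgrade check in", checkInterval)
}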
@@ -1096,13 +1234,13 @@ func autoUpgrade(cfg *config.Wrapper) {
// Don't complain too loudly here; we might simply not have
// internet connectivity, or the upgrade server might be down.
l.Infoln("Automatic upgrade:", err)
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
timer.Reset(checkInterval)
continue
}

if upgrade.CompareVersions(rel.Tag, Version) != upgrade.Newer {
// Skip equal, older or majorly newer (incompatible) versions
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
timer.Reset(checkInterval)
continue
}

@@ -1110,7 +1248,7 @@ func autoUpgrade(cfg *config.Wrapper) {
err = upgrade.To(rel)
if err != nil {
l.Warnln("Automatic upgrade:", err)
timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
timer.Reset(checkInterval)
continue
}
events.Default.Unsubscribe(sub)
@@ -1125,13 +1263,16 @@ func autoUpgrade(cfg *config.Wrapper) {
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
patterns := map[string]time.Duration{
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.11.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.13.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"tmp-index-sorter.*": time.Minute, // these should never exist on startup
}

for pat, dur := range patterns {
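For context, a minimal sketch (not part of this changeset) of the retention sweep cleanConfigDirectory performs with such a pattern table: each glob is expanded and matches whose modification time is older than the associated duration are removed. The directory and patterns below are examples only.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	dir := os.TempDir() // hypothetical config directory
	patterns := map[string]time.Duration{
		"panic-*.log":   7 * 24 * time.Hour,
		"config.xml.v*": 30 * 24 * time.Hour,
	}

	for pat, dur := range patterns {
		matches, err := filepath.Glob(filepath.Join(dir, pat))
		if err != nil {
			continue
		}
		for _, file := range matches {
			info, err := os.Stat(file)
			if err != nil {
				continue
			}
			if time.Since(info.ModTime()) > dur {
				// Older than the retention window; remove it.
				fmt.Println("removing", file)
				os.Remove(file)
			}
		}
	}
}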
@@ -1184,3 +1325,16 @@ func showPaths() {
fmt.Printf("GUI override directory:\n\t%s\n\n", locations[locGUIAssets])
fmt.Printf("Default sync folder directory:\n\t%s\n\n", locations[locDefFolder])
}

func setPauseState(cfg *config.Wrapper, paused bool) {
raw := cfg.RawCopy()
for i := range raw.Devices {
raw.Devices[i].Paused = paused
}
for i := range raw.Folders {
raw.Folders[i].Paused = paused
}
if err := cfg.Replace(raw); err != nil {
l.Fatalln("Cannot adjust paused state:", err)
}
}

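For context, a minimal sketch (not part of this changeset) of the pattern behind the new setPauseState helper: flip the Paused flag on every device and folder in one pass, then commit the configuration as a whole. The types here are simplified stand-ins; in Syncthing the copy comes from cfg.RawCopy() and is committed via cfg.Replace().

package main

import "fmt"

type device struct {
	Name   string
	Paused bool
}

type folder struct {
	ID     string
	Paused bool
}

type configuration struct {
	Devices []device
	Folders []folder
}

// setPauseState flips the paused flag on every device and folder.
func setPauseState(cfg *configuration, paused bool) {
	for i := range cfg.Devices {
		cfg.Devices[i].Paused = paused
	}
	for i := range cfg.Folders {
		cfg.Folders[i].Paused = paused
	}
}

func main() {
	cfg := configuration{
		Devices: []device{{Name: "laptop"}, {Name: "nas"}},
		Folders: []folder{{ID: "default"}},
	}
	setPauseState(&cfg, true)
	fmt.Printf("%+v\n", cfg)
}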
@@ -2,156 +2,17 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"os"
"testing"

"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/protocol"
)

func TestFolderErrors(t *testing.T) {
// This test intentionally avoids starting the folders. If they are
// started, they will perform an initial scan, which will create missing
// folder markers and race with the stuff we do in the test.

fcfg := config.FolderConfiguration{
ID: "folder",
RawPath: "testdata/testfolder",
}
cfg := config.Wrap("/tmp/test", config.Configuration{
Folders: []config.FolderConfiguration{fcfg},
})

for _, file := range []string{".stfolder", "testfolder/.stfolder", "testfolder"} {
if err := os.Remove("testdata/" + file); err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
}

ldb := db.OpenMemory()

// Case 1 - new folder, directory and marker created

m := model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)

if err := m.CheckFolderHealth("folder"); err != nil {
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
}

s, err := os.Stat("testdata/testfolder")
if err != nil || !s.IsDir() {
t.Error(err)
}

_, err = os.Stat("testdata/testfolder/.stfolder")
if err != nil {
t.Error(err)
}

if err := os.Remove("testdata/testfolder/.stfolder"); err != nil {
t.Fatal(err)
}
if err := os.Remove("testdata/testfolder/"); err != nil {
t.Fatal(err)
}

// Case 2 - new folder, marker created

fcfg.RawPath = "testdata/"
cfg = config.Wrap("/tmp/test", config.Configuration{
Folders: []config.FolderConfiguration{fcfg},
})

m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)

if err := m.CheckFolderHealth("folder"); err != nil {
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
}

_, err = os.Stat("testdata/.stfolder")
if err != nil {
t.Error(err)
}

if err := os.Remove("testdata/.stfolder"); err != nil {
t.Fatal(err)
}

// Case 3 - Folder marker missing

set := db.NewFileSet("folder", ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})

m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)

if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
}

// Case 3.1 - recover after folder marker missing

if err = fcfg.CreateMarker(); err != nil {
t.Error(err)
}

if err := m.CheckFolderHealth("folder"); err != nil {
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
}

// Case 4 - Folder path missing

if err := os.Remove("testdata/testfolder/.stfolder"); err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
if err := os.Remove("testdata/testfolder"); err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}

fcfg.RawPath = "testdata/testfolder"
cfg = config.Wrap("testdata/subfolder", config.Configuration{
Folders: []config.FolderConfiguration{fcfg},
})

m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb, nil)
m.AddFolder(fcfg)

if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder path missing" {
t.Error("Incorrect error: Folder path missing !=", m.CheckFolderHealth("folder"))
}

// Case 4.1 - recover after folder path missing

if err := os.Mkdir("testdata/testfolder", 0700); err != nil {
t.Fatal(err)
}

if err := m.CheckFolderHealth("folder"); err == nil || err.Error() != "folder marker missing" {
t.Error("Incorrect error: Folder marker missing !=", m.CheckFolderHealth("folder"))
}

// Case 4.2 - recover after missing marker

if err = fcfg.CreateMarker(); err != nil {
t.Error(err)
}

if err := m.CheckFolderHealth("folder"); err != nil {
t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
}
}

func TestShortIDCheck(t *testing.T) {
cfg := config.Wrap("/tmp/test", config.Configuration{
Devices: []config.DeviceConfiguration{

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

// +build solaris

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

// +build freebsd openbsd dragonfly

@@ -2,7 +2,7 @@
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

@@ -20,9 +20,8 @@ var (
func memorySize() (int64, error) {
var memoryStatusEx [64]byte
binary.LittleEndian.PutUint32(memoryStatusEx[:], 64)
p := uintptr(unsafe.Pointer(&memoryStatusEx[0]))

ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, p, 0, 0)
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, uintptr(unsafe.Pointer(&memoryStatusEx[0])), 0, 0)
if ret == 0 {
return 0, callErr
}