Compare commits
959 Commits
*(Commit table omitted: the export captured only SHA1 hashes for the 959 commits in this range, from ec86db176e through 633ddba2b2, with no author, date, or message data.)*
.codecov.yml (15 lines changed)
@@ -1,5 +1,20 @@
comment: false

coverage:
  range: "40...100"
  precision: 1
  status:
    patch:
      default:
        informational: true
    project:
      default:
        informational: true

github_checks:
  annotations: false

ignore:
  - "**.pb.go"
  - "**_mocked.go"
  - "**/mocks/*"
.deepsource.toml (new file, 12 lines)
@@ -0,0 +1,12 @@
version = 1

exclude_patterns = ["*.pb.go"]
test_patterns = ["*_test.go"]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/syncthing/syncthing"]
build_tags = ["noassets"]
.github/FUNDING.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
github: syncthing
custom: "https://syncthing.net/donations/"

# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# otechie: # Replace with a single Otechie username
.github/ISSUE_TEMPLATE.md (deleted, 42 lines)
@@ -1,42 +0,0 @@
### DO NOT REPORT SECURITY ISSUES IN THIS ISSUE TRACKER

Instead, contact security@syncthing.net directly - see
https://syncthing.net/security.html for more information.

### DO NOT POST SUPPORT REQUESTS OR GENERAL QUESTIONS IN THIS ISSUE TRACKER

Please use the forum at https://forum.syncthing.net/ where a large number of
helpful people hang out. This issue tracker is for reporting bugs or feature
requests directly to the developers. Worst case you might get a short
"that's a bug, please report it on GitHub" response on the forum, in which
case we thank you for your patience and following our advice. :)

### Please use the correct issue tracker

If your problem relates to a Syncthing wrapper or [sub-project](https://github.com/syncthing) such as [Syncthing for Android](https://github.com/syncthing/syncthing-android/issues), [SyncTrayzor](https://github.com/canton7/synctrayzor) or the [documentation](https://github.com/syncthing/docs/issues), please use their respective issue trackers.

### Does your log mention database corruption?

If your Syncthing log reports panics because of database corruption it is most likely a fault with your system's storage or memory. Affected log entries will contain lines starting with `panic: leveldb`. You will need to delete the index database to clear this, by running `syncthing -reset-database`.

### Please do post actual bug reports and feature requests.

If your issue is a bug report, replace this boilerplate with a description
of the problem, being sure to include at least:

- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.

Also fill out the version information below and add log output or
screenshots as appropriate.

If your issue is a feature request, simply replace this template text in
its entirety.

### Version Information

Syncthing Version: v1.x.y
OS Version: Windows 7 / Ubuntu 14.04 / ...
Browser Version: (if applicable, for GUI issues)
.github/ISSUE_TEMPLATE/01-feature.md (new file, 13 lines)
@@ -0,0 +1,13 @@
---
name: Feature request
about: If you're just not sure how to do something, see "ask a question".
labels: enhancement, needs-triage
---

### Include required information

Please be sure to include at least:

- what problem your new feature would solve
- how or why you think it is generally useful (i.e., not just for you)
- what alternatives or workarounds you considered
.github/ISSUE_TEMPLATE/02-bug.md (new file, 23 lines)
@@ -0,0 +1,23 @@
---
name: Bug report
about: If you're actually looking for support, see "ask a question".
labels: bug, needs-triage
---

### Does your log mention database corruption?

If your Syncthing log reports panics because of database corruption it is
most likely a fault with your system's storage or memory. Affected log
entries will contain lines starting with `panic: leveldb`. You will need to
delete the index database to clear this, by running `syncthing
-reset-database`.

### Include required information

Please be sure to include at least:

- which version of Syncthing and what operating system you are using
- browser and version, if applicable
- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: I need help / I have a question
    url: https://forum.syncthing.net/
    about: Ask questions, get support, and discuss with other community members.
  - name: Android issues
    url: https://github.com/syncthing/syncthing-android/issues/
    about: The Android app has its own issue tracker.
.github/SECURITY.md (52 lines changed)
@@ -1,48 +1,10 @@
## Reporting a Vulnerability

If you believe that you've found a Syncthing-related security vulnerability, please report it by sending email to the address security@syncthing.net. The PGP key for security@syncthing.net (B683AD7B76CAB013) below can be used to send encrypted mail or to verify responses received from that address.
If you believe that you've found a Syncthing-related security vulnerability,
please report it by sending email to the address security@syncthing.net. The
[PGP key for security@syncthing.net
(B683AD7B76CAB013)](https://syncthing.net/security-key.txt) can be used to
send encrypted mail or to verify responses received from that address.

```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQENBFShFlIBCACsW346HYskhKhxrdZMjyU5Ntsjvg6/ogqINDPoL10/oaIP0G+t
7zzC0K5Cq29ix43kNNLKTJNyPkdTeaJEcqslMUt6tovjHwoKJ073GP0W3KsNvBRg
ffCZOAexGfOsBSL9KHaYGK67Py3TFgtN1H/EmboU1arrLfAMrmqOip++EGqOxjse
gH0qk7Mk1TqEC5Xh3NGE7r1UobAlqdUv5E3v7U11NhAdP1zu/XZ/zvP5mwVQJMLv
iZyeWGliNI8nEeRjYw+S85f4gq7H2mgoeNBN4WwwK1hhz9qpvCsgPW3XqlExTPI4
1vM4PxpiFIuF0zuy2OwsmjrpTCZeBscr4Tj5ABEBAAG0K1N5bmN0aGluZyBTZWN1
cml0eSA8c2VjdXJpdHlAc3luY3RoaW5nLm5ldD6JATgEEwECACIFAlShFlICGwMG
CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELaDrXt2yrATB1UH/jnnIa6DekCA
2V36N/2+pFSNLVWeoZQxZ9ne9S7WSubaoK4oCWiuChPSAy5hagnKnNA3a9wrz5iN
0hgCA88NnTU18biZW6HX8xWEd3dnY3fX1sG0XdHuKlFria8ByfcrbShf/CttXkEd
Y5qPHH9aLIMtksBS1MIsRYrOCHiLNYFCKlbjDDCGT3tuk/yaU7aBAFVPIag54afC
zZAIvTIgRruLWkNT/sSEJx9ekhJzO/+pXNbSSHwhoj5OJh7sQjZWwPzNazelk48R
+ku8VpB5Wxgk2MnJPf1RxF+7saBAaeAVZsJcPN+Jp7u9LCFelIRn1ISsHbhLhyqL
cPXqllltLCKJAhwEEAECAAYFAlShFlsACgkQSfWuwLzlJMcEpA//VvZYHGZgZMM0
bBkYnjManYnJkHSQ2pHsjquSFH+fbfq2FxxTk/nQ/IAvBzt8NDTT3ylPOmsl4A8q
r8BxsRNENhU7zDQrKC5NtYUrzXhPGo8qfDwkeLyqd2msUvHn7EH3PNgiN2QaFWxE
21FqoXIpERKJRgRtv1SYZoMyNGjmT2hta1ZbwEfrPJnzjYhneoUGsRApG7p+uPqe
4LRpbG3Fa2vBm/UWUrOe+6jPzvMokjIJbdK0IjXamFAzwYW5fSZaFa94mR6L5f5m
X8Dhp66mBRTx7qk6ldEqptvaYGqihaWP1xbDaApQsGHQujtcBYGp+edlkabuW5h7
stl/7QTFkEPqHT4ybxEX0rLoFUGq56WUlKp27z40keStaXMgfrsxJtkz66Xs8vU4
7qZcLAcPsin+y0toqavtwtM/L7uCMu1yhbFRGJ+JL6saJAqzwS8l7r6E2R7OnVj1
BdASgxx1TgzW6ZFW5p1Iy6mtpkBypsnp8s3UcP3GFRnQe9gi1EXHjzuZAUGafe4Q
juvJ8t8xIcQMFuAylNIVyXvIWJoehqsVY9EBgVtE4y1SRh8XTT3Tddn8ab5fl7uh
HWAY8cRlv6WIOhK8w8oroiYx0SID8jMeEwJBS4DL7qWtJMkDo8ZEJiB+Pkd963MX
05QXt33AvJJ9PmbGCHkcH7198tCmA+e5AQ0EVKEWUgEIAPGczHpa6NdxY0pm+tIp
btiA6gdPE70pJYgJTKX7siCQ2w770H3CBSKmqEXadr7WnfIgUYIDaSxadeGzB/Mn
3SHCYRCqbA7mwu2k4wNNvCEM0xZqFAvaDJ4avlZ5oiMT8IFHKsjC77nkhmfXaIGt
hn10H2MFADjvJYul7vR+Ghg1wGhTGWo2u7VVj9BI+AfvnWaouFI0cx2KNWEI/Ocj
z6jk8nmC3yOEFQECM/hF4lkAOv9CQUa8UcvAr31trzawmV1iSsKjmVZgqd0N4T8f
hUikqUPZGNCRcqEUffTzggIyGPbedFnZw9Di7o1xByxyTrZycemAVqaVGF+9nFLG
pccAEQEAAYkBHwQYAQIACQUCVKEWUgIbDAAKCRC2g617dsqwEzrjB/9q0T8A4XUE
p0g6xq86jhmh4jlEedxrfXUL3R6ejFtuKMThulxEP0xiQ7xLzBhOnvyxLCsVbjp8
0JtJTVCq44UzUiIBuVNRoYG29uXuTUL2UtI27VhjFxMzLDwZL97tbGR6lzdM/+U5
9PC+PIvS0yz13z2t3/x3KUOnBgxZnpy9h4AdKwjrNVtnQdGDXSKlJBLb57TFcS94
f3roZ5Gpw3AWYSmSSiZWbhks2UNzYSQ84LAKlV1NkktO+qRc/pUqr6IxMjOKc7XP
e8u1Nst6fN3GNqZOV+jUYs/fqrJgp1TUWjNTuf22Rl0Idz7XLPJKYFh9W7T/4MbU
M7Q8GZuww1rk
=No/v
-----END PGP PUBLIC KEY BLOCK-----
```
You can read more about Syncthing security at
https://syncthing.net/security/.
.gitignore (9 lines changed)
@@ -15,12 +15,7 @@
syncthing.sig
RELEASE
deb
lib/auto/gui.files.go
snapcraft.yaml
prime/
snap/
parts/
stage/
*.snap
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
@@ -16,6 +16,7 @@ linters:
    - funlen
    - wsl
    - gocognit
    - godox

service:
  golangci-lint-version: 1.21.x
AUTHORS (70 lines changed)
@@ -16,9 +16,14 @@
|
||||
Aaron Bieber (qbit) <qbit@deftly.net>
|
||||
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
|
||||
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
|
||||
Alan Pope <alan@popey.com>
|
||||
Alberto Donato <albertodonato@users.noreply.github.com>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
|
||||
Alex Xu <alex.hello71@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Aman Gupta <aman@tmm1.net>
|
||||
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
|
||||
andresvia <andres.via@gmail.com>
|
||||
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
|
||||
@@ -26,11 +31,15 @@ Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.c
|
||||
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
|
||||
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
|
||||
andyleap <andyleap@gmail.com>
|
||||
Anjan Momi <anjan@momi.ca>
|
||||
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
|
||||
Antony Male (canton7) <antony.male@gmail.com>
|
||||
Aranjedeath <Aranjedeath@users.noreply.github.com>
|
||||
Arkadiusz Tymiński <gevleeog@gmail.com>
|
||||
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
|
||||
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
|
||||
Aurélien Rainone <476650+arl@users.noreply.github.com>
|
||||
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
|
||||
Bart De Vries (mogwa1) <devriesb@gmail.com>
|
||||
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
|
||||
@@ -41,96 +50,127 @@ Benedikt Heine (bebehei) <bebe@bebehei.de>
|
||||
Benedikt Morbach <benedikt.morbach@googlemail.com>
|
||||
Benno Fünfstück <benno.fuenfstueck@gmail.com>
|
||||
Benny Ng (tpng) <benny.tpng@gmail.com>
|
||||
boomsquared <54829195+boomsquared@users.noreply.github.com>
|
||||
Boqin Qin <bobbqqin@bupt.edu.cn>
|
||||
Boris Rybalkin <ribalkin@gmail.com>
|
||||
Brandon Philips (philips) <brandon@ifup.org>
|
||||
Brendan Long (brendanlong) <self@brendanlong.com>
|
||||
Brian R. Becker (brbecker) <brbecker@gmail.com>
|
||||
bt90 <btom1990@googlemail.com>
|
||||
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
|
||||
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
|
||||
Cedric Staniewski (xduugu) <cedric@gmx.ca>
|
||||
chenrui <rui@meetup.com>
|
||||
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
|
||||
Chris Howie (cdhowie) <me@chrishowie.com>
|
||||
Chris Joel (cdata) <chris@scriptolo.gy>
|
||||
Chris Tonkinson <chris@masterbran.ch>
|
||||
Christian Prescott <me@christianprescott.com>
|
||||
chucic <chucic@seznam.cz>
|
||||
Colin Kennedy (moshen) <moshen.colin@gmail.com>
|
||||
Cromefire_ <tim.l@nghorst.net>
|
||||
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
|
||||
Cyprien Devillez <cypx@users.noreply.github.com>
|
||||
Dale Visser <dale.visser@live.com>
|
||||
Dan <benda.daniel@gmail.com>
|
||||
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí (mvdan) <mvdan@mvdan.cc>
|
||||
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
|
||||
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
|
||||
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
|
||||
Denis A. (dva) <denisva@gmail.com>
|
||||
Dennis Wilson (snnd) <dw@risu.io>
|
||||
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com>
|
||||
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com>
|
||||
dependabot[bot] <dependabot[bot]@users.noreply.github.com>
|
||||
derekriemer <derek.riemer@colorado.edu>
|
||||
desbma <desbma@users.noreply.github.com>
|
||||
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
|
||||
Domenic Horner <domenic@tgxn.net>
|
||||
Dominik Heidler (asdil12) <dominik@heidler.eu>
|
||||
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
|
||||
Elliot Huffman <thelich2@gmail.com>
|
||||
Emil Hessman (ceh) <emil@hessman.se>
|
||||
Eric Lesiuta <elesiuta@gmail.com>
|
||||
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
|
||||
Evgeny Kuznetsov <evgeny@kuznetsov.md>
|
||||
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
|
||||
Felix Ableitner (Nutomic) <me@nutomic.com>
|
||||
Felix Lampe <mail@flampe.de>
|
||||
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
georgespatton <georgespatton@users.noreply.github.com>
|
||||
ghjklw <malo@jaffre.info>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Gleb Sinyavskiy <zhulik.gleb@gmail.com>
|
||||
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
|
||||
greatroar <61184462+greatroar@users.noreply.github.com>
|
||||
Han Boetes <han@boetes.org>
|
||||
HansK-p <42314815+HansK-p@users.noreply.github.com>
|
||||
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
|
||||
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Hugo Locurcio <hugo.locurcio@hugo.pro>
|
||||
Iain Barnett <iainspeed@gmail.com>
|
||||
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
|
||||
Ilya Brin <464157+ilyabrin@users.noreply.github.com>
|
||||
Iskander Sharipov (Alex) <quasilyte@gmail.com>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
|
||||
Jack Croft <jccroft1@users.noreply.github.com>
|
||||
Jacob <jyundt@gmail.com>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
|
||||
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
janost <janost@tuta.io>
|
||||
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
|
||||
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
|
||||
jaseg <githubaccount@jaseg.net>
|
||||
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
|
||||
jelle van der Waa <jelle@vdwaa.nl>
|
||||
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
|
||||
Jesse Lucas <jesse@jesselucas.com>
|
||||
Jochen Voss (seehuhn) <voss@seehuhn.de>
|
||||
Johan Andersson <j@i19.se>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
|
||||
Jonas Thelemann <e-mail@jonas-thelemann.de>
|
||||
Jonathan <artback@protonmail.com>
|
||||
Jonathan Cross <jcross@gmail.com>
|
||||
Jonta <359397+Jonta@users.noreply.github.com>
|
||||
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
|
||||
Jörg Thalheim <Mic92@users.noreply.github.com>
|
||||
Jędrzej Kula <kula.jedrek@gmail.com>
|
||||
Kalle Laine <pahakalle@protonmail.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Keith Turner <kturner@apache.org>
|
||||
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
|
||||
Kevin Allen (ironmig) <kma1660@gmail.com>
|
||||
Kevin Bushiri (keevBush) <keevbush@gmail.com> <36192217+keevBush@users.noreply.github.com>
|
||||
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
|
||||
klemens <ka7@github.com>
|
||||
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
|
||||
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
|
||||
Lars Lehtonen <lars.lehtonen@gmail.com>
|
||||
Laurent Arnoud <laurent@spkdev.net>
|
||||
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Leo Arias (elopio) <yo@elopio.net>
|
||||
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
|
||||
Lode Hoste (Zillode) <zillode@zillode.be>
|
||||
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
|
||||
Lukas Lihotzki <lukas@lihotzki.de>
|
||||
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
|
||||
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol (kilburn) <kilburn@la3.org>
|
||||
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
|
||||
marco-m <marco.molteni@laposte.net>
|
||||
Marcus Legendre <marcus.legendre@gmail.com>
|
||||
Mario Majila <mariustshipichik@gmail.com>
|
||||
Mark Pulford (mpx) <mark@kyne.com.au>
|
||||
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
|
||||
Mateusz Ż <thedead4fun@live.com>
|
||||
Matic Potočnik <hairyfotr@gmail.com>
|
||||
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
@@ -139,23 +179,34 @@ Maurizio Tomasi <ziotom78@gmail.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
MaximAL <almaximal@ya.ru>
|
||||
Maxime Thirouin <m@moox.io>
|
||||
mclang <1721600+mclang@users.noreply.github.com>
|
||||
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov (plouj) <ploujj@gmail.com>
|
||||
Michael Rienstra <mrienstra@gmail.com>
|
||||
Michael Tilli (pyfisch) <pyfisch@gmail.com>
|
||||
MichaIng <micha@dietpi.com>
|
||||
Mike Boone <mike@boonedocks.net>
|
||||
MikeLund <MikeLund@users.noreply.github.com>
|
||||
MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
|
||||
Mingxuan Lin <gdlmx@users.noreply.github.com>
|
||||
mv1005 <49659413+mv1005@users.noreply.github.com>
|
||||
Nate Morrison (nrm21) <natemorrison@gmail.com>
|
||||
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
|
||||
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
|
||||
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
|
||||
Nicolas Perraut <n.perraut@gmail.com>
|
||||
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
|
||||
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
|
||||
NinoM4ster <ninom4ster@gmail.com>
|
||||
Nitroretro <43112364+Nitroretro@users.noreply.github.com>
|
||||
NoLooseEnds <jon.koslung@gmail.com>
|
||||
Oliver Freyermuth <o.freyermuth@googlemail.com>
|
||||
otbutz <tbutz@optitool.de>
|
||||
Otiel <Otiel@users.noreply.github.com>
|
||||
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
|
||||
Pablo <pbaeyens31+github@gmail.com>
|
||||
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Paul Brit <paulbrit44@gmail.com>
|
||||
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
|
||||
Paweł Rozlach <vespian@users.noreply.github.com>
|
||||
perewa <cavalcante.ten@gmail.com>
|
||||
@@ -163,37 +214,48 @@ Peter Badida <KeyWeeUsr@users.noreply.github.com>
|
||||
Peter Dave Hello <hsu@peterdavehello.org>
|
||||
Peter Hoeg (peterhoeg) <peter@speartail.com>
|
||||
Peter Marquardt (wwwutz) <wwwutz@gmail.com> <wwwutz@googlemail.com>
|
||||
Phani Rithvij <phanirithvij2000@gmail.com>
|
||||
Phil Davis <phil.davis@inf.org>
|
||||
Philippe Schommers (filoozoom) <philippe@schommers.be>
|
||||
Phill Luby (pluby) <phill.luby@newredo.com>
|
||||
Pier Paolo Ramon <ramonpierre@gmail.com>
|
||||
Piotr Bejda (piobpl) <piotrb10@gmail.com>
|
||||
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
|
||||
Quentin Hibon <qh.public@yahoo.com>
|
||||
Rahmi Pruitt <rjpruitt16@gmail.com>
|
||||
Richard Hartmann <RichiH@users.noreply.github.com>
|
||||
Robert Carosi (nov1n) <robert@carosi.nl>
|
||||
Roberto Santalla <roobre@users.noreply.github.com>
|
||||
Robin Schoonover <robin@cornhooves.org>
|
||||
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
|
||||
Ross Smith II (rasa) <ross@smithii.com>
|
||||
rubenbe <github-com-00ff86@vandamme.email>
|
||||
Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
|
||||
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Simon Mwepu <simonmwepu@gmail.com>
|
||||
Sly_tom_cat <slytomcat@mail.ru>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
|
||||
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
|
||||
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
|
||||
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
|
||||
Thomas Hipp <thomashipp@gmail.com>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
Tim Howes (timhowes) <timhowes@berkeley.edu>
|
||||
Tobias Klauser <tobias.klauser@gmail.com>
|
||||
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
|
||||
Tobias Tom (tobiastom) <t.tom@succont.de>
|
||||
Tom Jakubowski <tom@crystae.net>
|
||||
Tomas Cerveny (kozec) <kozec@kozec.com>
|
||||
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
|
||||
Tommy Thorn <tommy-github-email@thorn.ws>
|
||||
Tully Robinson (tojrobinson) <tully@tojr.org>
|
||||
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
|
||||
Tyler Kropp <kropptyler@gmail.com>
|
||||
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
|
||||
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky (buinsky) <vix_booja@tut.by>
|
||||
@@ -201,7 +263,9 @@ Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
|
||||
Vladimir Rusinov <vrusinov@google.com>
|
||||
wangguoliang <liangcszzu@163.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
wouter bolsterlee <wouter@bolsterl.ee>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
|
||||
xarx00 <xarx00@users.noreply.github.com>
|
||||
Xavier O. (damajor) <damajor@gmail.com>
|
||||
xjtdy888 (xjtdy888) <xjtdy888@163.com>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
FROM golang:1.13 AS builder
|
||||
ARG GOVERSION=latest
|
||||
FROM golang:$GOVERSION AS builder
|
||||
|
||||
WORKDIR /src
|
||||
COPY . .
|
||||
@@ -10,11 +11,11 @@ RUN rm -f syncthing && go run build.go -no-upgrade build syncthing
|
||||
|
||||
FROM alpine
|
||||
|
||||
EXPOSE 8384 22000 21027/udp
|
||||
EXPOSE 8384 22000/tcp 22000/udp 21027/udp
|
||||
|
||||
VOLUME ["/var/syncthing"]
|
||||
|
||||
RUN apk add --no-cache ca-certificates su-exec
|
||||
RUN apk add --no-cache ca-certificates su-exec tzdata
|
||||
|
||||
COPY --from=builder /src/syncthing /bin/syncthing
|
||||
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
|
||||
@@ -22,7 +23,7 @@ COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
|
||||
ENV PUID=1000 PGID=1000 HOME=/var/syncthing
|
||||
|
||||
HEALTHCHECK --interval=1m --timeout=10s \
|
||||
CMD nc -z localhost 8384 || exit 1
|
||||
CMD nc -z 127.0.0.1 8384 || exit 1
|
||||
|
||||
ENV STGUIADDRESS=0.0.0.0:8384
|
||||
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]
|
||||
|
||||
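The Dockerfile hunks above replace the hard-coded golang:1.13 base with a GOVERSION build argument and point the container health check at 127.0.0.1 instead of localhost. A hedged sketch of how the argument is typically supplied (the image tag and Go version below are illustrative, not taken from this diff):

```
# Build against a specific Go toolchain instead of golang:latest.
docker build --build-arg GOVERSION=1.16 -t syncthing-local .

# Manual equivalent of the image's HEALTHCHECK probe, run inside the container.
docker exec <container-id> nc -z 127.0.0.1 8384 && echo healthy
```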
Dockerfile.builder (new file, 9 lines)
@@ -0,0 +1,9 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION

# FPM to build Debian packages
RUN apt-get update && apt-get install -y --no-install-recommends \
    locales rubygems ruby-dev build-essential git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && gem install --no-ri --no-rdoc fpm
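Dockerfile.builder provides a Go toolchain image with FPM installed for producing Debian packages. A hypothetical way to build and use it (image name and mount path are examples only, not prescribed by the diff):

```
# Build the builder image, optionally pinning the Go version.
docker build -f Dockerfile.builder --build-arg GOVERSION=1.16 -t syncthing-builder .

# Run the packaging step inside it; 'deb' is one of build.go's commands.
docker run --rm -v "$PWD":/src -w /src syncthing-builder go run build.go deb
```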
Dockerfile.buildx (new file, 19 lines)
@@ -0,0 +1,19 @@
FROM alpine
ARG TARGETARCH

EXPOSE 8384 22000/tcp 22000/udp 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates su-exec tzdata

COPY ./syncthing-linux-$TARGETARCH /bin/syncthing
COPY ./script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/syncthing

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z 127.0.0.1 8384 || exit 1

ENV STGUIADDRESS=0.0.0.0:8384
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]
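Dockerfile.buildx assembles the image from pre-built binaries named syncthing-linux-$TARGETARCH in the build context; TARGETARCH is filled in per platform by BuildKit/buildx. A hypothetical multi-arch build (tag and platform list are illustrative):

```
# Assumes ./syncthing-linux-amd64 and ./syncthing-linux-arm64 were cross-compiled beforehand.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -f Dockerfile.buildx \
  -t example/syncthing:latest \
  --push .
```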
@@ -1,4 +1,5 @@
FROM golang:1.13 AS builder
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder

WORKDIR /src
COPY . .

@@ -1,4 +1,5 @@
FROM golang:1.13 AS builder
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder

WORKDIR /src
COPY . .
@@ -7,17 +7,45 @@ Use the `/var/syncthing` volume to have the synchronized files available on the
host. You can add more folders and map them as you prefer.

Note that Syncthing runs as UID 1000 and GID 1000 by default. These may be
altered with the ``PUID`` and ``PGID`` environment variables.
altered with the ``PUID`` and ``PGID`` environment variables. In addition
the name of the Syncthing instance can be optionally defined by using
``--hostname=syncthing`` parameter.

## Example Usage

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run -p 8384:8384 -p 22000:22000 \
$ docker run --sysctl net.core.rmem_max=2097152 \
    -p 8384:8384 -p 22000:22000/tcp -p 22000:22000/udp \
    -v /wherever/st-sync:/var/syncthing \
    --hostname=my-syncthing \
    syncthing/syncthing:latest
```

**Docker compose**
```
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    ports:
      - 8384:8384
      - 22000:22000/tcp
      - 22000:22000/udp
    sysctls:
      - net.core.rmem_max=2097152
    restart: unless-stopped
```

## Discovery

Note that local device discovery will not work with the above command,
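Local device discovery relies on UDP broadcast/multicast on port 21027, which does not cross Docker's default bridge network. One common workaround (a general Docker approach, not a recommendation taken from this diff) is to run the container with host networking:

```
# Hypothetical invocation; host networking exposes the GUI and sync ports directly on the host.
docker run --network=host \
  -v /wherever/st-sync:/var/syncthing \
  syncthing/syncthing:latest
```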
README.md (10 lines changed)
@@ -80,7 +80,7 @@ bug, feel free to report it in the [GitHub issue tracker][10].

## Building

Building Syncthing from source is easy, and there's a [guide][5]
Building Syncthing from source is easy, and there's [a guide][5]
that describes it for both Unix and Windows systems.

## Signed Releases
@@ -101,18 +101,18 @@ All code is licensed under the [MPLv2 License][7].

[1]: https://docs.syncthing.net/specs/bep-v1.html
[2]: https://docs.syncthing.net/intro/getting-started.html
[3]: https://github.com/syncthing/syncthing/blob/master/etc
[3]: https://github.com/syncthing/syncthing/blob/main/etc
[4]: https://www.freenode.net/
[5]: https://docs.syncthing.net/dev/building.html
[6]: https://docs.syncthing.net/
[7]: https://github.com/syncthing/syncthing/blob/master/LICENSE
[7]: https://github.com/syncthing/syncthing/blob/main/LICENSE
[8]: https://forum.syncthing.net/
[9]: https://kiwiirc.com/client/irc.freenode.net/#syncthing
[10]: https://github.com/syncthing/syncthing/issues
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
[12]: https://www.bountysource.com/teams/syncthing/issues
[13]: https://github.com/syncthing/syncthing/blob/master/GOALS.md
[13]: https://github.com/syncthing/syncthing/blob/main/GOALS.md
[14]: assets/logo-text-128.png
[15]: https://syncthing.net/
[16]: https://github.com/syncthing/syncthing/blob/master/README-Docker.md
[16]: https://github.com/syncthing/syncthing/blob/main/README-Docker.md
Binary image assets changed (file names not captured in this export); before/after sizes: 9.8 KiB to 9.5 KiB, 20 KiB to 19 KiB, 2.2 KiB to 2.2 KiB, 40 KiB to 38 KiB, 4.9 KiB to 4.8 KiB, 19 KiB to 18 KiB, 38 KiB to 36 KiB, 9.8 KiB to 9.5 KiB, 8.2 KiB to 8.1 KiB.
BIN assets/logo.pdf (binary file changed)
build.go (554 lines changed)
@@ -15,6 +15,7 @@ import (
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -24,7 +25,6 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
@@ -35,22 +35,24 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
|
||||
goarch string
|
||||
goos string
|
||||
noupgrade bool
|
||||
version string
|
||||
goCmd string
|
||||
goVersion float64
|
||||
race bool
|
||||
debug = os.Getenv("BUILDDEBUG") != ""
|
||||
extraTags string
|
||||
installSuffix string
|
||||
pkgdir string
|
||||
cc string
|
||||
debugBinary bool
|
||||
coverage bool
|
||||
timeout = "120s"
|
||||
goarch string
|
||||
goos string
|
||||
noupgrade bool
|
||||
version string
|
||||
goCmd string
|
||||
race bool
|
||||
debug = os.Getenv("BUILDDEBUG") != ""
|
||||
extraTags string
|
||||
installSuffix string
|
||||
pkgdir string
|
||||
cc string
|
||||
run string
|
||||
benchRun string
|
||||
debugBinary bool
|
||||
coverage bool
|
||||
timeout = "120s"
|
||||
numVersions = 5
|
||||
withNextGenGUI = os.Getenv("BUILD_NEXT_GEN_GUI") != ""
|
||||
)
|
||||
|
||||
type target struct {
|
||||
@@ -58,12 +60,11 @@ type target struct {
|
||||
debname string
|
||||
debdeps []string
|
||||
debpre string
|
||||
debpost string
|
||||
description string
|
||||
buildPkgs []string
|
||||
binaryName string
|
||||
archiveFiles []archiveFile
|
||||
systemdServices []string
|
||||
systemdService string
|
||||
installationFiles []archiveFile
|
||||
tags []string
|
||||
}
|
||||
@@ -85,7 +86,6 @@ var targets = map[string]target{
|
||||
name: "syncthing",
|
||||
debname: "syncthing",
|
||||
debdeps: []string{"libc6", "procps"},
|
||||
debpost: "script/post-upgrade",
|
||||
description: "Open Source Continuous File Synchronization",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
|
||||
binaryName: "syncthing", // .exe will be added automatically for Windows builds
|
||||
@@ -96,6 +96,7 @@ var targets = map[string]target{
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
// All files from etc/ and extra/ added automatically in init().
|
||||
},
|
||||
systemdService: "syncthing@*.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
|
||||
@@ -114,6 +115,7 @@ var targets = map[string]target{
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
|
||||
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
|
||||
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
|
||||
@@ -139,9 +141,7 @@ var targets = map[string]target{
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
systemdServices: []string{
|
||||
"cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service",
|
||||
},
|
||||
systemdService: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
@@ -168,9 +168,7 @@ var targets = map[string]target{
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
systemdServices: []string{
|
||||
"cmd/strelaysrv/etc/linux-systemd/strelaysrv.service",
|
||||
},
|
||||
systemdService: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
@@ -216,7 +214,7 @@ var dependencyRepos = []dependencyRepo{
|
||||
{path: "xdr", repo: "https://github.com/calmh/xdr.git", commit: "08e072f9cb16"},
|
||||
}
|
||||
|
||||
func init() {
|
||||
func initTargets() {
|
||||
all := targets["all"]
|
||||
pkgs, _ := filepath.Glob("cmd/*")
|
||||
for _, pkg := range pkgs {
|
||||
@@ -225,6 +223,9 @@ func init() {
|
||||
// ignore dotfiles
|
||||
continue
|
||||
}
|
||||
if noupgrade && pkg == "stupgrades" {
|
||||
continue
|
||||
}
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
|
||||
}
|
||||
targets["all"] = all
|
||||
@@ -256,6 +257,8 @@ func main() {
|
||||
}()
|
||||
}
|
||||
|
||||
initTargets()
|
||||
|
||||
// Invoking build.go with no parameters at all builds everything (incrementally),
|
||||
// which is what you want for maximum error checking during development.
|
||||
if flag.NArg() == 0 {
|
||||
@@ -278,29 +281,31 @@ func main() {
}

func runCommand(cmd string, target target) {
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
tags = append(tags, strings.Fields(extraTags)...)

switch cmd {
case "install":
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
tags = append(tags, strings.Fields(extraTags)...)
install(target, tags)
metalintShort()

case "build":
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
tags = append(tags, strings.Fields(extraTags)...)
build(target, tags)

case "test":
test("github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
test(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")

case "bench":
bench("github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
bench(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")

case "integration":
|
||||
integration(false)
|
||||
|
||||
case "integrationbench":
|
||||
integration(true)
|
||||
|
||||
case "assets":
|
||||
rebuildAssets()
|
||||
@@ -308,6 +313,9 @@ func runCommand(cmd string, target target) {
|
||||
case "proto":
|
||||
proto()
|
||||
|
||||
case "testmocks":
|
||||
testmocks()
|
||||
|
||||
case "translate":
|
||||
translate()
|
||||
|
||||
@@ -315,17 +323,14 @@ func runCommand(cmd string, target target) {
|
||||
transifex()
|
||||
|
||||
case "tar":
|
||||
buildTar(target)
|
||||
buildTar(target, tags)
|
||||
|
||||
case "zip":
|
||||
buildZip(target)
|
||||
buildZip(target, tags)
|
||||
|
||||
case "deb":
|
||||
buildDeb(target)
|
||||
|
||||
case "snap":
|
||||
buildSnap(target)
|
||||
|
||||
case "vet":
|
||||
metalintShort()
|
||||
|
||||
@@ -338,6 +343,20 @@ func runCommand(cmd string, target target) {
|
||||
case "version":
|
||||
fmt.Println(getVersion())
|
||||
|
||||
case "changelog":
|
||||
vers, err := currentAndLatestVersions(numVersions)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ver := range vers {
|
||||
underline := strings.Repeat("=", len(ver))
|
||||
msg, err := tagMessage(ver)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("%s\n%s\n\n%s\n\n", ver, underline, msg)
|
||||
}
|
||||
|
||||
default:
|
||||
log.Fatalf("Unknown command %q", cmd)
|
||||
}
|
||||
@@ -356,13 +375,18 @@ func parseFlags() {
flag.StringVar(&cc, "cc", os.Getenv("CC"), "Set CC environment variable for `go build`")
flag.BoolVar(&debugBinary, "debug-binary", debugBinary, "Create unoptimized binary to use with delve, set -gcflags='-N -l' and omit -ldflags")
flag.BoolVar(&coverage, "coverage", coverage, "Write coverage profile of tests to coverage.txt")
flag.IntVar(&numVersions, "num-versions", numVersions, "Number of versions for changelog command")
flag.StringVar(&run, "run", "", "Specify which tests to run")
flag.StringVar(&benchRun, "bench", "", "Specify which benchmarks to run")
flag.BoolVar(&withNextGenGUI, "with-next-gen-gui", withNextGenGUI, "Also build 'newgui'")
flag.Parse()
}

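The new -num-versions, -run, -bench, and -with-next-gen-gui flags above pair with the changelog, test, bench, and build commands. As a rough usage sketch (the test and benchmark name patterns are placeholders, not names from the repository):

go run build.go -run TestXxx test
go run build.go -bench BenchmarkXxx bench
go run build.go -num-versions 3 changelog
go run build.go -with-next-gen-gui build
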
func test(pkgs ...string) {
func test(tags []string, pkgs ...string) {
lazyRebuildAssets()

args := []string{"test", "-short", "-timeout", timeout, "-tags", "purego"}
tags = append(tags, "purego")
args := []string{"test", "-short", "-timeout", timeout, "-tags", strings.Join(tags, " ")}

if runtime.GOARCH == "amd64" {
switch runtime.GOOS {
@@ -375,15 +399,56 @@ func test(pkgs ...string) {
args = append(args, "-covermode", "atomic", "-coverprofile", "coverage.txt", "-coverpkg", strings.Join(pkgs, ","))
}

args = append(args, runArgs()...)

runPrint(goCmd, append(args, pkgs...)...)
}
|
||||
func bench(pkgs ...string) {
|
||||
func bench(tags []string, pkgs ...string) {
|
||||
lazyRebuildAssets()
|
||||
runPrint(goCmd, append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
|
||||
args := append([]string{"test", "-run", "NONE", "-tags", strings.Join(tags, " ")}, benchArgs()...)
|
||||
runPrint(goCmd, append(args, pkgs...)...)
|
||||
}
|
||||
|
||||
func integration(bench bool) {
|
||||
lazyRebuildAssets()
|
||||
args := []string{"test", "-v", "-timeout", "60m", "-tags"}
|
||||
tags := "purego,integration"
|
||||
if bench {
|
||||
tags += ",benchmark"
|
||||
}
|
||||
args = append(args, tags)
|
||||
args = append(args, runArgs()...)
|
||||
if bench {
|
||||
if run == "" {
|
||||
args = append(args, "-run", "Benchmark")
|
||||
}
|
||||
args = append(args, benchArgs()...)
|
||||
}
|
||||
args = append(args, "./test")
|
||||
fmt.Println(args)
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
func runArgs() []string {
|
||||
if run == "" {
|
||||
return nil
|
||||
}
|
||||
return []string{"-run", run}
|
||||
}
|
||||
|
||||
func benchArgs() []string {
|
||||
if benchRun == "" {
|
||||
return []string{"-bench", "."}
|
||||
}
|
||||
return []string{"-bench", benchRun}
|
||||
}
|
||||
|
||||
func install(target target, tags []string) {
|
||||
if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
|
||||
log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
|
||||
}
|
||||
|
||||
lazyRebuildAssets()
|
||||
|
||||
tags = append(target.tags, tags...)
|
||||
@@ -394,9 +459,7 @@ func install(target target, tags []string) {
|
||||
}
|
||||
os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
|
||||
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
os.Setenv("CC", cc)
|
||||
setBuildEnvVars()
|
||||
|
||||
// On Windows generate a special file which the Go compiler will
|
||||
// automatically use when generating Windows binaries to set things like
|
||||
@@ -409,23 +472,22 @@ func install(target target, tags []string) {
|
||||
defer shouldCleanupSyso(sysoPath)
|
||||
}
|
||||
|
||||
for _, pkg := range target.buildPkgs {
|
||||
args := []string{"install", "-v"}
|
||||
args = appendParameters(args, tags, pkg)
|
||||
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
args := []string{"install", "-v"}
|
||||
args = appendParameters(args, tags, target.buildPkgs...)
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
func build(target target, tags []string) {
|
||||
if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
|
||||
log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
|
||||
}
|
||||
|
||||
lazyRebuildAssets()
|
||||
tags = append(target.tags, tags...)
|
||||
|
||||
rmr(target.BinaryName())
|
||||
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
os.Setenv("CC", cc)
|
||||
setBuildEnvVars()
|
||||
|
||||
// On Windows generate a special file which the Go compiler will
|
||||
// automatically use when generating Windows binaries to set things like
|
||||
@@ -442,15 +504,25 @@ func build(target target, tags []string) {
|
||||
defer shouldCleanupSyso(sysoPath)
|
||||
}
|
||||
|
||||
for _, pkg := range target.buildPkgs {
|
||||
args := []string{"build", "-v"}
|
||||
args = appendParameters(args, tags, pkg)
|
||||
args := []string{"build", "-v"}
|
||||
args = appendParameters(args, tags, target.buildPkgs...)
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
runPrint(goCmd, args...)
|
||||
func setBuildEnvVars() {
|
||||
os.Setenv("GOOS", goos)
|
||||
os.Setenv("GOARCH", goarch)
|
||||
os.Setenv("CC", cc)
|
||||
if os.Getenv("CGO_ENABLED") == "" {
|
||||
switch goos {
|
||||
case "darwin", "solaris":
|
||||
default:
|
||||
os.Setenv("CGO_ENABLED", "0")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func appendParameters(args []string, tags []string, pkg string) []string {
|
||||
func appendParameters(args []string, tags []string, pkgs ...string) []string {
|
||||
if pkgdir != "" {
|
||||
args = append(args, "-pkgdir", pkgdir)
|
||||
}
|
||||
@@ -466,26 +538,27 @@ func appendParameters(args []string, tags []string, pkg string) []string {
|
||||
|
||||
if !debugBinary {
|
||||
// Regular binaries get version tagged and skip some debug symbols
|
||||
args = append(args, "-ldflags", ldflags(path.Base(pkg)))
|
||||
args = append(args, "-trimpath", "-ldflags", ldflags(tags))
|
||||
} else {
|
||||
// -gcflags to disable optimizations and inlining. Skip -ldflags
|
||||
// because `Could not launch program: decoding dwarf section info at
|
||||
// offset 0x0: too short` on 'dlv exec ...' see
|
||||
// https://github.com/derekparker/delve/issues/79
|
||||
args = append(args, "-gcflags", "-N -l")
|
||||
// https://github.com/go-delve/delve/issues/79
|
||||
args = append(args, "-gcflags", "all=-N -l")
|
||||
}
|
||||
|
||||
return append(args, pkg)
|
||||
return append(args, pkgs...)
|
||||
}
|
||||
|
||||
func buildTar(target target) {
|
||||
func buildTar(target target, tags []string) {
|
||||
name := archiveName(target)
|
||||
filename := name + ".tar.gz"
|
||||
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
name += "-noupgrade"
|
||||
for _, tag := range tags {
|
||||
if tag == "noupgrade" {
|
||||
name += "-noupgrade"
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
build(target, tags)
|
||||
@@ -501,14 +574,15 @@ func buildTar(target target) {
|
||||
fmt.Println(filename)
|
||||
}
|
||||
|
||||
func buildZip(target target) {
|
||||
func buildZip(target target, tags []string) {
|
||||
name := archiveName(target)
|
||||
filename := name + ".zip"
|
||||
|
||||
var tags []string
|
||||
if noupgrade {
|
||||
tags = []string{"noupgrade"}
|
||||
name += "-noupgrade"
|
||||
for _, tag := range tags {
|
||||
if tag == "noupgrade" {
|
||||
name += "-noupgrade"
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
build(target, tags)
|
||||
@@ -576,11 +650,13 @@ func buildDeb(target target) {
|
||||
for _, dep := range target.debdeps {
|
||||
args = append(args, "-d", dep)
|
||||
}
|
||||
for _, service := range target.systemdServices {
|
||||
args = append(args, "--deb-systemd", service)
|
||||
}
|
||||
if target.debpost != "" {
|
||||
args = append(args, "--after-upgrade", target.debpost)
|
||||
if target.systemdService != "" {
|
||||
debpost, err := createPostInstScript(target)
|
||||
defer os.Remove(debpost)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
args = append(args, "--after-upgrade", debpost)
|
||||
}
|
||||
if target.debpre != "" {
|
||||
args = append(args, "--before-install", target.debpre)
|
||||
@@ -588,73 +664,68 @@ func buildDeb(target target) {
|
||||
runPrint("fpm", args...)
|
||||
}
|
||||
|
||||
func buildSnap(target target) {
|
||||
os.RemoveAll("snap")
|
||||
|
||||
tmpl, err := template.ParseFiles("snapcraft.yaml.template")
|
||||
func createPostInstScript(target target) (string, error) {
|
||||
scriptname := filepath.Join("script", "deb-post-inst.template")
|
||||
t, err := template.ParseFiles(scriptname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return "", err
|
||||
}
|
||||
f, err := os.Create("snapcraft.yaml")
|
||||
defer f.Close()
|
||||
scriptname = strings.TrimSuffix(scriptname, ".template")
|
||||
w, err := os.Create(scriptname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
snaparch := goarch
|
||||
if snaparch == "armhf" {
|
||||
goarch = "arm"
|
||||
} else if snaparch == "i386" {
|
||||
goarch = "386"
|
||||
defer w.Close()
|
||||
if err = t.Execute(w, struct {
|
||||
Service, Command string
|
||||
}{
|
||||
target.systemdService, target.binaryName,
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
snapver := version
|
||||
if strings.HasPrefix(snapver, "v") {
|
||||
snapver = snapver[1:]
|
||||
}
|
||||
snapgrade := "devel"
|
||||
if matched, _ := regexp.MatchString(`^\d+\.\d+\.\d+(-rc.\d+)?$`, snapver); matched {
|
||||
snapgrade = "stable"
|
||||
}
|
||||
err = tmpl.Execute(f, map[string]string{
|
||||
"Version": snapver,
|
||||
"HostArchitecture": runtime.GOARCH,
|
||||
"TargetArchitecture": snaparch,
|
||||
"Grade": snapgrade,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
runPrint("snapcraft", "clean")
|
||||
build(target, []string{"noupgrade"})
|
||||
runPrint("snapcraft")
|
||||
return scriptname, nil
|
||||
}
|
||||
|
||||
func shouldBuildSyso(dir string) (string, error) {
|
||||
type M map[string]interface{}
|
||||
version := getVersion()
|
||||
version = strings.TrimPrefix(version, "v")
|
||||
major, minor, patch := semanticVersion()
|
||||
bs, err := json.Marshal(M{
|
||||
"FixedFileInfo": M{
|
||||
"FileVersion": M{
|
||||
"Major": major,
|
||||
"Minor": minor,
|
||||
"Patch": patch,
|
||||
},
|
||||
"ProductVersion": M{
|
||||
"Major": major,
|
||||
"Minor": minor,
|
||||
"Patch": patch,
|
||||
},
|
||||
},
|
||||
"StringFileInfo": M{
|
||||
"CompanyName": "The Syncthing Authors",
|
||||
"FileDescription": "Syncthing - Open Source Continuous File Synchronization",
|
||||
"FileVersion": version,
|
||||
"InternalName": "syncthing",
|
||||
"LegalCopyright": "The Syncthing Authors",
|
||||
"OriginalFilename": "syncthing",
|
||||
"ProductName": "Syncthing",
|
||||
"ProductVersion": version,
|
||||
},
|
||||
"IconPath": "assets/logo.ico",
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
jsonPath := filepath.Join(dir, "versioninfo.json")
|
||||
file, err := os.Create(filepath.Join(dir, "versioninfo.json"))
|
||||
err = ioutil.WriteFile(jsonPath, bs, 0644)
|
||||
if err != nil {
|
||||
return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
|
||||
}
|
||||
|
||||
major, minor, patch, build := semanticVersion()
|
||||
fmt.Fprintf(file, `{
|
||||
"FixedFileInfo": {
|
||||
"FileVersion": {
|
||||
"Major": %s,
|
||||
"Minor": %s,
|
||||
"Patch": %s,
|
||||
"Build": %s
|
||||
}
|
||||
},
|
||||
"StringFileInfo": {
|
||||
"FileDescription": "Open Source Continuous File Synchronization",
|
||||
"LegalCopyright": "The Syncthing Authors",
|
||||
"ProductVersion": "%s",
|
||||
"ProductName": "Syncthing"
|
||||
},
|
||||
"IconPath": "assets/logo.ico"
|
||||
}`, major, minor, patch, build, getVersion())
|
||||
file.Close()
|
||||
defer func() {
|
||||
if err := os.Remove(jsonPath); err != nil {
|
||||
log.Printf("Warning: unable to remove generated %s: %v. Please remove it manually.", jsonPath, err)
|
||||
@@ -728,15 +799,50 @@ func listFiles(dir string) []string {
|
||||
|
||||
func rebuildAssets() {
|
||||
os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
|
||||
}
|
||||
|
||||
func lazyRebuildAssets() {
|
||||
if shouldRebuildAssets("lib/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/auto/gui") {
|
||||
shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
|
||||
shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")
|
||||
|
||||
if withNextGenGUI {
|
||||
shouldRebuild = buildNextGenGUI() || shouldRebuild
|
||||
}
|
||||
|
||||
if shouldRebuild {
|
||||
rebuildAssets()
|
||||
}
|
||||
}
|
||||
|
||||
func buildNextGenGUI() bool {
|
||||
// Check if we need to run the npm process, and if so also set the flag
|
||||
// to rebuild Go assets afterwards. The index.html is regenerated every
|
||||
// time by the build process. This assumes the new GUI ends up in
|
||||
// next-gen-gui/dist/next-gen-gui.
|
||||
|
||||
if !shouldRebuildAssets("gui/next-gen-gui/index.html", "next-gen-gui") {
|
||||
// The GUI is up to date.
|
||||
return false
|
||||
}
|
||||
|
||||
runPrintInDir("next-gen-gui", "npm", "install")
|
||||
runPrintInDir("next-gen-gui", "npm", "run", "build", "--", "--prod", "--subresource-integrity")
|
||||
|
||||
rmr("gui/tech-ui")
|
||||
|
||||
for _, src := range listFiles("next-gen-gui/dist") {
|
||||
rel, _ := filepath.Rel("next-gen-gui/dist", src)
|
||||
dst := filepath.Join("gui", rel)
|
||||
if err := copyFile(src, dst, 0644); err != nil {
|
||||
fmt.Println("copy:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func shouldRebuildAssets(target, srcdir string) bool {
|
||||
info, err := os.Stat(target)
|
||||
if err != nil {
|
||||
@@ -780,7 +886,24 @@ func proto() {
|
||||
}
|
||||
runPrintInDir(path, "git", "checkout", dep.commit)
|
||||
}
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/stdiscosrv")
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
|
||||
runPrint(goCmd, "generate", "proto/generate.go")
|
||||
}
|
||||
|
||||
func testmocks() {
|
||||
runPrint(goCmd, "get", "golang.org/x/tools/cmd/goimports")
|
||||
runPrint(goCmd, "get", "github.com/maxbrunsfeld/counterfeiter/v6")
|
||||
args := []string{
|
||||
"generate",
|
||||
"github.com/syncthing/syncthing/lib/config",
|
||||
"github.com/syncthing/syncthing/lib/connections",
|
||||
"github.com/syncthing/syncthing/lib/discover",
|
||||
"github.com/syncthing/syncthing/lib/events",
|
||||
"github.com/syncthing/syncthing/lib/logger",
|
||||
"github.com/syncthing/syncthing/lib/model",
|
||||
"github.com/syncthing/syncthing/lib/protocol",
|
||||
}
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
func translate() {
@@ -799,19 +922,14 @@ func transifex() {
runPrint(goCmd, "run", "../../../../script/transifexdl.go")
}

func ldflags(program string) string {
sep := '='
if goVersion > 0 && goVersion < 1.5 {
sep = ' '
}

b := new(bytes.Buffer)
func ldflags(tags []string) string {
b := new(strings.Builder)
b.WriteString("-w")
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Version%c%s", sep, version)
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Stamp%c%d", sep, buildStamp())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.User%c%s", sep, buildUser())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Host%c%s", sep, buildHost())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Program%c%s", sep, program)
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Version=%s", version)
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Stamp=%d", buildStamp())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.User=%s", buildUser())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Host=%s", buildHost())
fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Tags=%s", strings.Join(tags, ","))
if v := os.Getenv("EXTRA_LDFLAGS"); v != "" {
fmt.Fprintf(b, " %s", v)
}
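For illustration only (the version, timestamp, user, host, and tag values below are hypothetical), the string assembled by the new ldflags comes out roughly as:

-w -X github.com/syncthing/syncthing/lib/build.Version=v1.4.0 -X github.com/syncthing/syncthing/lib/build.Stamp=1561023600 -X github.com/syncthing/syncthing/lib/build.User=builder -X github.com/syncthing/syncthing/lib/build.Host=build.example.com -X github.com/syncthing/syncthing/lib/build.Tags=noupgrade

with anything in EXTRA_LDFLAGS appended at the end.
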
@@ -828,13 +946,7 @@ func rmr(paths ...string) {
}

func getReleaseVersion() (string, error) {
fd, err := os.Open("RELEASE")
if err != nil {
return "", err
}
defer fd.Close()

bs, err := ioutil.ReadAll(fd)
bs, err := ioutil.ReadFile("RELEASE")
if err != nil {
return "", err
}
@@ -842,15 +954,39 @@ func getReleaseVersion() (string, error) {
}

func getGitVersion() (string, error) {
v, err := runError("git", "describe", "--always", "--dirty")
// The current version as Git sees it
bs, err := runError("git", "describe", "--always", "--dirty", "--abbrev=8")
if err != nil {
return "", err
}
v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
s[0] = '+'
return s
})
return string(v), nil
vcur := string(bs)

// The closest current tag name
bs, err = runError("git", "describe", "--always", "--abbrev=0")
if err != nil {
return "", err
}
v0 := string(bs)

// To be more semantic-versionish and ensure proper ordering in our
// upgrade process, we make sure there's only one hyphen in the version.

versionRe := regexp.MustCompile(`-([0-9]{1,3}-g[0-9a-f]{5,10}(-dirty)?)`)
if m := versionRe.FindStringSubmatch(vcur); len(m) > 0 {
suffix := strings.ReplaceAll(m[1], "-", ".")

if strings.Contains(v0, "-") {
// We're based off a tag with a prerelease string. We can just
// add our dev stuff directly.
return fmt.Sprintf("%s.dev.%s", v0, suffix), nil
}

// We're based on a release version. We need to bump the patch
// version and then add a -dev prerelease string.
next := nextPatchVersion(v0)
return fmt.Sprintf("%s-dev.%s", next, suffix), nil
}
return vcur, nil
}

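A worked example of the rewritten getGitVersion, using hypothetical git output: if the closest tag is v1.3.0 and HEAD is 14 commits past it, git describe returns something like v1.3.0-14-g6aaae618; the regexp captures the 14-g6aaae618 suffix, nextPatchVersion bumps the tag to v1.3.1, and the reported version becomes v1.3.1-dev.14.g6aaae618. If the closest tag already carries a prerelease part, say v1.3.1-rc.2, the suffix is appended directly, giving v1.3.1-rc.2.dev.14.g6aaae618.
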
func getVersion() string {
@@ -871,13 +1007,18 @@ func getVersion() string {
return "unknown-dev"
}

func semanticVersion() (major, minor, patch, build string) {
r := regexp.MustCompile(`v(?P<Major>\d+)\.(?P<Minor>\d+).(?P<Patch>\d+).*\+(?P<CommitsAhead>\d+)`)
func semanticVersion() (major, minor, patch int) {
r := regexp.MustCompile(`v(\d+)\.(\d+).(\d+)`)
matches := r.FindStringSubmatch(getVersion())
if len(matches) != 5 {
return "0", "0", "0", "0"
if len(matches) != 4 {
return 0, 0, 0
}
return matches[1], matches[2], matches[3], matches[4]

var ints [3]int
for i, s := range matches[1:] {
ints[i], _ = strconv.Atoi(s)
}
return ints[0], ints[1], ints[2]
}

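As a concrete example of the new semanticVersion: for a getVersion() result of v1.3.1-rc.2+22-g6aaae618 the regexp matches v1.3.1 and the function returns the integers 1, 3, 1; any version string that does not match yields 0, 0, 0.
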
func getBranchSuffix() string {
|
||||
@@ -916,7 +1057,7 @@ func getBranchSuffix() string {
|
||||
|
||||
branch = parts[len(parts)-1]
|
||||
switch branch {
|
||||
case "master", "release":
|
||||
case "master", "release", "main":
|
||||
// these are not special
|
||||
return ""
|
||||
}
|
||||
@@ -1192,7 +1333,7 @@ func macosCodesign(file string) {
|
||||
}
|
||||
|
||||
if id := os.Getenv("CODESIGN_IDENTITY"); id != "" {
|
||||
bs, err := runError("codesign", "-s", id, file)
|
||||
bs, err := runError("codesign", "--options=runtime", "-s", id, file)
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed:", string(bs))
|
||||
return
|
||||
@@ -1288,3 +1429,70 @@ func protobufVersion() string {
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func currentAndLatestVersions(n int) ([]string, error) {
|
||||
bs, err := runError("git", "tag", "--sort", "taggerdate")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lines := strings.Split(string(bs), "\n")
|
||||
reverseStrings(lines)
|
||||
|
||||
// The one at the head is the latest version. We always keep that one.
|
||||
// Then we filter out remaining ones with dashes (pre-releases etc).
|
||||
|
||||
latest := lines[:1]
|
||||
nonPres := filterStrings(lines[1:], func(s string) bool { return !strings.Contains(s, "-") })
|
||||
vers := append(latest, nonPres...)
|
||||
return vers[:n], nil
|
||||
}
|
||||
|
||||
func reverseStrings(ss []string) {
|
||||
for i := 0; i < len(ss)/2; i++ {
|
||||
ss[i], ss[len(ss)-1-i] = ss[len(ss)-1-i], ss[i]
|
||||
}
|
||||
}
|
||||
|
||||
func filterStrings(ss []string, op func(string) bool) []string {
|
||||
n := ss[:0]
|
||||
for _, s := range ss {
|
||||
if op(s) {
|
||||
n = append(n, s)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func tagMessage(tag string) (string, error) {
|
||||
hash, err := runError("git", "rev-parse", tag)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
obj, err := runError("git", "cat-file", "-p", string(hash))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return trimTagMessage(string(obj), tag), nil
|
||||
}
|
||||
|
||||
func trimTagMessage(msg, tag string) string {
|
||||
firstBlank := strings.Index(msg, "\n\n")
|
||||
if firstBlank > 0 {
|
||||
msg = msg[firstBlank+2:]
|
||||
}
|
||||
msg = strings.TrimPrefix(msg, tag)
|
||||
beginSig := strings.Index(msg, "-----BEGIN PGP")
|
||||
if beginSig > 0 {
|
||||
msg = msg[:beginSig]
|
||||
}
|
||||
return strings.TrimSpace(msg)
|
||||
}
|
||||
|
||||
func nextPatchVersion(ver string) string {
|
||||
parts := strings.SplitN(ver, "-", 2)
|
||||
digits := strings.Split(parts[0], ".")
|
||||
n, _ := strconv.Atoi(digits[len(digits)-1])
|
||||
digits[len(digits)-1] = strconv.Itoa(n + 1)
|
||||
return strings.Join(digits, ".")
|
||||
}
|
||||
|
||||
build.ps1 (new file, 20 lines)
@@ -0,0 +1,20 @@
function build {
go run build.go @args
}

$cmd, $rest = $args
switch ($cmd) {
"test" {
$env:LOGGER_DISCARD=1
build test
}

"bench" {
$env:LOGGER_DISCARD=1
build bench
}

default {
build @rest
}
}
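A brief usage sketch for the new PowerShell wrapper (assuming it is run from the repository root): .\build.ps1 test and .\build.ps1 bench set LOGGER_DISCARD=1 and forward to go run build.go with the test or bench command; anything else falls through to the default branch.
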
build.sh (73 lines changed)
@@ -2,8 +2,6 @@
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
STTRACE=${STTRACE:-}
|
||||
|
||||
script() {
|
||||
name="$1"
|
||||
shift
|
||||
@@ -15,88 +13,23 @@ build() {
|
||||
}
|
||||
|
||||
case "${1:-default}" in
|
||||
default)
|
||||
build
|
||||
;;
|
||||
|
||||
clean)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
tar)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
assets)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
xdr)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
translate)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
deb)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
setup)
|
||||
build "$@"
|
||||
;;
|
||||
|
||||
test)
|
||||
LOGGER_DISCARD=1 build test
|
||||
;;
|
||||
|
||||
bench)
|
||||
LOGGER_DISCARD=1 build bench | script benchfilter
|
||||
LOGGER_DISCARD=1 build bench
|
||||
;;
|
||||
|
||||
prerelease)
|
||||
go run script/authors.go
|
||||
script authors
|
||||
build transifex
|
||||
pushd man ; ./refresh.sh ; popd
|
||||
git add -A gui man AUTHORS
|
||||
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
|
||||
;;
|
||||
|
||||
noupgrade)
|
||||
build -no-upgrade tar
|
||||
;;
|
||||
|
||||
all)
|
||||
platforms=(
|
||||
darwin-amd64 dragonfly-amd64 freebsd-amd64 linux-amd64 netbsd-amd64 openbsd-amd64 solaris-amd64 windows-amd64
|
||||
freebsd-386 linux-386 netbsd-386 openbsd-386 windows-386
|
||||
linux-arm linux-arm64 linux-ppc64 linux-ppc64le
|
||||
)
|
||||
|
||||
for plat in "${platforms[@]}"; do
|
||||
echo Building "$plat"
|
||||
|
||||
goos="${plat%-*}"
|
||||
goarch="${plat#*-}"
|
||||
dist="tar"
|
||||
|
||||
if [[ $goos == "windows" ]]; then
|
||||
dist="zip"
|
||||
fi
|
||||
|
||||
build -goos "$goos" -goarch "$goarch" "$dist"
|
||||
echo
|
||||
done
|
||||
;;
|
||||
|
||||
test-xunit)
|
||||
|
||||
(GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -v -race ./lib/... ./cmd/... || true) > tests.out
|
||||
go2xunit -output tests.xml -fail < tests.out
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown build command $1"
|
||||
build "$@"
|
||||
;;
|
||||
esac
|
||||
|
||||
@@ -1,95 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type APIClient struct {
|
||||
http.Client
|
||||
cfg config.GUIConfiguration
|
||||
apikey string
|
||||
}
|
||||
|
||||
func getClient(cfg config.GUIConfiguration) *APIClient {
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial(cfg.Network(), cfg.Address())
|
||||
},
|
||||
},
|
||||
}
|
||||
return &APIClient{
|
||||
Client: httpClient,
|
||||
cfg: cfg,
|
||||
apikey: cfg.APIKey,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *APIClient) Endpoint() string {
|
||||
if c.cfg.Network() == "unix" {
|
||||
return "http://unix/"
|
||||
}
|
||||
url := c.cfg.URL()
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
func (c *APIClient) Do(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("X-API-Key", c.apikey)
|
||||
resp, err := c.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, checkResponse(resp)
|
||||
}
|
||||
|
||||
func (c *APIClient) Get(url string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("GET", c.Endpoint()+"rest/"+url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func (c *APIClient) Post(url, body string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("POST", c.Endpoint()+"rest/"+url, bytes.NewBufferString(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func checkResponse(response *http.Response) error {
|
||||
if response.StatusCode == 404 {
|
||||
return fmt.Errorf("Invalid endpoint or API call")
|
||||
} else if response.StatusCode == 403 {
|
||||
return fmt.Errorf("Invalid API key")
|
||||
} else if response.StatusCode != 200 {
|
||||
data, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := strings.TrimSpace(string(data))
|
||||
return fmt.Errorf("Unexpected HTTP status returned: %s\n%s", response.Status, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,192 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/AudriusButkevicius/recli"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func main() {
// This is somewhat of a hack around a chicken-and-egg problem.
// We need to set the home directory and potentially other flags to know where the syncthing instance is running
// in order to get its config ... which we then use to construct the actual CLI ... at which point it's too late
// to add flags there...
homeBaseDir := locations.GetBaseDir(locations.ConfigBaseDir)
guiCfg := config.GUIConfiguration{}
|
||||
|
||||
flags := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
flags.StringVar(&guiCfg.RawAddress, "gui-address", guiCfg.RawAddress, "Override GUI address (e.g. \"http://192.0.2.42:8443\")")
|
||||
flags.StringVar(&guiCfg.APIKey, "gui-apikey", guiCfg.APIKey, "Override GUI API key")
|
||||
flags.StringVar(&homeBaseDir, "home", homeBaseDir, "Set configuration directory")
|
||||
|
||||
// Implement the same flags at the lower CLI, with the same default values (pre-parse), but do nothing with them.
|
||||
// This is so that we could reuse os.Args
|
||||
fakeFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "gui-address",
|
||||
Value: guiCfg.RawAddress,
|
||||
Usage: "Override GUI address (e.g. \"http://192.0.2.42:8443\")",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "gui-apikey",
|
||||
Value: guiCfg.APIKey,
|
||||
Usage: "Override GUI API key",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "home",
|
||||
Value: homeBaseDir,
|
||||
Usage: "Set configuration directory",
|
||||
},
|
||||
}
|
||||
|
||||
// Do not print usage of these flags, and ignore errors as this can't understand plenty of things
|
||||
flags.Usage = func() {}
|
||||
_ = flags.Parse(os.Args[1:])
|
||||
|
||||
// Now if the API key and address is not provided (we are not connecting to a remote instance),
|
||||
// try to rip it out of the config.
|
||||
if guiCfg.RawAddress == "" && guiCfg.APIKey == "" {
|
||||
// Update the base directory
|
||||
err := locations.SetBaseDir(locations.ConfigBaseDir, homeBaseDir)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "setting home"))
|
||||
}
|
||||
|
||||
// Load the certs and get the ID
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "reading device ID"))
|
||||
}
|
||||
|
||||
myID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
|
||||
// Load the config
|
||||
cfg, err := config.Load(locations.Get(locations.ConfigFile), myID, events.NoopLogger)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "loading config"))
|
||||
}
|
||||
|
||||
guiCfg = cfg.GUI()
|
||||
} else if guiCfg.Address() == "" || guiCfg.APIKey == "" {
|
||||
log.Fatalln("Both -gui-address and -gui-apikey should be specified")
|
||||
}
|
||||
|
||||
if guiCfg.Address() == "" {
|
||||
log.Fatalln("Could not find GUI Address")
|
||||
}
|
||||
|
||||
if guiCfg.APIKey == "" {
|
||||
log.Fatalln("Could not find GUI API key")
|
||||
}
|
||||
|
||||
client := getClient(guiCfg)
|
||||
|
||||
cfg, err := getConfig(client)
|
||||
original := cfg.Copy()
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "getting config"))
|
||||
}
|
||||
|
||||
// Copy the config and set the default flags
|
||||
recliCfg := recli.DefaultConfig
|
||||
recliCfg.IDTag.Name = "xml"
|
||||
recliCfg.SkipTag.Name = "json"
|
||||
|
||||
commands, err := recli.New(recliCfg).Construct(&cfg)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "config reflect"))
|
||||
}
|
||||
|
||||
// Construct the actual CLI
|
||||
app := cli.NewApp()
|
||||
app.Name = "stcli"
|
||||
app.HelpName = app.Name
|
||||
app.Author = "The Syncthing Authors"
|
||||
app.Usage = "Syncthing command line interface"
|
||||
app.Version = build.Version
|
||||
app.Flags = fakeFlags
|
||||
app.Metadata = map[string]interface{}{
|
||||
"client": client,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "config",
|
||||
HideHelp: true,
|
||||
Usage: "Configuration modification command group",
|
||||
Subcommands: commands,
|
||||
},
|
||||
showCommand,
|
||||
operationCommand,
|
||||
errorsCommand,
|
||||
}
|
||||
|
||||
tty := isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd())
|
||||
if !tty {
|
||||
// Not a TTY, consume from stdin
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "parsing input"))
|
||||
}
|
||||
if len(input) == 0 {
|
||||
continue
|
||||
}
|
||||
err = app.Run(append(os.Args, input...))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
err = scanner.Err()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
err = app.Run(os.Args)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg, original) {
|
||||
body, err := json.MarshalIndent(cfg, "", " ")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
resp, err := client.Post("system/config", string(body))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
body, err := responseToBArray(resp)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
log.Fatalln(string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -15,6 +14,8 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -60,7 +61,7 @@ func compareDirectories(dirs ...string) error {
|
||||
} else if res[i].name > res[0].name {
|
||||
return fmt.Errorf("%s missing %v (present in %s)", dirs[i], res[0], dirs[0])
|
||||
}
|
||||
return fmt.Errorf("Mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
|
||||
return fmt.Errorf("mismatch; %v (%s) != %v (%s)", res[i], dirs[i], res[0], dirs[0])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,7 +75,7 @@ type fileInfo struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
mod int64
|
||||
hash [16]byte
|
||||
hash [sha256.Size]byte
|
||||
}
|
||||
|
||||
func (f fileInfo) String() string {
|
||||
@@ -106,11 +107,7 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := md5.New()
|
||||
h.Write([]byte(tgt))
|
||||
hash := h.Sum(nil)
|
||||
|
||||
copy(f.hash[:], hash)
|
||||
f.hash = sha256.Sum256([]byte(tgt))
|
||||
} else if info.IsDir() {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
@@ -123,7 +120,7 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
mode: info.Mode(),
|
||||
mod: info.ModTime().Unix(),
|
||||
}
|
||||
sum, err := md5file(path)
|
||||
sum, err := sha256file(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -150,14 +147,14 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
return errc
|
||||
}
|
||||
|
||||
func md5file(fname string) (hash [16]byte, err error) {
|
||||
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := md5.New()
|
||||
h := sha256.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
cmd/stcrashreceiver/main.go (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Command stcrashreceiver is a trivial HTTP server that allows two things:
|
||||
//
|
||||
// - uploading files (crash reports) named like a SHA256 hash using a PUT request
|
||||
// - checking whether such file exists using a HEAD request
|
||||
//
|
||||
// Typically this should be deployed behind something that manages HTTPS.
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/lib/ur"
|
||||
|
||||
raven "github.com/getsentry/raven-go"
|
||||
)
|
||||
|
||||
const maxRequestSize = 1 << 20 // 1 MiB
|
||||
|
||||
func main() {
|
||||
dir := flag.String("dir", ".", "Directory to store reports in")
|
||||
dsn := flag.String("dsn", "", "Sentry DSN")
|
||||
listen := flag.String("listen", ":22039", "HTTP listen address")
|
||||
flag.Parse()
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
cr := &crashReceiver{
|
||||
dir: *dir,
|
||||
dsn: *dsn,
|
||||
}
|
||||
mux.Handle("/", cr)
|
||||
|
||||
if *dsn != "" {
|
||||
mux.HandleFunc("/newcrash/failure", handleFailureFn(*dsn))
|
||||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
if err := http.ListenAndServe(*listen, mux); err != nil {
|
||||
log.Fatalln("HTTP serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func handleFailureFn(dsn string) func(w http.ResponseWriter, req *http.Request) {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
bs, err := ioutil.ReadAll(lr)
|
||||
req.Body.Close()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
|
||||
var reports []ur.FailureReport
|
||||
err = json.Unmarshal(bs, &reports)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 400)
|
||||
return
|
||||
}
|
||||
if len(reports) == 0 {
|
||||
// Shouldn't happen
|
||||
log.Printf("Got zero failure reports")
|
||||
return
|
||||
}
|
||||
|
||||
version, err := parseVersion(reports[0].Version)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 400)
|
||||
return
|
||||
}
|
||||
for _, r := range reports {
|
||||
pkt := packet(version, "failure")
|
||||
pkt.Message = r.Description
|
||||
pkt.Extra = raven.Extra{
|
||||
"count": r.Count,
|
||||
}
|
||||
pkt.Fingerprint = []string{r.Description}
|
||||
|
||||
if err := sendReport(dsn, pkt, userIDFor(req)); err != nil {
|
||||
log.Println("Failed to send failure report:", err)
|
||||
} else {
|
||||
log.Println("Sent failure report:", r.Description)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// userIDFor returns a string we can use as the user ID for the purpose of
// counting affected users. It's the truncated hash of a salt, the user's
// remote IP, and the current month.
func userIDFor(req *http.Request) string {
addr := req.RemoteAddr
if fwd := req.Header.Get("x-forwarded-for"); fwd != "" {
addr = fwd
}
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
now := time.Now().Format("200601")
salt := "stcrashreporter"
hash := sha256.Sum256([]byte(salt + addr + now))
return fmt.Sprintf("%x", hash[:8])
}
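As a worked example with hypothetical values: for a request from 203.0.113.7 handled in June 2019, the hashed input is the string stcrashreporter203.0.113.7201906, and the reported user ID is the first 8 bytes of its SHA-256 digest rendered as 16 hex characters.
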
@@ -31,17 +31,15 @@ var (
|
||||
clientsMut sync.Mutex
|
||||
)
|
||||
|
||||
func sendReport(dsn, path string, report []byte) error {
|
||||
pkt, err := parseReport(path, report)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func sendReport(dsn string, pkt *raven.Packet, userID string) error {
|
||||
pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID})
|
||||
|
||||
clientsMut.Lock()
|
||||
defer clientsMut.Unlock()
|
||||
|
||||
cli, ok := clients[dsn]
|
||||
if !ok {
|
||||
var err error
|
||||
cli, err = raven.New(dsn)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -60,7 +58,7 @@ func sendReport(dsn, path string, report []byte) error {
|
||||
return <-errC
|
||||
}
|
||||
|
||||
func parseReport(path string, report []byte) (*raven.Packet, error) {
|
||||
func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
parts := bytes.SplitN(report, []byte("\n"), 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("no first line")
|
||||
@@ -109,7 +107,7 @@ func parseReport(path string, report []byte) (*raven.Packet, error) {
|
||||
loader.LockWithVersion(version.tag)
|
||||
} else {
|
||||
// Last resort
|
||||
loader.LockWithVersion("master")
|
||||
loader.LockWithVersion("main")
|
||||
}
|
||||
defer loader.Unlock()
|
||||
|
||||
@@ -124,44 +122,63 @@ func parseReport(path string, report []byte) (*raven.Packet, error) {
|
||||
}
|
||||
}
|
||||
|
||||
pkt := &raven.Packet{
|
||||
Message: string(subjectLine),
|
||||
Platform: "go",
|
||||
Release: version.tag,
|
||||
Environment: version.environment(),
|
||||
Tags: raven.Tags{
|
||||
raven.Tag{Key: "version", Value: version.version},
|
||||
raven.Tag{Key: "tag", Value: version.tag},
|
||||
raven.Tag{Key: "codename", Value: version.codename},
|
||||
raven.Tag{Key: "runtime", Value: version.runtime},
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
},
|
||||
Extra: raven.Extra{
|
||||
"url": reportServer + path,
|
||||
},
|
||||
Interfaces: []raven.Interface{&trace},
|
||||
}
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
pkt := packet(version, "crash")
|
||||
pkt.Message = string(subjectLine)
|
||||
pkt.Extra = raven.Extra{
|
||||
"url": reportServer + path,
|
||||
}
|
||||
pkt.Interfaces = []raven.Interface{&trace}
|
||||
pkt.Fingerprint = crashReportFingerprint(pkt.Message)
|
||||
|
||||
return pkt, nil
|
||||
}
|
||||
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC
|
||||
var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)`)
|
||||
var (
|
||||
indexRe = regexp.MustCompile(`\[[-:0-9]+\]`)
|
||||
sizeRe = regexp.MustCompile(`(length|capacity) [0-9]+`)
|
||||
ldbPosRe = regexp.MustCompile(`(\(pos=)([0-9]+)\)`)
|
||||
ldbChecksumRe = regexp.MustCompile(`(want=0x)([a-z0-9]+)( got=0x)([a-z0-9]+)`)
|
||||
ldbFileRe = regexp.MustCompile(`(\[file=)([0-9]+)(\.ldb\])`)
|
||||
ldbInternalKeyRe = regexp.MustCompile(`(internal key ")[^"]+(", len=)[0-9]+`)
|
||||
ldbPathRe = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)
|
||||
)
|
||||
|
||||
func crashReportFingerprint(message string) []string {
|
||||
// Do not fingerprint on the stack in case of db corruption or fatal
|
||||
// db io error - where it occurs doesn't matter.
|
||||
orig := message
|
||||
message = ldbPosRe.ReplaceAllString(message, "${1}x)")
|
||||
message = ldbFileRe.ReplaceAllString(message, "${1}x${3}")
|
||||
message = ldbChecksumRe.ReplaceAllString(message, "${1}X${3}X")
|
||||
message = ldbInternalKeyRe.ReplaceAllString(message, "${1}x${2}x")
|
||||
message = ldbPathRe.ReplaceAllString(message, "$1 x: ")
|
||||
if message != orig {
|
||||
return []string{message}
|
||||
}
|
||||
|
||||
message = indexRe.ReplaceAllString(message, "[x]")
|
||||
message = sizeRe.ReplaceAllString(message, "$1 x")
|
||||
|
||||
// {{ default }} is what sentry uses as a fingerprint by default. While
|
||||
// never specified, the docs point at this being some hash derived from the
|
||||
// stack trace. Here we include the filtered panic message on top of that.
|
||||
// https://docs.sentry.io/platforms/go/data-management/event-grouping/sdk-fingerprinting/#basic-example
|
||||
return []string{"{{ default }}", message}
|
||||
}
|
||||
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
|
||||
var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
|
||||
|
||||
type version struct {
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
extra []string // "foo", "bar"
|
||||
}
|
||||
|
||||
func (v version) environment() string {
|
||||
@@ -191,6 +208,7 @@ func parseVersion(line string) (version, error) {
|
||||
goarch: m[5],
|
||||
builder: m[6],
|
||||
}
|
||||
|
||||
parts := strings.Split(v.version, "+")
|
||||
v.tag = parts[0]
|
||||
if len(parts) > 1 {
|
||||
@@ -200,5 +218,38 @@ func parseVersion(line string) (version, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(m) >= 8 && m[7] != "" {
|
||||
tags := strings.Split(m[7], ",")
|
||||
for i := range tags {
|
||||
tags[i] = strings.TrimSpace(tags[i])
|
||||
}
|
||||
v.extra = tags
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func packet(version version, reportType string) *raven.Packet {
|
||||
pkt := &raven.Packet{
|
||||
Platform: "go",
|
||||
Release: version.tag,
|
||||
Environment: version.environment(),
|
||||
Tags: raven.Tags{
|
||||
raven.Tag{Key: "version", Value: version.version},
|
||||
raven.Tag{Key: "tag", Value: version.tag},
|
||||
raven.Tag{Key: "codename", Value: version.codename},
|
||||
raven.Tag{Key: "runtime", Value: version.runtime},
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
raven.Tag{Key: "report_type", Value: reportType},
|
||||
},
|
||||
}
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
}
|
||||
for _, tag := range version.extra {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
|
||||
}
|
||||
return pkt
|
||||
}
|
||||
|
||||
@@ -30,16 +30,30 @@ func TestParseVersion(t *testing.T) {
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
},
|
||||
},
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
extra: []string{"foo", "bar"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
v, err := parseVersion(tc.longVersion)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
|
||||
continue
|
||||
}
|
||||
if v != tc.parsed {
|
||||
t.Error(v)
|
||||
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
|
||||
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -50,7 +64,7 @@ func TestParseReport(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pkt, err := parseReport("1/2/345", bs)
|
||||
pkt, err := parseCrashReport("1/2/345", bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -62,3 +76,66 @@ func TestParseReport(t *testing.T) {
|
||||
|
||||
fmt.Printf("%s\n", bs)
|
||||
}
|
||||
|
||||
func TestCrashReportFingerprint(t *testing.T) {
|
||||
cases := []struct {
|
||||
message, exp string
|
||||
ldb bool
|
||||
}{
|
||||
{
|
||||
message: "panic: leveldb/table: corruption on data-block (pos=51308946): checksum mismatch, want=0xa89f9aa0 got=0xd27cc4c7 [file=004003.ldb]",
|
||||
exp: "panic: leveldb/table: corruption on data-block (pos=x): checksum mismatch, want=0xX got=0xX [file=x.ldb]",
|
||||
ldb: true,
|
||||
},
|
||||
{
|
||||
message: "panic: leveldb/table: corruption on table-footer (pos=248): bad magic number [file=001370.ldb]",
|
||||
exp: "panic: leveldb/table: corruption on table-footer (pos=x): bad magic number [file=x.ldb]",
|
||||
ldb: true,
|
||||
},
|
||||
{
|
||||
message: "panic: runtime error: slice bounds out of range [4294967283:4194304]",
|
||||
exp: "panic: runtime error: slice bounds out of range [x]",
|
||||
},
|
||||
{
|
||||
message: "panic: runtime error: slice bounds out of range [-2:]",
|
||||
exp: "panic: runtime error: slice bounds out of range [x]",
|
||||
},
|
||||
{
|
||||
message: "panic: runtime error: slice bounds out of range [:4294967283] with capacity 32768",
|
||||
exp: "panic: runtime error: slice bounds out of range [x] with capacity x",
|
||||
},
|
||||
{
|
||||
message: "panic: runtime error: index out of range [0] with length 0",
|
||||
exp: "panic: runtime error: index out of range [x] with length x",
|
||||
},
|
||||
{
|
||||
message: `panic: leveldb: internal key "\x01", len=1: invalid length`,
|
||||
exp: `panic: leveldb: internal key "x", len=x: invalid length`,
|
||||
ldb: true,
|
||||
},
|
||||
{
|
||||
message: `panic: write /var/syncthing/config/index-v0.14.0.db/2732813.log: cannot allocate memory`,
|
||||
exp: `panic: write x: cannot allocate memory`,
|
||||
ldb: true,
|
||||
},
|
||||
{
|
||||
message: `panic: filling Blocks: read C:\Users\Serv-Resp-Tizayuca\AppData\Local\Syncthing\index-v0.14.0.db\006561.ldb: Error de datos (comprobación de redundancia cíclica).`,
|
||||
exp: `panic: filling Blocks: read x: Error de datos (comprobación de redundancia cíclica).`,
|
||||
ldb: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
fingerprint := crashReportFingerprint(tc.message)
|
||||
|
||||
expLen := 2
|
||||
if tc.ldb {
|
||||
expLen = 1
|
||||
}
|
||||
if l := len(fingerprint); l != expLen {
|
||||
t.Errorf("tc %v: Unexpected fingerprint length: %v != %v", i, l, expLen)
|
||||
} else if msg := fingerprint[expLen-1]; msg != tc.exp {
|
||||
t.Errorf("tc %v:\n\"%v\" !=\n\"%v\"", i, msg, tc.exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,8 +72,12 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b
|
||||
url := urlPrefix + l.version + filename[idx:]
|
||||
resp, err := l.client.Get(url)
|
||||
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
fmt.Println("Loading source:", err.Error())
|
||||
if err != nil {
|
||||
fmt.Println("Loading source:", err)
|
||||
return nil, 0
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
fmt.Println("Loading source:", resp.Status)
|
||||
return nil, 0
|
||||
}
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
|
||||
@@ -4,18 +4,11 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Command stcrashreceiver is a trivial HTTP server that allows two things:
|
||||
//
|
||||
// - uploading files (crash reports) named like a SHA256 hash using a PUT request
|
||||
// - checking whether such file exists using a HEAD request
|
||||
//
|
||||
// Typically this should be deployed behind something that manages HTTPS.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -26,25 +19,6 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const maxRequestSize = 1 << 20 // 1 MiB
|
||||
|
||||
func main() {
|
||||
dir := flag.String("dir", ".", "Directory to store reports in")
|
||||
dsn := flag.String("dsn", "", "Sentry DSN")
|
||||
listen := flag.String("listen", ":22039", "HTTP listen address")
|
||||
flag.Parse()
|
||||
|
||||
cr := &crashReceiver{
|
||||
dir: *dir,
|
||||
dsn: *dsn,
|
||||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
if err := http.ListenAndServe(*listen, cr); err != nil {
|
||||
log.Fatalln("HTTP serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
type crashReceiver struct {
|
||||
dir string
|
||||
dsn string
|
||||
@@ -145,10 +119,18 @@ func (r *crashReceiver) servePut(reportID, fullPath string, w http.ResponseWrite
|
||||
|
||||
// Send the report to Sentry
|
||||
if r.dsn != "" {
|
||||
// Remote ID
|
||||
user := userIDFor(req)
|
||||
|
||||
go func() {
|
||||
// There's no need for the client to have to wait for this part.
|
||||
if err := sendReport(r.dsn, reportID, bs); err != nil {
|
||||
log.Println("Failed to send report:", err)
|
||||
pkt, err := parseCrashReport(reportID, bs)
|
||||
if err != nil {
|
||||
log.Println("Failed to parse crash report:", err)
|
||||
return
|
||||
}
|
||||
if err := sendReport(r.dsn, pkt, user); err != nil {
|
||||
log.Println("Failed to send crash report:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
@@ -47,14 +48,16 @@ func main() {
|
||||
log.Println("My ID:", myID)
|
||||
}
|
||||
|
||||
runbeacon(beacon.NewMulticast(mc), fake)
|
||||
runbeacon(beacon.NewBroadcast(bc), fake)
|
||||
ctx := context.Background()
|
||||
|
||||
runbeacon(ctx, beacon.NewMulticast(mc), fake)
|
||||
runbeacon(ctx, beacon.NewBroadcast(bc), fake)
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
func runbeacon(bc beacon.Interface, fake bool) {
|
||||
go bc.Serve()
|
||||
func runbeacon(ctx context.Context, bc beacon.Interface, fake bool) {
|
||||
go bc.Serve(ctx)
|
||||
go recv(bc)
|
||||
if fake {
|
||||
go send(bc)
|
||||
|
||||
@@ -66,12 +66,12 @@ func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve() {
|
||||
func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
s.listener = listener
|
||||
} else {
|
||||
@@ -93,7 +93,7 @@ func (s *apiSrv) Serve() {
|
||||
tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
s.listener = tlsListener
|
||||
}
|
||||
@@ -107,9 +107,11 @@ func (s *apiSrv) Serve() {
|
||||
MaxHeaderBytes: httpMaxHeaderBytes,
|
||||
}
|
||||
|
||||
if err := srv.Serve(s.listener); err != nil {
|
||||
err := srv.Serve(s.listener)
|
||||
if err != nil {
|
||||
log.Println("Serve:", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var topCtx = context.Background()
|
||||
@@ -132,11 +134,19 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println(reqID, req.Method, req.URL)
|
||||
}
|
||||
|
||||
var remoteIP net.IP
|
||||
remoteAddr := &net.TCPAddr{
|
||||
IP: nil,
|
||||
Port: -1,
|
||||
}
|
||||
|
||||
if s.useHTTP {
|
||||
remoteIP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
|
||||
remoteAddr.IP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
|
||||
if parsedPort, err := strconv.ParseInt(req.Header.Get("X-Client-Port"), 10, 0); err == nil {
|
||||
remoteAddr.Port = int(parsedPort)
|
||||
}
|
||||
} else {
|
||||
addr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr)
|
||||
var err error
|
||||
remoteAddr, err = net.ResolveTCPAddr("tcp", req.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Println("remoteAddr:", err)
|
||||
lw.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -144,14 +154,13 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
|
||||
apiRequestsTotal.WithLabelValues("no_remote_addr").Inc()
|
||||
return
|
||||
}
|
||||
remoteIP = addr.IP
|
||||
}
|
||||
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
s.handleGET(ctx, lw, req)
|
||||
case "POST":
|
||||
s.handlePOST(ctx, remoteIP, lw, req)
|
||||
s.handlePOST(ctx, remoteAddr, lw, req)
|
||||
default:
|
||||
http.Error(lw, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
@@ -217,7 +226,7 @@ func (s *apiSrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func (s *apiSrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.ResponseWriter, req *http.Request) {
|
||||
func (s *apiSrv) handlePOST(ctx context.Context, remoteAddr *net.TCPAddr, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
rawCert := certificateBytes(req)
|
||||
@@ -244,7 +253,7 @@ func (s *apiSrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.Respons
|
||||
|
||||
deviceID := protocol.NewDeviceID(rawCert)
|
||||
|
||||
addresses := fixupAddresses(remoteIP, ann.Addresses)
|
||||
addresses := fixupAddresses(remoteAddr, ann.Addresses)
|
||||
if len(addresses) == 0 {
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -252,7 +261,7 @@ func (s *apiSrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.Respons
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.handleAnnounce(remoteIP, deviceID, addresses); err != nil {
|
||||
if err := s.handleAnnounce(deviceID, addresses); err != nil {
|
||||
announceRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
@@ -269,7 +278,7 @@ func (s *apiSrv) Stop() {
|
||||
s.listener.Close()
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleAnnounce(remote net.IP, deviceID protocol.DeviceID, addresses []string) error {
|
||||
func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
|
||||
key := deviceID.String()
|
||||
now := time.Now()
|
||||
expire := now.Add(addressExpiryTime).UnixNano()
|
||||
@@ -364,7 +373,7 @@ func certificateBytes(req *http.Request) []byte {
|
||||
|
||||
// fixupAddresses checks the list of addresses, removing invalid ones and
|
||||
// replacing unspecified IPs with the given remote IP.
|
||||
func fixupAddresses(remote net.IP, addresses []string) []string {
|
||||
func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
|
||||
fixed := make([]string, 0, len(addresses))
|
||||
for _, annAddr := range addresses {
|
||||
uri, err := url.Parse(annAddr)
|
||||
@@ -384,27 +393,34 @@ func fixupAddresses(remote net.IP, addresses []string) []string {
|
||||
continue
|
||||
}
|
||||
|
||||
if host == "" || ip.IsUnspecified() {
|
||||
// Replace the unspecified IP with the request source.
|
||||
if remote != nil {
|
||||
if host == "" || ip.IsUnspecified() {
|
||||
// Replace the unspecified IP with the request source.
|
||||
|
||||
// ... unless the request source is the loopback address or
|
||||
// multicast/unspecified (can't happen, really).
|
||||
if remote.IsLoopback() || remote.IsMulticast() || remote.IsUnspecified() {
|
||||
continue
|
||||
// ... unless the request source is the loopback address or
|
||||
// multicast/unspecified (can't happen, really).
|
||||
if remote.IP.IsLoopback() || remote.IP.IsMulticast() || remote.IP.IsUnspecified() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv6 remote address if requested scheme is ...4
|
||||
// (i.e., tcp4, etc.)
|
||||
if strings.HasSuffix(uri.Scheme, "4") && remote.IP.To4() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv4 remote address if requested scheme is ...6
|
||||
if strings.HasSuffix(uri.Scheme, "6") && remote.IP.To4() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host = remote.IP.String()
|
||||
}
|
||||
|
||||
// Do not use IPv6 remote address if requested scheme is ...4
|
||||
// (i.e., tcp4, etc.)
|
||||
if strings.HasSuffix(uri.Scheme, "4") && remote.To4() == nil {
|
||||
continue
|
||||
// If zero port was specified, use remote port.
|
||||
if port == "0" && remote.Port > 0 {
|
||||
port = fmt.Sprintf("%d", remote.Port)
|
||||
}
|
||||
|
||||
// Do not use IPv4 remote address if requested scheme is ...6
|
||||
if strings.HasSuffix(uri.Scheme, "6") && remote.To4() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host = remote.String()
|
||||
}
|
||||
|
||||
uri.Host = net.JoinHostPort(host, port)
|
||||
|
||||
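Because the remote parameter is now a *net.TCPAddr rather than a bare IP, the function can substitute both the unspecified host and a zero port from the request source. A hedged usage sketch, assumed to live in the same package as fixupAddresses (addresses are illustrative); it mirrors the test table that follows:

// Illustrative only: a request arriving from 203.0.113.7:45000 would
// rewrite announced addresses roughly as shown in the comments.
func ExampleFixupAddresses() {
	remote := &net.TCPAddr{IP: net.ParseIP("203.0.113.7"), Port: 45000}
	fixed := fixupAddresses(remote, []string{
		"tcp://:22000",           // unspecified host -> remote IP
		"tcp://0.0.0.0:0",        // unspecified host and port -> remote IP and port
		"tcp://192.0.2.42:22000", // already usable, kept as-is
	})
	fmt.Println(fixed)
	// Output: [tcp://203.0.113.7:22000 tcp://203.0.113.7:45000 tcp://192.0.2.42:22000]
}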
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
func TestFixupAddresses(t *testing.T) {
|
||||
cases := []struct {
|
||||
remote net.IP
|
||||
remote *net.TCPAddr
|
||||
in []string
|
||||
out []string
|
||||
}{
|
||||
@@ -22,37 +22,53 @@ func TestFixupAddresses(t *testing.T) {
|
||||
in: []string{"tcp://1.2.3.4:22000"},
|
||||
out: []string{"tcp://1.2.3.4:22000"},
|
||||
}, { // unspecified replaced by remote
|
||||
remote: net.ParseIP("1.2.3.4"),
|
||||
remote: addr("1.2.3.4", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://1.2.3.4:22000", "tcp://192.0.2.42:22000"},
|
||||
}, { // unspecified not used as replacement
|
||||
remote: net.ParseIP("0.0.0.0"),
|
||||
remote: addr("0.0.0.0", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // unspecified not used as replacement
|
||||
remote: net.ParseIP("::"),
|
||||
remote: addr("::", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // localhost not used as replacement
|
||||
remote: net.ParseIP("127.0.0.1"),
|
||||
remote: addr("127.0.0.1", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // localhost not used as replacement
|
||||
remote: net.ParseIP("::1"),
|
||||
remote: addr("::1", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // multicast not used as replacement
|
||||
remote: net.ParseIP("224.0.0.1"),
|
||||
remote: addr("224.0.0.1", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // multicast not used as replacement
|
||||
remote: net.ParseIP("ff80::42"),
|
||||
remote: addr("ff80::42", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // explicitly announced weirdness is also filtered
|
||||
remote: net.ParseIP("192.0.2.42"),
|
||||
remote: addr("192.0.2.42", 22000),
|
||||
in: []string{"tcp://:22000", "tcp://127.1.2.3:22000", "tcp://[::1]:22000", "tcp://[ff80::42]:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // port remapping
|
||||
remote: addr("123.123.123.123", 9000),
|
||||
in: []string{"tcp://0.0.0.0:0"},
|
||||
out: []string{"tcp://123.123.123.123:9000"},
|
||||
}, { // unspecified port remapping
|
||||
remote: addr("123.123.123.123", 9000),
|
||||
in: []string{"tcp://:0"},
|
||||
out: []string{"tcp://123.123.123.123:9000"},
|
||||
}, { // empty remapping
|
||||
remote: addr("123.123.123.123", 9000),
|
||||
in: []string{"tcp://"},
|
||||
out: []string{},
|
||||
}, { // port only remapping
|
||||
remote: addr("123.123.123.123", 9000),
|
||||
in: []string{"tcp://44.44.44.44:0"},
|
||||
out: []string{"tcp://44.44.44.44:9000"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -63,3 +79,10 @@ func TestFixupAddresses(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addr(host string, port int) *net.TCPAddr {
|
||||
return &net.TCPAddr{
|
||||
IP: net.ParseIP(host),
|
||||
Port: port,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,12 +4,13 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../script/protofmt.go database.proto
|
||||
//go:generate go run ../../proto/scripts/protofmt.go database.proto
|
||||
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
@@ -37,7 +38,6 @@ type database interface {
|
||||
type levelDBStore struct {
|
||||
db *leveldb.DB
|
||||
inbox chan func()
|
||||
stop chan struct{}
|
||||
clock clock
|
||||
marshalBuf []byte
|
||||
}
|
||||
@@ -50,7 +50,6 @@ func newLevelDBStore(dir string) (*levelDBStore, error) {
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
stop: make(chan struct{}),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
@@ -155,7 +154,7 @@ func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Serve() {
|
||||
func (s *levelDBStore) Serve(ctx context.Context) error {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
defer s.db.Close()
|
||||
@@ -183,7 +182,7 @@ loop:
|
||||
// the next.
|
||||
t.Reset(databaseStatisticsInterval)
|
||||
|
||||
case <-s.stop:
|
||||
case <-ctx.Done():
|
||||
// We're done.
|
||||
close(statisticsTrigger)
|
||||
break loop
|
||||
@@ -192,6 +191,8 @@ loop:
|
||||
|
||||
// Also wait for statisticsServe to return
|
||||
<-statisticsDone
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
|
||||
@@ -255,10 +256,6 @@ func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- stru
|
||||
}
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
// merge returns the merged result of the two database records a and b. The
|
||||
// result is the union of the two address sets, with the newer expiry time
|
||||
// chosen for any duplicates.
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
@@ -20,8 +21,9 @@ func TestDatabaseGetSet(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
go db.Serve()
|
||||
defer db.Stop()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go db.Serve(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Check missing record
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# Default settings for syncthing-relaysrv (strelaysrv).
|
||||
# Default settings for syncthing-discosrv (stdiscosrv).
|
||||
## Add Options here:
|
||||
DISCOSRV_OPTS=
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"log"
|
||||
@@ -21,7 +22,7 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
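The move from suture to suture/v4 is what drives the recurring change from Serve() plus Stop() to a single Serve(ctx context.Context) error across these services: a v4 service runs until its context is cancelled and reports how it exited through the returned error. A minimal sketch of a service written against that interface (the pinger service is hypothetical):

package main

import (
	"context"
	"log"
	"time"

	"github.com/thejerf/suture/v4"
)

// pinger is a hypothetical service following the suture/v4 contract:
// do work until ctx is cancelled, then return.
type pinger struct{}

func (pinger) Serve(ctx context.Context) error {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			log.Println("ping")
		case <-ctx.Done():
			return nil // clean shutdown, no separate Stop method needed
		}
	}
}

func main() {
	sup := suture.NewSimple("main")
	sup.Add(pinger{})

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(3 * time.Second)
		cancel() // stops the supervisor and, through ctx, every service
	}()
	if err := sup.Serve(ctx); err != nil {
		log.Println("supervisor exited:", err)
	}
}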
const (
|
||||
@@ -92,20 +93,21 @@ func main() {
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
log.Println(build.LongVersion)
|
||||
log.Println(build.LongVersionFor("stdiscosrv"))
|
||||
if *showVersion {
|
||||
return
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalln("Failed to load keypair:", err)
|
||||
}
|
||||
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
|
||||
@@ -182,5 +184,5 @@ func main() {
|
||||
}
|
||||
|
||||
// Engage!
|
||||
main.Serve()
|
||||
main.Serve(context.Background())
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
@@ -32,7 +33,6 @@ type replicationSender struct {
|
||||
cert tls.Certificate // our certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
outbox chan ReplicationRecord
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
|
||||
@@ -41,11 +41,10 @@ func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protoco
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Serve() {
|
||||
func (s *replicationSender) Serve(ctx context.Context) error {
|
||||
// Sleep a little at startup. Peers often restart at the same time, and
|
||||
// this avoids the service failing and entering backoff state
|
||||
// unnecessarily, while also reducing the reconnect rate to something
|
||||
@@ -62,7 +61,7 @@ func (s *replicationSender) Serve() {
|
||||
conn, err := tls.Dial("tcp", s.dst, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
@@ -73,13 +72,13 @@ func (s *replicationSender) Serve() {
|
||||
remoteID, err := deviceID(conn)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify it's in the set of allowed device IDs.
|
||||
if !deviceIDIn(remoteID, s.allowedIDs) {
|
||||
log.Println("Replication connect: unexpected device ID:", remoteID)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
|
||||
@@ -122,20 +121,16 @@ func (s *replicationSender) Serve() {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are losing the replication event here.
|
||||
return
|
||||
return err
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-s.stop:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
@@ -172,7 +167,6 @@ type replicationListener struct {
|
||||
cert tls.Certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
db database
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
|
||||
@@ -181,11 +175,10 @@ func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []prot
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
db: db,
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Serve() {
|
||||
func (l *replicationListener) Serve(ctx context.Context) error {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{l.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
@@ -196,14 +189,14 @@ func (l *replicationListener) Serve() {
|
||||
lst, err := tls.Listen("tcp", l.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication listen:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
defer lst.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-l.stop:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
@@ -211,7 +204,7 @@ func (l *replicationListener) Serve() {
|
||||
conn, err := lst.Accept()
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// Figure out the other side device ID
|
||||
@@ -231,19 +224,15 @@ func (l *replicationListener) Serve() {
|
||||
continue
|
||||
}
|
||||
|
||||
go l.handle(conn)
|
||||
go l.handle(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Stop() {
|
||||
close(l.stop)
|
||||
}
|
||||
|
||||
func (l *replicationListener) String() string {
|
||||
return fmt.Sprintf("replicationListener(%q)", l.addr)
|
||||
}
|
||||
|
||||
func (l *replicationListener) handle(conn net.Conn) {
|
||||
func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
@@ -253,7 +242,7 @@ func (l *replicationListener) handle(conn net.Conn) {
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-l.stop:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
@@ -28,16 +28,21 @@ func main() {
|
||||
log.SetFlags(0)
|
||||
|
||||
target := flag.String("target", "localhost:8384", "Target Syncthing instance")
|
||||
types := flag.String("types", "", "Filter for specific event types (comma-separated)")
|
||||
apikey := flag.String("apikey", "", "Syncthing API key")
|
||||
flag.Parse()
|
||||
|
||||
if *apikey == "" {
|
||||
log.Fatal("Must give -apikey argument")
|
||||
}
|
||||
var eventsArg string
|
||||
if len(*types) > 0 {
|
||||
eventsArg = "&events=" + *types
|
||||
}
|
||||
|
||||
since := 0
|
||||
for {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/rest/events?since=%d", *target, since), nil)
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/rest/events?since=%d%s", *target, since, eventsArg), nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
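The added -types flag simply appends an events= filter to the long-poll URL. A standalone sketch of the same polling loop against the Syncthing events endpoint, with the API key sent in the X-API-Key header (target, key and type names are placeholders):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// event holds the fields of a Syncthing event we care about here.
type event struct {
	ID   int             `json:"id"`
	Type string          `json:"type"`
	Data json.RawMessage `json:"data"`
}

func main() {
	const target = "localhost:8384"               // placeholder
	const apikey = "abc123"                       // placeholder
	const types = "ItemFinished,FolderCompletion" // placeholder filter

	since := 0
	for {
		url := fmt.Sprintf("http://%s/rest/events?since=%d&events=%s", target, since, types)
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			log.Fatal(err)
		}
		req.Header.Set("X-API-Key", apikey)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		var events []event
		err = json.NewDecoder(resp.Body).Decode(&events)
		resp.Body.Close()
		if err != nil {
			log.Fatal(err)
		}

		for _, ev := range events {
			fmt.Println(ev.ID, ev.Type)
			since = ev.ID // continue from the last event seen
		}
	}
}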
@@ -7,6 +7,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"flag"
|
||||
@@ -95,7 +96,7 @@ func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
})
|
||||
|
||||
go func() {
|
||||
addresses, err := disco.Lookup(deviceID)
|
||||
addresses, err := disco.Lookup(context.Background(), deviceID)
|
||||
res <- checkResult{addresses: addresses, error: err}
|
||||
}()
|
||||
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func dump(ldb backend.Backend) {
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", it.Key(), it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
val := it.Value()
|
||||
var real, virt time.Time
|
||||
real.UnmarshalBinary(val[:len(val)/2])
|
||||
virt.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
val := it.Value()
|
||||
if len(val) == 0 {
|
||||
fmt.Printf("[deviceidx] K:%d V:<nil>\n", key)
|
||||
} else {
|
||||
dev := protocol.DeviceIDFromBytes(val)
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, dev)
|
||||
}
|
||||
|
||||
default:
|
||||
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var mode string
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
flag.StringVar(&mode, "mode", "dump", "Mode of operation: dump, dumpsize, idxck")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
path := flag.Arg(0)
|
||||
if path == "" {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
ldb, err := backend.OpenLevelDBRO(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if mode == "dump" {
|
||||
dump(ldb)
|
||||
} else if mode == "dumpsize" {
|
||||
dumpsize(ldb)
|
||||
} else if mode == "idxck" {
|
||||
if !idxck(ldb) {
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Unknown mode")
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func defaultConfigDir() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if p := os.Getenv("LocalAppData"); p != "" {
|
||||
return filepath.Join(p, "Syncthing")
|
||||
}
|
||||
return filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
|
||||
case "darwin":
|
||||
dir, err := fs.ExpandTilde("~/Library/Application Support/Syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
|
||||
default:
|
||||
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
|
||||
return filepath.Join(xdgCfg, "syncthing")
|
||||
}
|
||||
dir, err := fs.ExpandTilde("~/.config/syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,15 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// The existence of this file means we get 0% test coverage rather than no
|
||||
// test coverage at all. Remove when implementing an actual test.
|
||||
//+build noassets
|
||||
|
||||
package stats
|
||||
package auto
|
||||
|
||||
import "github.com/syncthing/syncthing/lib/assets"
|
||||
|
||||
func Assets() map[string]assets.Asset {
|
||||
return nil
|
||||
}
|
||||
@@ -9,8 +9,15 @@
|
||||
<meta name="author" content=""/>
|
||||
|
||||
<title>Relay stats</title>
|
||||
<link href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
|
||||
<link rel="stylesheet" href="//use.fontawesome.com/releases/v5.0.13/css/all.css"/>
|
||||
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"/>
|
||||
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
|
||||
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.0.13/css/all.css"/>
|
||||
<link rel="stylesheet" href="https://unpkg.com/leaflet@1.6.0/dist/leaflet.css"
|
||||
integrity="sha512-xwE/Az9zrjBIphAcBb3F6JVqxf46+CDLwfLMHloNu6KEQCAWi6HcDUbeOfBIptF7tcCzusKFjFw2yuvEpDL9wQ=="
|
||||
crossorigin=""/>
|
||||
<script src="https://unpkg.com/leaflet@1.6.0/dist/leaflet.js"
|
||||
integrity="sha512-gZwIG9x3wUXg2hdXF6+rVkLF/0Vi9U8D2Ntg4Ga5I5BZpVkVxlJWbSQtXPSiUTtC0TjtGOmxa1AJPuV0CPthew=="
|
||||
crossorigin=""></script>
|
||||
|
||||
<style>
|
||||
#map {
|
||||
@@ -38,13 +45,15 @@
|
||||
<div class="container">
|
||||
<h1>Relay Pool Data</h1>
|
||||
<div ng-if="relays === undefined" class="text-center">
|
||||
<img src="//cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
|
||||
<img src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
|
||||
<p>Please wait while we gather data</p>
|
||||
</div>
|
||||
<div>
|
||||
<div ng-show="relays !== undefined" class="ng-hide">
|
||||
<p>
|
||||
Currently {{ relays.length }} relays online ({{ totals.goMaxProcs }} cores in total).
|
||||
The relays listed on this page are not managed or vetted by the Syncthing project.
|
||||
Each relay is the responsibility of the relay operator.
|
||||
Currently {{ relays.length }} relays online.
|
||||
</p>
|
||||
</div>
|
||||
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
|
||||
@@ -184,10 +193,9 @@
|
||||
</div>
|
||||
|
||||
|
||||
<script type="text/javascript" src="//code.jquery.com/jquery-2.1.4.min.js"></script>
|
||||
<script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
|
||||
<script type="text/javascript" src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<script type="text/javascript" src="//maps.googleapis.com/maps/api/js?key=AIzaSyDk5WJ8s7ueLKb99X5DbQ-vkWtPDAKqYs0"></script>
|
||||
<script type="text/javascript" src="https://code.jquery.com/jquery-2.1.4.min.js"></script>
|
||||
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
|
||||
<script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
</body>
|
||||
|
||||
<script>
|
||||
@@ -228,11 +236,12 @@
|
||||
numProxies: 0,
|
||||
uptimeSeconds: 0,
|
||||
};
|
||||
$scope.map = new google.maps.Map(document.getElementById('map'), {
|
||||
zoom: 1,
|
||||
mapTypeId: google.maps.MapTypeId.ROADMAP
|
||||
});
|
||||
$scope.mapBounds = new google.maps.LatLngBounds();
|
||||
$scope.map = L.map('map').setView([40.90296, 1.90925], 2);
|
||||
L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
|
||||
{
|
||||
attribution: 'Leaflet',
|
||||
maxZoom: 17
|
||||
}).addTo($scope.map);
|
||||
$scope.tooltipTemplate = $('#infoTemplate').html();
|
||||
$scope.usedLocations = {};
|
||||
$scope.sortType = 'stats.numActiveSessions';
|
||||
@@ -279,8 +288,9 @@
|
||||
}
|
||||
});
|
||||
|
||||
$scope.map.fitBounds($scope.mapBounds);
|
||||
if ($scope.relays.length == 1) {
|
||||
//Center to only relay with zoom
|
||||
$scope.map.panTo(new L.LatLng(relays[0].location.latitude, relays[0].location.longitude));
|
||||
$scope.map.setZoom(13);
|
||||
}
|
||||
});
|
||||
@@ -300,44 +310,50 @@
|
||||
|
||||
var locParts = loc.split(',');
|
||||
|
||||
relay.marker = new google.maps.Marker({
|
||||
map: $scope.map,
|
||||
position: new google.maps.LatLng(locParts[0], locParts[1]),
|
||||
relay.marker = new L.Marker([relay.location.latitude, relay.location.longitude],{
|
||||
title: relay.url,
|
||||
});
|
||||
|
||||
var scope = $rootScope.$new(true);
|
||||
scope.relay = relay;
|
||||
|
||||
relay.marker.info = new google.maps.InfoWindow({
|
||||
content: $compile($scope.tooltipTemplate)(scope)[0],
|
||||
var icon = new L.Icon({
|
||||
iconSize: [18, 28], // size of the icon
|
||||
iconAnchor: [9, 28], // point of the icon which will correspond to marker's location
|
||||
shadowAnchor: [0, 0], // the same for the shadow
|
||||
popupAnchor: [0, -27], // popup anchor
|
||||
shadowSize: [0,0],
|
||||
iconUrl: 'https://cdn.rawgit.com/pointhi/leaflet-color-markers/master/img/marker-icon-red.png',
|
||||
shadowUrl: 'https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/images/marker-shadow.png',
|
||||
});
|
||||
|
||||
relay.marker = new L.marker(new L.latLng(locParts[0], locParts[1]),{icon})
|
||||
.bindPopup($compile($scope.tooltipTemplate)(scope)[0],{})
|
||||
.on('mouseover', function (e) {
|
||||
this.openPopup();
|
||||
}).on('mouseout', function (e) {
|
||||
this.closePopup();
|
||||
}).addTo($scope.map);
|
||||
|
||||
relay.showMarker = function() {
|
||||
relay.marker.info.open($scope.map, relay.marker);
|
||||
relay.marker.openPopup();
|
||||
}
|
||||
|
||||
|
||||
relay.hideMarker = function() {
|
||||
relay.marker.info.close();
|
||||
relay.marker.closePopup();
|
||||
}
|
||||
}
|
||||
|
||||
relay.marker.addListener('mouseover', relay.showMarker);
|
||||
relay.marker.addListener('mouseout', relay.hideMarker);
|
||||
|
||||
$scope.mapBounds.extend(relay.marker.position);
|
||||
}
|
||||
|
||||
function addCircleToMap(relay) {
|
||||
relay.marker.circle = new google.maps.Circle({
|
||||
strokeColor: '#FF0000',
|
||||
strokeOpacity: 0.8,
|
||||
strokeWeight: 2,
|
||||
fillColor: '#FF0000',
|
||||
fillOpacity: 0.35,
|
||||
map: $scope.map,
|
||||
center: relay.marker.position,
|
||||
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000
|
||||
});
|
||||
console.log(relay.location.latitude)
|
||||
L.circle([relay.location.latitude, relay.location.longitude],
|
||||
{
|
||||
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
|
||||
color: "FF0000",
|
||||
fillColor: "#FF0000",
|
||||
fillOpacity: 0.35,
|
||||
}).addTo($scope.map);
|
||||
}
|
||||
|
||||
function constructURI(url) {
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
//go:generate go run ../../script/genassets.go gui >auto/gui.go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -24,11 +22,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/assets"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
@@ -93,33 +94,35 @@ type result struct {
|
||||
}
|
||||
|
||||
var (
|
||||
testCert tls.Certificate
|
||||
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 2
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 2
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
statsRefresh = time.Minute / 2
|
||||
testCert tls.Certificate
|
||||
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 2
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 2
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
statsRefresh = time.Minute / 2
|
||||
requestQueueLen = 10
|
||||
requestProcessors = 1
|
||||
|
||||
getMut = sync.NewRWMutex()
|
||||
getMut = sync.NewMutex()
|
||||
getLRUCache *lru.Cache
|
||||
|
||||
postMut = sync.NewRWMutex()
|
||||
postMut = sync.NewMutex()
|
||||
postLRUCache *lru.Cache
|
||||
|
||||
requests = make(chan request, 10)
|
||||
requests chan request
|
||||
|
||||
mut = sync.NewRWMutex()
|
||||
knownRelays = make([]*relay, 0)
|
||||
@@ -132,6 +135,9 @@ const (
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(log.Lshortfile)
|
||||
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem are stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
@@ -147,9 +153,13 @@ func main() {
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
|
||||
flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
|
||||
flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
requests = make(chan request, requestQueueLen)
|
||||
|
||||
getLimit = 10 * time.Second / time.Duration(getLimitAvg)
|
||||
postLimit = time.Minute / time.Duration(postLimitAvg)
|
||||
|
||||
@@ -165,7 +175,9 @@ func main() {
|
||||
|
||||
testCert = createTestCertificate()
|
||||
|
||||
go requestProcessor()
|
||||
for i := 0; i < requestProcessors; i++ {
|
||||
go requestProcessor()
|
||||
}
|
||||
|
||||
// Load relays from cache in the background.
|
||||
// Load them in a serial fashion to make sure any genuine requests
|
||||
@@ -199,6 +211,7 @@ func main() {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
MinVersion: tls.VersionTLS10, // No SSLv3
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
CipherSuites: []uint16{
|
||||
// No RC4
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
@@ -254,88 +267,27 @@ func handleMetrics(w http.ResponseWriter, r *http.Request) {
|
||||
func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", "no-cache, must-revalidate")
|
||||
|
||||
assets := auto.Assets()
|
||||
path := r.URL.Path[1:]
|
||||
if path == "" {
|
||||
path = "index.html"
|
||||
}
|
||||
|
||||
bs, ok := assets[path]
|
||||
as, ok := auto.Assets()[path]
|
||||
if !ok {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
etag := fmt.Sprintf("%d", auto.Generated)
|
||||
modified := time.Unix(auto.Generated, 0).UTC()
|
||||
|
||||
w.Header().Set("Last-Modified", modified.Format(http.TimeFormat))
|
||||
w.Header().Set("Etag", etag)
|
||||
|
||||
mtype := mimeTypeForFile(path)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
|
||||
if t, err := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modified.Add(time.Second).After(t) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
|
||||
if match := r.Header.Get("If-None-Match"); match != "" {
|
||||
if strings.Contains(match, etag) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
// gunzip if the browser did not send a gzip Accept-Encoding header
|
||||
var gr *gzip.Reader
|
||||
gr, _ = gzip.NewReader(bytes.NewReader(bs))
|
||||
bs, _ = ioutil.ReadAll(gr)
|
||||
gr.Close()
|
||||
}
|
||||
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
|
||||
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func mimeTypeForFile(file string) string {
|
||||
// We use a built in table of the common types since the system
|
||||
// TypeByExtension might be unreliable. But if we don't know, we delegate
|
||||
// to the system.
|
||||
ext := filepath.Ext(file)
|
||||
switch ext {
|
||||
case ".htm", ".html":
|
||||
return "text/html"
|
||||
case ".css":
|
||||
return "text/css"
|
||||
case ".js":
|
||||
return "application/javascript"
|
||||
case ".json":
|
||||
return "application/json"
|
||||
case ".png":
|
||||
return "image/png"
|
||||
case ".ttf":
|
||||
return "application/x-font-ttf"
|
||||
case ".woff":
|
||||
return "application/x-font-woff"
|
||||
case ".svg":
|
||||
return "image/svg+xml"
|
||||
default:
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
assets.Serve(w, r, as)
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
|
||||
lw := NewLoggingResponseWriter(w)
|
||||
|
||||
w = NewLoggingResponseWriter(w)
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
lw := w.(*loggingResponseWriter)
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
@@ -364,21 +316,38 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetRequest(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
mut.RLock()
|
||||
relays := append(permanentRelays, knownRelays...)
|
||||
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
|
||||
n := copy(relays, permanentRelays)
|
||||
copy(relays[n:], knownRelays)
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
rand.Shuffle(relays)
|
||||
|
||||
json.NewEncoder(w).Encode(map[string][]*relay{
|
||||
w := io.Writer(rw)
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
rw.Header().Set("Content-Encoding", "gzip")
|
||||
gw := gzip.NewWriter(rw)
|
||||
defer gw.Close()
|
||||
w = gw
|
||||
}
|
||||
|
||||
_ = json.NewEncoder(w).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
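handleGetRequest now streams its JSON through a gzip.Writer when the client advertises gzip support, rather than special-casing compression the way the removed asset handler did. A generic sketch of that negotiation pattern (handler and payload are hypothetical):

package main

import (
	"compress/gzip"
	"encoding/json"
	"io"
	"net/http"
	"strings"
)

// listHandler writes JSON either directly or through a gzip.Writer,
// depending on the client's Accept-Encoding header.
func listHandler(rw http.ResponseWriter, r *http.Request) {
	rw.Header().Set("Content-Type", "application/json; charset=utf-8")

	var w io.Writer = rw
	if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		rw.Header().Set("Content-Encoding", "gzip")
		gw := gzip.NewWriter(rw)
		defer gw.Close() // flushes the gzip trailer when the handler returns
		w = gw
	}

	_ = json.NewEncoder(w).Encode(map[string][]string{
		"relays": {"relay://a.example:22067", "relay://b.example:22067"},
	})
}

func main() {
	http.HandleFunc("/endpoint", listHandler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}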
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
var relayCert *x509.Certificate
|
||||
if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
|
||||
relayCert = r.TLS.PeerCertificates[0]
|
||||
log.Printf("Got TLS cert from relay server")
|
||||
}
|
||||
|
||||
var newRelay relay
|
||||
err := json.NewDecoder(r.Body).Decode(&newRelay)
|
||||
r.Body.Close()
|
||||
@@ -387,7 +356,7 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if debug {
|
||||
log.Println("Failed to parse payload")
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -396,16 +365,26 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if debug {
|
||||
log.Println("Failed to parse URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if relayCert != nil {
|
||||
advertisedId := uri.Query().Get("id")
|
||||
idFromCert := protocol.NewDeviceID(relayCert.Raw).String()
|
||||
if advertisedId != idFromCert {
|
||||
log.Println("Warning: Relay server requested to join with an ID different from the join request, rejecting")
|
||||
http.Error(w, "mismatched advertised id and join request cert", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split URI", newRelay.URL)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -420,7 +399,7 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if ip == nil || ip.IsUnspecified() {
|
||||
uri.Host = net.JoinHostPort(rhost, port)
|
||||
newRelay.URL = uri.String()
|
||||
} else if host != rhost {
|
||||
} else if host != rhost && relayCert == nil {
|
||||
if debug {
|
||||
log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
|
||||
}
|
||||
@@ -481,11 +460,11 @@ func handleRelayTest(request request) {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
if !client.TestRelay(context.TODO(), request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3) {
|
||||
if err := client.TestRelay(context.TODO(), request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3); err != nil {
|
||||
if debug {
|
||||
log.Println("Test for relay", request.relay, "failed")
|
||||
log.Println("Test for relay", request.relay, "failed:", err)
|
||||
}
|
||||
request.result <- result{fmt.Errorf("connection test failed"), 0}
|
||||
request.result <- result{err, 0}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -497,7 +476,7 @@ func handleRelayTest(request request) {
|
||||
updateMetrics(request.relay.uri.Host, *stats, location)
|
||||
}
|
||||
request.relay.Stats = stats
|
||||
request.relay.StatsRetrieved = time.Now()
|
||||
request.relay.StatsRetrieved = time.Now().Truncate(time.Second)
|
||||
request.relay.Location = location
|
||||
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
@@ -563,26 +542,21 @@ func evict(relay *relay) func() {
|
||||
}
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration, burst int) bool {
|
||||
func limit(addr string, cache *lru.Cache, lock sync.Mutex, intv time.Duration, burst int) bool {
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
lock.RLock()
|
||||
bkt, ok := cache.Get(addr)
|
||||
lock.RUnlock()
|
||||
if ok {
|
||||
bkt := bkt.(*rate.Limiter)
|
||||
if !bkt.Allow() {
|
||||
// Rate limit
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
lock.Lock()
|
||||
cache.Add(addr, rate.NewLimiter(rate.Every(intv), burst))
|
||||
lock.Unlock()
|
||||
lock.Lock()
|
||||
v, _ := cache.Get(addr)
|
||||
bkt, ok := v.(*rate.Limiter)
|
||||
if !ok {
|
||||
bkt = rate.NewLimiter(rate.Every(intv), burst)
|
||||
cache.Add(addr, bkt)
|
||||
}
|
||||
return false
|
||||
lock.Unlock()
|
||||
|
||||
return !bkt.Allow()
|
||||
}
|
||||
|
||||
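The rewritten limit keeps one token bucket per client address behind a single mutex, creating the bucket on first sight of the address. A self-contained sketch of the same idea, using a plain map instead of the LRU cache so it runs on its own (names are illustrative):

package main

import (
	"fmt"
	"net"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// ipLimiter hands out one token bucket per host.
type ipLimiter struct {
	mut     sync.Mutex
	buckets map[string]*rate.Limiter
	every   rate.Limit
	burst   int
}

func newIPLimiter(every rate.Limit, burst int) *ipLimiter {
	return &ipLimiter{buckets: make(map[string]*rate.Limiter), every: every, burst: burst}
}

// allow reports whether a request from addr may proceed right now.
func (l *ipLimiter) allow(addr string) bool {
	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host // limit per host, ignoring the ephemeral port
	}

	l.mut.Lock()
	bkt, ok := l.buckets[addr]
	if !ok {
		bkt = rate.NewLimiter(l.every, l.burst)
		l.buckets[addr] = bkt
	}
	l.mut.Unlock()

	return bkt.Allow()
}

func main() {
	lim := newIPLimiter(rate.Every(time.Second), 2)
	for i := 0; i < 4; i++ {
		fmt.Println(lim.allow("192.0.2.1:12345")) // true, true, false, false
	}
}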
func loadRelays(file string) []*relay {
|
||||
|
||||
cmd/strelaypoolsrv/main_test.go (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
// Copyright © 2020 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < 10; i++ {
|
||||
u := fmt.Sprintf("permanent%d", i)
|
||||
permanentRelays = append(permanentRelays, &relay{URL: u})
|
||||
}
|
||||
|
||||
knownRelays = []*relay{
|
||||
{URL: "known1"},
|
||||
{URL: "known2"},
|
||||
{URL: "known3"},
|
||||
}
|
||||
|
||||
mut = new(sync.RWMutex)
|
||||
}
|
||||
|
||||
// Regression test: handleGetRequest should not modify permanentRelays.
|
||||
func TestHandleGetRequest(t *testing.T) {
|
||||
needcap := len(permanentRelays) + len(knownRelays)
|
||||
if needcap > cap(permanentRelays) {
|
||||
t.Fatalf("test setup failed: need cap(permanentRelays) >= %d, have %d",
|
||||
needcap, cap(permanentRelays))
|
||||
}
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
w.Body = new(bytes.Buffer)
|
||||
handleGetRequest(w, httptest.NewRequest("GET", "/", nil))
|
||||
|
||||
result := make(map[string][]*relay)
|
||||
err := json.NewDecoder(w.Body).Decode(&result)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid JSON: %v", err)
|
||||
}
|
||||
|
||||
relays := result["relays"]
|
||||
expect, actual := len(knownRelays)+len(permanentRelays), len(relays)
|
||||
if actual != expect {
|
||||
t.Errorf("expected %d relays, got %d", expect, actual)
|
||||
}
|
||||
|
||||
// Check for changes in permanentRelays.
|
||||
for i, r := range permanentRelays {
|
||||
switch {
|
||||
case !strings.HasPrefix(r.URL, "permanent"):
|
||||
t.Errorf("relay %q among permanent relays", r.URL)
|
||||
case r.URL != fmt.Sprintf("permanent%d", i):
|
||||
t.Error("order of permanent relays changed")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -121,11 +121,11 @@ Relay related libraries used by this repo
|
||||
----
|
||||
##### Relay protocol definition.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/protocol)
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/main/lib/relay/protocol)
|
||||
|
||||
|
||||
##### Relay client
|
||||
|
||||
Only used by the testutil.
|
||||
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/master/lib/relay/client)
|
||||
[Available here](https://github.com/syncthing/syncthing/tree/main/lib/relay/client)
|
||||
|
||||
@@ -149,7 +149,15 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
protocol.WriteMessage(conn, protocol.ResponseSuccess)
|
||||
|
||||
case protocol.ConnectRequest:
|
||||
requestedPeer := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
requestedPeer, err := syncthingprotocol.DeviceIDFromBytes(msg.ID)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(id, "is looking for an invalid peer ID")
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseNotFound)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
outboxesMut.RLock()
|
||||
peerOutbox, ok := outboxes[requestedPeer]
|
||||
outboxesMut.RUnlock()
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -98,12 +99,13 @@ func main() {
|
||||
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
|
||||
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
|
||||
flag.BoolVar(&pprofEnabled, "pprof", false, "Enable the built in profiling on the status server")
|
||||
flag.IntVar(&networkBufferSize, "network-buffer", 2048, "Network buffer size (two of these per proxied connection)")
|
||||
flag.IntVar(&networkBufferSize, "network-buffer", 65536, "Network buffer size (two of these per proxied connection)")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
longVer := build.LongVersionFor("strelaysrv")
|
||||
if *showVersion {
|
||||
fmt.Println(build.LongVersion)
|
||||
fmt.Println(longVer)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -135,7 +137,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
log.Println(build.LongVersion)
|
||||
log.Println(longVer)
|
||||
|
||||
maxDescriptors, err := osutil.MaximizeOpenFileLimit()
|
||||
if maxDescriptors > 0 {
|
||||
@@ -183,17 +185,20 @@ func main() {
|
||||
log.Println("ID:", id)
|
||||
}
|
||||
|
||||
wrapper := config.Wrap("config", config.New(id), events.NoopLogger)
|
||||
wrapper.SetOptions(config.OptionsConfiguration{
|
||||
NATLeaseM: natLease,
|
||||
NATRenewalM: natRenewal,
|
||||
NATTimeoutS: natTimeout,
|
||||
wrapper := config.Wrap("config", config.New(id), id, events.NoopLogger)
|
||||
go wrapper.Serve(context.TODO())
|
||||
wrapper.Modify(func(cfg *config.Configuration) {
|
||||
cfg.Options.NATLeaseM = natLease
|
||||
cfg.Options.NATRenewalM = natRenewal
|
||||
cfg.Options.NATTimeoutS = natTimeout
|
||||
})
|
||||
natSvc := nat.NewService(id, wrapper)
|
||||
mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}
|
||||
|
||||
if natEnabled {
|
||||
go natSvc.Serve()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go natSvc.Serve(ctx)
|
||||
defer cancel()
|
||||
found := make(chan struct{})
|
||||
mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
|
||||
select {
|
||||
@@ -228,6 +233,7 @@ func main() {
|
||||
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to construct URI", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Println("URI:", uri.String())
|
||||
@@ -243,7 +249,7 @@ func main() {
|
||||
for _, pool := range pools {
|
||||
pool = strings.TrimSpace(pool)
|
||||
if len(pool) > 0 {
|
||||
go poolHandler(pool, uri, mapping)
|
||||
go poolHandler(pool, uri, mapping, cert)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,14 +4,20 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
func poolHandler(pool string, uri *url.URL, mapping mapping) {
|
||||
const (
|
||||
httpStatusEnhanceYourCalm = 429
|
||||
)
|
||||
|
||||
func poolHandler(pool string, uri *url.URL, mapping mapping, ownCert tls.Certificate) {
|
||||
if debug {
|
||||
log.Println("Joining", pool)
|
||||
}
|
||||
@@ -26,40 +32,81 @@ func poolHandler(pool string, uri *url.URL, mapping mapping) {
|
||||
uriCopy.String(),
|
||||
})
|
||||
|
||||
resp, err := httpClient.Post(pool, "application/json", &b)
|
||||
poolUrl, err := url.Parse(pool)
|
||||
if err != nil {
|
||||
log.Println("Error joining pool", pool, err)
|
||||
} else if resp.StatusCode == 500 {
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Println("Failed to join", pool, "due to an internal server error. Could not read response:", err)
|
||||
} else {
|
||||
log.Println("Failed to join", pool, "due to an internal server error:", string(bs))
|
||||
log.Printf("Could not parse pool url '%s': %v", pool, err)
|
||||
}
|
||||
|
||||
client := http.DefaultClient
|
||||
if poolUrl.Scheme == "https" {
|
||||
// Send our certificate in the join request
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
Certificates: []tls.Certificate{ownCert},
|
||||
},
|
||||
},
|
||||
}
|
||||
resp.Body.Close()
|
||||
} else if resp.StatusCode == 429 {
|
||||
log.Println(pool, "under load, will retry in a minute")
|
||||
}
|
||||
|
||||
resp, err := client.Post(pool, "application/json", &b)
|
||||
if err != nil {
|
||||
log.Printf("Error joining pool %v: HTTP request: %v", pool, err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
} else if resp.StatusCode == 401 {
|
||||
log.Println(pool, "failed to join due to IP address not matching external address. Aborting")
|
||||
return
|
||||
} else if resp.StatusCode == 200 {
|
||||
}
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
log.Printf("Error joining pool %v: reading response: %v", pool, err)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
var x struct {
|
||||
EvictionIn time.Duration `json:"evictionIn"`
|
||||
}
|
||||
err := json.NewDecoder(resp.Body).Decode(&x)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(bs, &x); err == nil {
|
||||
rejoin := x.EvictionIn - (x.EvictionIn / 5)
|
||||
log.Println("Joined", pool, "rejoining in", rejoin)
|
||||
log.Printf("Joined pool %s, rejoining in %v", pool, rejoin)
|
||||
time.Sleep(rejoin)
|
||||
continue
|
||||
} else {
|
||||
log.Println("Failed to deserialize response", err)
|
||||
log.Printf("Joined pool %s, failed to deserialize response: %v", pool, err)
|
||||
}
|
||||
} else {
|
||||
log.Println(pool, "unknown response type from server", resp.StatusCode)
|
||||
|
||||
case http.StatusInternalServerError:
|
||||
log.Printf("Failed to join %v: server error", pool)
|
||||
log.Printf("Response data: %s", bs)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
|
||||
case http.StatusBadRequest:
|
||||
log.Printf("Failed to join %v: request or check error", pool)
|
||||
log.Printf("Response data: %s", bs)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
|
||||
case httpStatusEnhanceYourCalm:
|
||||
log.Printf("Failed to join %v: under load (rate limiting)", pool)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
|
||||
case http.StatusUnauthorized:
|
||||
log.Printf("Failed to join %v: IP address not matching external address", pool)
|
||||
log.Println("Aborting")
|
||||
return
|
||||
|
||||
default:
|
||||
log.Printf("Failed to join %v: unexpected status code from server: %d", pool, resp.StatusCode)
|
||||
log.Printf("Response data: %s", bs)
|
||||
time.Sleep(time.Minute)
|
||||
continue
|
||||
}
|
||||
|
||||
time.Sleep(time.Hour)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,8 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||
@@ -62,7 +63,7 @@ func main() {
|
||||
}
|
||||
log.Println("Created client")
|
||||
|
||||
go relay.Serve()
|
||||
go relay.Serve(ctx)
|
||||
|
||||
recv := make(chan protocol.SessionInvitation)
|
||||
|
||||
@@ -107,10 +108,10 @@ func main() {
|
||||
connectToStdio(stdin, conn)
|
||||
log.Println("Finished", conn.RemoteAddr(), conn.LocalAddr())
|
||||
} else if test {
|
||||
if client.TestRelay(ctx, uri, []tls.Certificate{cert}, time.Second, 2*time.Second, 4) {
|
||||
if err := client.TestRelay(ctx, uri, []tls.Certificate{cert}, time.Second, 2*time.Second, 4); err == nil {
|
||||
log.Println("OK")
|
||||
} else {
|
||||
log.Println("FAIL")
|
||||
log.Println("FAIL:", err)
|
||||
}
|
||||
} else {
|
||||
log.Fatal("Requires either join or connect")
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/big"
|
||||
@@ -36,7 +37,7 @@ type result struct {
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
prefix := strings.ToUpper(strings.Replace(flag.Arg(0), "-", "", -1))
|
||||
prefix := strings.ToUpper(strings.ReplaceAll(flag.Arg(0), "-", ""))
|
||||
if len(prefix) > 7 {
|
||||
prefix = prefix[:7] + "-" + prefix[7:]
|
||||
}
|
||||
@@ -191,7 +192,7 @@ func pemBlockForKey(priv interface{}) (*pem.Block, error) {
|
||||
}
|
||||
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown key type")
|
||||
return nil, errors.New("unknown key type")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7,31 +7,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func getmd5(filePath string) ([]byte, error) {
|
||||
var result []byte
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
hash := md5.New()
|
||||
if _, err := io.Copy(hash, file); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
return hash.Sum(result), nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
period := flag.Duration("period", 200*time.Millisecond, "Sleep period between checks")
|
||||
flag.Parse()
|
||||
@@ -46,7 +30,7 @@ func main() {
|
||||
exists := true
|
||||
size := int64(0)
|
||||
mtime := time.Time{}
|
||||
hash := []byte{}
|
||||
var hash [sha256.Size]byte
|
||||
|
||||
for {
|
||||
time.Sleep(*period)
|
||||
@@ -72,7 +56,7 @@ func main() {
|
||||
if !exists {
|
||||
size = 0
|
||||
mtime = time.Time{}
|
||||
hash = []byte{}
|
||||
hash = [sha256.Size]byte{}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -83,12 +67,12 @@ func main() {
|
||||
newSize := fi.Size()
|
||||
newMtime := fi.ModTime()
|
||||
|
||||
newHash, err := getmd5(file)
|
||||
newHash, err := sha256file(file)
|
||||
if err != nil {
|
||||
fmt.Println("getmd5:", err)
|
||||
fmt.Println("sha256file:", err)
|
||||
}
|
||||
|
||||
if newSize != size || newMtime != mtime || !bytes.Equal(newHash, hash) {
|
||||
if newSize != size || newMtime != mtime || newHash != hash {
|
||||
fmt.Println(file, "Size:", newSize, "Mtime:", newMtime, "Hash:", fmt.Sprintf("%x", newHash))
|
||||
hash = newHash
|
||||
size = newSize
|
||||
@@ -96,3 +80,18 @@ func main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := sha256.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -15,19 +15,17 @@ import (
	"time"
)

func init() {
	if innerProcess && os.Getenv("STBLOCKPROFILE") != "" {
		profiler := pprof.Lookup("block")
		if profiler == nil {
			panic("Couldn't find block profiler")
		}
		l.Debugln("Starting block profiling")
		go func() {
			err := saveBlockingProfiles(profiler) // Only returns on error
			l.Warnln("Block profiler failed:", err)
			panic("Block profiler failed")
		}()
func startBlockProfiler() {
	profiler := pprof.Lookup("block")
	if profiler == nil {
		panic("Couldn't find block profiler")
	}
	l.Debugln("Starting block profiling")
	go func() {
		err := saveBlockingProfiles(profiler) // Only returns on error
		l.Warnln("Block profiler failed:", err)
		panic("Block profiler failed")
	}()
}

func saveBlockingProfiles(profiler *pprof.Profile) error {

151
cmd/syncthing/cli/client.go
Normal file
@@ -0,0 +1,151 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type APIClient interface {
|
||||
Get(url string) (*http.Response, error)
|
||||
Post(url, body string) (*http.Response, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
http.Client
|
||||
cfg config.GUIConfiguration
|
||||
apikey string
|
||||
}
|
||||
|
||||
type apiClientFactory struct {
|
||||
cfg config.GUIConfiguration
|
||||
}
|
||||
|
||||
func (f *apiClientFactory) getClient() (APIClient, error) {
|
||||
// Now, if the API key and address are not provided (i.e. we are not connecting to a remote instance),
// try to rip them out of the config.
|
||||
if f.cfg.RawAddress == "" && f.cfg.APIKey == "" {
|
||||
var err error
|
||||
f.cfg, err = loadGUIConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if f.cfg.Address() == "" || f.cfg.APIKey == "" {
|
||||
return nil, errors.New("Both --gui-address and --gui-apikey should be specified")
|
||||
}
|
||||
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial(f.cfg.Network(), f.cfg.Address())
|
||||
},
|
||||
},
|
||||
}
|
||||
return &apiClient{
|
||||
Client: httpClient,
|
||||
cfg: f.cfg,
|
||||
apikey: f.cfg.APIKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func loadGUIConfig() (config.GUIConfiguration, error) {
|
||||
// Load the certs and get the ID
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
)
|
||||
if err != nil {
|
||||
return config.GUIConfiguration{}, fmt.Errorf("reading device ID: %w", err)
|
||||
}
|
||||
|
||||
myID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
|
||||
// Load the config
|
||||
cfg, _, err := config.Load(locations.Get(locations.ConfigFile), myID, events.NoopLogger)
|
||||
if err != nil {
|
||||
return config.GUIConfiguration{}, fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
guiCfg := cfg.GUI()
|
||||
|
||||
if guiCfg.Address() == "" {
|
||||
return config.GUIConfiguration{}, errors.New("Could not find GUI Address")
|
||||
}
|
||||
|
||||
if guiCfg.APIKey == "" {
|
||||
return config.GUIConfiguration{}, errors.New("Could not find GUI API key")
|
||||
}
|
||||
|
||||
return guiCfg, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) Endpoint() string {
|
||||
if c.cfg.Network() == "unix" {
|
||||
return "http://unix/"
|
||||
}
|
||||
url := c.cfg.URL()
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
func (c *apiClient) Do(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("X-API-Key", c.apikey)
|
||||
resp, err := c.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, checkResponse(resp)
|
||||
}
|
||||
|
||||
func (c *apiClient) Get(url string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("GET", c.Endpoint()+"rest/"+url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func (c *apiClient) Post(url, body string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("POST", c.Endpoint()+"rest/"+url, bytes.NewBufferString(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func checkResponse(response *http.Response) error {
|
||||
if response.StatusCode == 404 {
|
||||
return errors.New("invalid endpoint or API call")
|
||||
} else if response.StatusCode == 403 {
|
||||
return errors.New("invalid API key")
|
||||
} else if response.StatusCode != 200 {
|
||||
data, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := strings.TrimSpace(string(data))
|
||||
return fmt.Errorf("unexpected HTTP status returned: %s\n%s", response.Status, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
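For orientation, the APIClient interface above deliberately exposes just Get and Post, so commands can be exercised against any stand-in that speaks those two methods. A minimal stub sketch (assumed code, not from this changeset; the JSON payload is made up):

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// Same two-method shape as the APIClient interface in cli/client.go.
type APIClient interface {
	Get(url string) (*http.Response, error)
	Post(url, body string) (*http.Response, error)
}

// stubClient satisfies APIClient without any network I/O.
type stubClient struct{}

func (stubClient) Get(string) (*http.Response, error) {
	return &http.Response{
		StatusCode: 200,
		Body:       io.NopCloser(strings.NewReader(`{"status":"ok"}`)),
	}, nil
}

func (stubClient) Post(string, string) (*http.Response, error) {
	return &http.Response{
		StatusCode: 200,
		Body:       io.NopCloser(strings.NewReader("")),
	}, nil
}

func main() {
	var c APIClient = stubClient{}
	resp, _ := c.Get("system/status")
	defer resp.Body.Close()
	bs, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(bs))
}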
87
cmd/syncthing/cli/config.go
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/AudriusButkevicius/recli"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type configHandler struct {
|
||||
original, cfg config.Configuration
|
||||
client APIClient
|
||||
err error
|
||||
}
|
||||
|
||||
func getConfigCommand(f *apiClientFactory) (cli.Command, error) {
|
||||
h := new(configHandler)
|
||||
h.client, h.err = f.getClient()
|
||||
if h.err == nil {
|
||||
h.cfg, h.err = getConfig(h.client)
|
||||
}
|
||||
h.original = h.cfg.Copy()
|
||||
|
||||
// Copy the config and set the default flags
|
||||
recliCfg := recli.DefaultConfig
|
||||
recliCfg.IDTag.Name = "xml"
|
||||
recliCfg.SkipTag.Name = "json"
|
||||
|
||||
commands, err := recli.New(recliCfg).Construct(&h.cfg)
|
||||
if err != nil {
|
||||
return cli.Command{}, fmt.Errorf("config reflect: %w", err)
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "config",
|
||||
HideHelp: true,
|
||||
Usage: "Configuration modification command group",
|
||||
Subcommands: commands,
|
||||
Before: h.configBefore,
|
||||
After: h.configAfter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *configHandler) configBefore(c *cli.Context) error {
|
||||
for _, arg := range c.Args() {
|
||||
if arg == "--help" || arg == "-h" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return h.err
|
||||
}
|
||||
|
||||
func (h *configHandler) configAfter(c *cli.Context) error {
|
||||
if h.err != nil {
|
||||
// Error was already returned in configBefore
|
||||
return nil
|
||||
}
|
||||
if reflect.DeepEqual(h.cfg, h.original) {
|
||||
return nil
|
||||
}
|
||||
body, err := json.MarshalIndent(h.cfg, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := h.client.Post("system/config", string(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
body, err := responseToBArray(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.New(string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd/syncthing/cli/debug.go (new file, 51 lines)
@@ -0,0 +1,51 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"fmt"

	"github.com/urfave/cli"
)

var debugCommand = cli.Command{
	Name:     "debug",
	HideHelp: true,
	Usage:    "Debug command group",
	Subcommands: []cli.Command{
		{
			Name:      "file",
			Usage:     "Show information about a file (or directory/symlink)",
			ArgsUsage: "FOLDER-ID PATH",
			Action:    expects(2, debugFile()),
		},
		indexCommand,
		{
			Name:      "profile",
			Usage:     "Save a profile to help figure out what Syncthing does.",
			ArgsUsage: "cpu | heap",
			Action:    expects(1, profile()),
		},
	},
}

func debugFile() cli.ActionFunc {
	return func(c *cli.Context) error {
		return indexDumpOutput(fmt.Sprintf("debug/file?folder=%v&file=%v", c.Args()[0], normalizePath(c.Args()[1])))(c)
	}
}

func profile() cli.ActionFunc {
	return func(c *cli.Context) error {
		switch t := c.Args()[0]; t {
		case "cpu", "heap":
			return saveToFile(fmt.Sprintf("debug/%vprof", c.Args()[0]))(c)
		default:
			return fmt.Errorf("expected cpu or heap as argument, got %v", t)
		}
	}
}
@@ -4,9 +4,10 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
@@ -21,7 +22,7 @@ var errorsCommand = cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Action: expects(0, dumpOutput("system/error")),
|
||||
Action: expects(0, indexDumpOutput("system/error")),
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
@@ -38,7 +39,7 @@ var errorsCommand = cli.Command{
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
client := c.App.Metadata["client"].(APIClient)
|
||||
errStr := strings.Join(c.Args(), " ")
|
||||
response, err := client.Post("system/error", strings.TrimSpace(errStr))
|
||||
if err != nil {
|
||||
@@ -54,7 +55,7 @@ func errorsPush(c *cli.Context) error {
|
||||
if body != "" {
|
||||
errStr += "\nBody: " + body
|
||||
}
|
||||
return fmt.Errorf(errStr)
|
||||
return errors.New(errStr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd/syncthing/cli/index.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"github.com/urfave/cli"
)

var indexCommand = cli.Command{
	Name:  "index",
	Usage: "Show information about the index (database)",
	Subcommands: []cli.Command{
		{
			Name:   "dump",
			Usage:  "Print the entire db",
			Action: expects(0, indexDump),
		},
		{
			Name:   "dump-size",
			Usage:  "Print the db size of different categories of information",
			Action: expects(0, indexDumpSize),
		},
		{
			Name:   "check",
			Usage:  "Check the database for inconsistencies",
			Action: expects(0, indexCheck),
		},
		{
			Name:   "account",
			Usage:  "Print key and value size statistics per key type",
			Action: expects(0, indexAccount),
		},
	},
}
64
cmd/syncthing/cli/index_accounting.go
Normal file
@@ -0,0 +1,64 @@
|
||||
// Copyright (C) 2020 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// indexAccount prints key and data size statistics per class
|
||||
func indexAccount(*cli.Context) error {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ksizes [256]int
|
||||
var dsizes [256]int
|
||||
var counts [256]int
|
||||
var max [256]int
|
||||
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
t := key[0]
|
||||
ds := len(it.Value())
|
||||
ks := len(key)
|
||||
s := ks + ds
|
||||
|
||||
counts[t]++
|
||||
ksizes[t] += ks
|
||||
dsizes[t] += ds
|
||||
if s > max[t] {
|
||||
max[t] = s
|
||||
}
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', tabwriter.AlignRight)
|
||||
toti, totds, totks := 0, 0, 0
|
||||
for t := range ksizes {
|
||||
if ksizes[t] > 0 {
|
||||
// yes metric kilobytes 🤘
|
||||
fmt.Fprintf(tw, "0x%02x:\t%d items,\t%d KB keys +\t%d KB data,\t%d B +\t%d B avg,\t%d B max\t\n", t, counts[t], ksizes[t]/1000, dsizes[t]/1000, ksizes[t]/counts[t], dsizes[t]/counts[t], max[t])
|
||||
toti += counts[t]
|
||||
totds += dsizes[t]
|
||||
totks += ksizes[t]
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(tw, "Total\t%d items,\t%d KB keys +\t%d KB data.\t\n", toti, totks/1000, totds/1000)
|
||||
tw.Flush()
|
||||
|
||||
return nil
|
||||
}
|
||||
160
cmd/syncthing/cli/index_dump.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func indexDump(*cli.Context) error {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
flv.Unmarshal(it.Value())
|
||||
fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)
|
||||
|
||||
case db.KeyTypeBlock:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[1+4 : 1+4+32]
|
||||
name := nulString(key[1+4+32:])
|
||||
fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
|
||||
|
||||
case db.KeyTypeDeviceStatistic:
|
||||
fmt.Printf("[dstat] K:%x V:%x\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeFolderStatistic:
|
||||
fmt.Printf("[fstat] K:%x V:%x\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeVirtualMtime:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
val := it.Value()
|
||||
var realTime, virtualTime time.Time
|
||||
realTime.UnmarshalBinary(val[:len(val)/2])
|
||||
virtualTime.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, realTime, virtualTime)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(key[1:])
|
||||
val := it.Value()
|
||||
device := "<nil>"
|
||||
if len(val) > 0 {
|
||||
dev, err := protocol.DeviceIDFromBytes(val)
|
||||
if err != nil {
|
||||
device = fmt.Sprintf("<invalid %d bytes>", len(val))
|
||||
} else {
|
||||
device = dev.String()
|
||||
}
|
||||
}
|
||||
fmt.Printf("[deviceidx] K:%d V:%s\n", key, device)
|
||||
|
||||
case db.KeyTypeIndexID:
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := binary.BigEndian.Uint32(key[5:])
|
||||
fmt.Printf("[indexid] D:%d F:%d I:%x\n", device, folder, it.Value())
|
||||
|
||||
case db.KeyTypeFolderMeta:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
fmt.Printf("[foldermeta] F:%d", folder)
|
||||
var cs db.CountsSet
|
||||
if err := cs.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", cs)
|
||||
}
|
||||
|
||||
case db.KeyTypeMiscData:
|
||||
fmt.Printf("[miscdata] K:%q V:%q\n", key[1:], it.Value())
|
||||
|
||||
case db.KeyTypeSequence:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
seq := binary.BigEndian.Uint64(key[5:])
|
||||
fmt.Printf("[sequence] F:%d S:%d V:%q\n", folder, seq, it.Value())
|
||||
|
||||
case db.KeyTypeNeed:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
file := string(key[5:])
|
||||
fmt.Printf("[need] F:%d V:%q\n", folder, file)
|
||||
|
||||
case db.KeyTypeBlockList:
|
||||
fmt.Printf("[blocklist] H:%x\n", key[1:])
|
||||
|
||||
case db.KeyTypeBlockListMap:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
hash := key[5:37]
|
||||
fileName := string(key[37:])
|
||||
fmt.Printf("[blocklistmap] F:%d H:%x N:%s\n", folder, hash, fileName)
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
fmt.Printf("[version] H:%x", key[1:])
|
||||
var v protocol.Vector
|
||||
err := v.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
fmt.Printf(" (invalid)\n")
|
||||
} else {
|
||||
fmt.Printf(" V:%v\n", v)
|
||||
}
|
||||
|
||||
case db.KeyTypePendingFolder:
|
||||
device := binary.BigEndian.Uint32(key[1:])
|
||||
folder := string(key[5:])
|
||||
var of db.ObservedFolder
|
||||
of.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, of)
|
||||
|
||||
case db.KeyTypePendingDevice:
|
||||
device := "<invalid>"
|
||||
dev, err := protocol.DeviceIDFromBytes(key[1:])
|
||||
if err == nil {
|
||||
device = dev.String()
|
||||
}
|
||||
var od db.ObservedDevice
|
||||
od.Unmarshal(it.Value())
|
||||
fmt.Printf("[pendingDevice] D:%v V:%v\n", device, od)
|
||||
|
||||
default:
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
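The switch above assumes a fixed key layout: a one-byte key type, then big-endian folder and device indices, then the file name. A self-contained sketch of building and decoding such a key (hypothetical helper, not the actual lib/db key schema code):

package main

import (
	"encoding/binary"
	"fmt"
)

// deviceFileKey lays out a key as: type byte | folder index | device index | name.
func deviceFileKey(keyType byte, folderIdx, deviceIdx uint32, name string) []byte {
	key := make([]byte, 1+4+4+len(name))
	key[0] = keyType
	binary.BigEndian.PutUint32(key[1:], folderIdx)
	binary.BigEndian.PutUint32(key[1+4:], deviceIdx)
	copy(key[1+4+4:], name)
	return key
}

func main() {
	key := deviceFileKey(0x00, 1, 2, "some/file")
	folder := binary.BigEndian.Uint32(key[1:])
	device := binary.BigEndian.Uint32(key[1+4:])
	fmt.Printf("[device] F:%d D:%d N:%q\n", folder, device, key[1+4+4:])
}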
@@ -4,16 +4,16 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
)
|
||||
|
||||
type SizedElement struct {
|
||||
@@ -39,13 +39,18 @@ func (h *ElementHeap) Pop() interface{} {
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb backend.Backend) {
|
||||
func indexDumpSize(*cli.Context) error {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
var ele SizedElement
|
||||
for it.Next() {
|
||||
@@ -96,4 +101,6 @@ func dumpsize(ldb backend.Backend) {
|
||||
ele = heap.Pop(h).(SizedElement)
|
||||
fmt.Println(ele.key, ele.size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,16 +4,18 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
@@ -33,7 +35,12 @@ type sequenceKey struct {
|
||||
sequence uint64
|
||||
}
|
||||
|
||||
func idxck(ldb backend.Backend) (success bool) {
|
||||
func indexCheck(*cli.Context) (err error) {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
@@ -41,12 +48,25 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
globals := make(map[globalKey]db.VersionList)
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
blocklists := make(map[string]struct{})
|
||||
versions := make(map[string]protocol.Vector)
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
success = true
|
||||
success := true
|
||||
defer func() {
|
||||
if err == nil {
|
||||
if success {
|
||||
fmt.Println("Index check completed succesfully.")
|
||||
} else {
|
||||
err = errors.New("Inconsistencies found in the index")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
@@ -99,6 +119,20 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
needs[globalKey{folder, name}] = struct{}{}
|
||||
|
||||
case db.KeyTypeBlockList:
|
||||
hash := string(key[1:])
|
||||
blocklists[hash] = struct{}{}
|
||||
|
||||
case db.KeyTypeVersion:
|
||||
hash := string(key[1:])
|
||||
var v protocol.Vector
|
||||
if err := v.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Println("Unable to unmarshal Vector:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
versions[hash] = v
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,6 +142,7 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
return
|
||||
}
|
||||
|
||||
var missingSeq []sequenceKey
|
||||
for fk, fi := range fileInfos {
|
||||
if fk.name != fi.Name {
|
||||
fmt.Printf("Mismatching FileInfo name, %q (key) != %q (actual)\n", fk.name, fi.Name)
|
||||
@@ -126,9 +161,11 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
}
|
||||
|
||||
if fk.device == localDeviceKey {
|
||||
name, ok := sequences[sequenceKey{fk.folder, uint64(fi.Sequence)}]
|
||||
sk := sequenceKey{fk.folder, uint64(fi.Sequence)}
|
||||
name, ok := sequences[sk]
|
||||
if !ok {
|
||||
fmt.Printf("Sequence entry missing for FileInfo %q, folder %q, seq %d\n", fi.Name, folder, fi.Sequence)
|
||||
missingSeq = append(missingSeq, sk)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
@@ -137,6 +174,58 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
success = false
|
||||
}
|
||||
}
|
||||
|
||||
if len(fi.Blocks) == 0 && len(fi.BlocksHash) != 0 {
|
||||
key := string(fi.BlocksHash)
|
||||
if _, ok := blocklists[key]; !ok {
|
||||
fmt.Printf("Missing block list for file %q, block list hash %x\n", fi.Name, fi.BlocksHash)
|
||||
success = false
|
||||
} else {
|
||||
usedBlocklists[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if fi.VersionHash != nil {
|
||||
key := string(fi.VersionHash)
|
||||
if _, ok := versions[key]; !ok {
|
||||
fmt.Printf("Missing version vector for file %q, version hash %x\n", fi.Name, fi.VersionHash)
|
||||
success = false
|
||||
} else {
|
||||
usedVersions[key] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
_, ok := globals[globalKey{fk.folder, fk.name}]
|
||||
if !ok {
|
||||
fmt.Printf("Missing global for file %q\n", fi.Name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate the ranges of missing sequence entries, print them
|
||||
|
||||
sort.Slice(missingSeq, func(a, b int) bool {
|
||||
if missingSeq[a].folder != missingSeq[b].folder {
|
||||
return missingSeq[a].folder < missingSeq[b].folder
|
||||
}
|
||||
return missingSeq[a].sequence < missingSeq[b].sequence
|
||||
})
|
||||
|
||||
var folder uint32
|
||||
var startSeq, prevSeq uint64
|
||||
for _, sk := range missingSeq {
|
||||
if folder != sk.folder || sk.sequence != prevSeq+1 {
|
||||
if folder != 0 {
|
||||
fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
|
||||
}
|
||||
startSeq = sk.sequence
|
||||
folder = sk.folder
|
||||
}
|
||||
prevSeq = sk.sequence
|
||||
}
|
||||
if folder != 0 {
|
||||
fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
|
||||
}
|
||||
|
||||
for gk, vl := range globals {
|
||||
@@ -145,10 +234,10 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
fmt.Printf("Unknown folder ID %d for VersionList %q\n", gk.folder, gk.name)
|
||||
success = false
|
||||
}
|
||||
for i, fv := range vl.Versions {
|
||||
dev, ok := deviceToIDs[string(fv.Device)]
|
||||
checkGlobal := func(i int, device []byte, version protocol.Vector, invalid, deleted bool) {
|
||||
dev, ok := deviceToIDs[string(device)]
|
||||
if !ok {
|
||||
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, fv.Device)
|
||||
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, device)
|
||||
success = false
|
||||
}
|
||||
fi, ok := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
@@ -156,14 +245,31 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d refers to unknown FileInfo\n", gk.name, folder, i)
|
||||
success = false
|
||||
}
|
||||
if !fi.Version.Equal(fv.Version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Version, fi.Version)
|
||||
|
||||
fiv := fi.Version
|
||||
if fi.VersionHash != nil {
|
||||
fiv = versions[string(fi.VersionHash)]
|
||||
}
|
||||
if !fiv.Equal(version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
if fi.IsInvalid() != fv.Invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Invalid, fi.IsInvalid())
|
||||
if fi.IsInvalid() != invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
if fi.IsDeleted() != deleted {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
for i, fv := range vl.RawVersions {
|
||||
for _, device := range fv.Devices {
|
||||
checkGlobal(i, device, fv.Version, false, fv.Deleted)
|
||||
}
|
||||
for _, device := range fv.InvalidDevices {
|
||||
checkGlobal(i, device, fv.Version, true, fv.Deleted)
|
||||
}
|
||||
}
|
||||
|
||||
// If we need this file we should have a need entry for it. False
|
||||
@@ -172,7 +278,9 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
dev := deviceToIDs[string(vl.Versions[0].Device)]
|
||||
fv, _ := vl.GetGlobal()
|
||||
devB, _ := fv.FirstDevice()
|
||||
dev := deviceToIDs[string(devB)]
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
@@ -229,19 +337,21 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
if d := len(blocklists) - len(usedBlocklists); d > 0 {
|
||||
fmt.Printf("%d block list entries out of %d needs GC\n", d, len(blocklists))
|
||||
}
|
||||
if d := len(versions) - len(usedVersions); d > 0 {
|
||||
fmt.Printf("%d version entries out of %d needs GC\n", d, len(versions))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
var lv *protocol.Vector
|
||||
for _, fv := range vl.Versions {
|
||||
if bytes.Equal(fv.Device, protocol.LocalDeviceID[:]) {
|
||||
lv = &fv.Version
|
||||
break
|
||||
}
|
||||
gfv, gok := vl.GetGlobal()
|
||||
if !gok { // That's weird, but we hardly need something non-existent
|
||||
return false
|
||||
}
|
||||
if lv == nil {
|
||||
return true // provisionally, it looks like we need the file
|
||||
}
|
||||
return !lv.GreaterEqual(vl.Versions[0].Version)
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
return db.Need(gfv, ok, fv.Version)
|
||||
}
|
||||
157
cmd/syncthing/cli/main.go
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type preCli struct {
|
||||
GUIAddress string `name:"gui-address"`
|
||||
GUIAPIKey string `name:"gui-apikey"`
|
||||
HomeDir string `name:"home"`
|
||||
ConfDir string `name:"config"`
|
||||
DataDir string `name:"data"`
|
||||
}
|
||||
|
||||
func Run() error {
|
||||
// This is somewhat a hack around a chicken and egg problem. We need to set
|
||||
// the home directory and potentially other flags to know where the
|
||||
// syncthing instance is running in order to get its config ... which we
|
||||
// then use to construct the actual CLI ... at which point it's too late to
|
||||
// add flags there...
|
||||
c := preCli{}
|
||||
parseFlags(&c)
|
||||
|
||||
// Not set as default above because the strings can be really long.
|
||||
err := cmdutil.SetConfigDataLocationsFromFlags(c.HomeDir, c.ConfDir, c.DataDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Command line options:")
|
||||
}
|
||||
clientFactory := &apiClientFactory{
|
||||
cfg: config.GUIConfiguration{
|
||||
RawAddress: c.GUIAddress,
|
||||
APIKey: c.GUIAPIKey,
|
||||
},
|
||||
}
|
||||
|
||||
configCommand, err := getConfigCommand(clientFactory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Implement the same flags at the upper CLI, but do nothing with them.
|
||||
// This is so that the usage text is the same
|
||||
fakeFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "gui-address",
|
||||
Value: "URL",
|
||||
Usage: "Override GUI address (e.g. \"http://192.0.2.42:8443\")",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "gui-apikey",
|
||||
Value: "API-KEY",
|
||||
Usage: "Override GUI API key",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "home",
|
||||
Value: "PATH",
|
||||
Usage: "Set configuration and data directory",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "conf",
|
||||
Value: "PATH",
|
||||
Usage: "Set configuration directory (config and keys)",
|
||||
},
|
||||
}
|
||||
|
||||
// Construct the actual CLI
|
||||
app := cli.NewApp()
|
||||
app.Author = "The Syncthing Authors"
|
||||
app.Metadata = map[string]interface{}{
|
||||
"clientFactory": clientFactory,
|
||||
}
|
||||
app.Commands = []cli.Command{{
|
||||
Name: "cli",
|
||||
Usage: "Syncthing command line interface",
|
||||
Flags: fakeFlags,
|
||||
Subcommands: []cli.Command{
|
||||
configCommand,
|
||||
showCommand,
|
||||
operationCommand,
|
||||
errorsCommand,
|
||||
debugCommand,
|
||||
},
|
||||
}}
|
||||
|
||||
tty := isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd())
|
||||
if !tty {
|
||||
// Not a TTY, consume from stdin
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "parsing input")
|
||||
}
|
||||
if len(input) == 0 {
|
||||
continue
|
||||
}
|
||||
err = app.Run(append(os.Args, input...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = scanner.Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = app.Run(os.Args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFlags(c *preCli) error {
|
||||
// kong only needs to parse the global arguments after "cli" and before the
|
||||
// subcommand (if any).
|
||||
if len(os.Args) <= 2 {
|
||||
return nil
|
||||
}
|
||||
args := os.Args[2:]
|
||||
for i := 0; i < len(args); i++ {
|
||||
if !strings.HasPrefix(args[i], "--") {
|
||||
args = args[:i]
|
||||
break
|
||||
}
|
||||
if !strings.Contains(args[i], "=") {
|
||||
i++
|
||||
}
|
||||
}
|
||||
// We don't want kong to print anything nor os.Exit (e.g. on -h)
|
||||
parser, err := kong.New(c, kong.Writers(ioutil.Discard, ioutil.Discard), kong.Exit(func(int) {}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = parser.Parse(args)
|
||||
return err
|
||||
}
|
||||
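parseFlags above pre-scans only the leading global options (everything up to the first argument that isn't a --flag), so kong never sees the subcommand or its arguments. The same scan in isolation (hypothetical helper name, not part of the changeset):

package main

import (
	"fmt"
	"strings"
)

// globalArgs keeps only the leading "--flag value" / "--flag=value" pairs.
func globalArgs(args []string) []string {
	for i := 0; i < len(args); i++ {
		if !strings.HasPrefix(args[i], "--") {
			return args[:i]
		}
		if !strings.Contains(args[i], "=") {
			i++ // skip the flag's separate value
		}
	}
	return args
}

func main() {
	fmt.Println(globalArgs([]string{"--home", "/tmp/st", "--gui-apikey=abc", "show", "system"}))
	// Output: [--home /tmp/st --gui-apikey=abc]
}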
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -27,11 +27,6 @@ var operationCommand = cli.Command{
|
||||
Usage: "Shutdown syncthing",
|
||||
Action: expects(0, emptyPost("system/shutdown")),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Action: expects(0, emptyPost("system/reset")),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
@@ -39,7 +34,7 @@ var operationCommand = cli.Command{
|
||||
},
|
||||
{
|
||||
Name: "folder-override",
|
||||
Usage: "Override changes on folder (remote for sendonly, local for receiveonly)",
|
||||
Usage: "Override changes on folder (remote for sendonly, local for receiveonly). WARNING: Destructive - deletes/changes your data.",
|
||||
ArgsUsage: "[folder id]",
|
||||
Action: expects(1, foldersOverride),
|
||||
},
|
||||
@@ -47,7 +42,10 @@ var operationCommand = cli.Command{
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, err := getConfig(client)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli"
|
||||
@@ -18,27 +18,27 @@ var showCommand = cli.Command{
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Show syncthing client version",
|
||||
Action: expects(0, dumpOutput("system/version")),
|
||||
Action: expects(0, indexDumpOutput("system/version")),
|
||||
},
|
||||
{
|
||||
Name: "config-status",
|
||||
Usage: "Show configuration status, whether or not a restart is required for changes to take effect",
|
||||
Action: expects(0, dumpOutput("system/config/insync")),
|
||||
Action: expects(0, indexDumpOutput("config/restart-required")),
|
||||
},
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Show system status",
|
||||
Action: expects(0, dumpOutput("system/status")),
|
||||
Action: expects(0, indexDumpOutput("system/status")),
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Action: expects(0, dumpOutput("system/connections")),
|
||||
Action: expects(0, indexDumpOutput("system/connections")),
|
||||
},
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Show usage report",
|
||||
Action: expects(0, dumpOutput("svc/report")),
|
||||
Action: expects(0, indexDumpOutput("svc/report")),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -4,16 +4,21 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -27,15 +32,21 @@ func responseToBArray(response *http.Response) ([]byte, error) {
|
||||
|
||||
func emptyPost(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
_, err := client.Post(url, "")
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = client.Post(url, "")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func dumpOutput(url string) cli.ActionFunc {
|
||||
func indexDumpOutput(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -44,7 +55,43 @@ func dumpOutput(url string) cli.ActionFunc {
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(c *APIClient) (config.Configuration, error) {
|
||||
func saveToFile(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, params, err := mime.ParseMediaType(response.Header.Get("Content-Disposition"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := params["filename"]
|
||||
if filename == "" {
|
||||
return errors.New("Missing filename in response")
|
||||
}
|
||||
bs, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("Wrote results to", filename)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(c APIClient) (config.Configuration, error) {
|
||||
cfg := config.Configuration{}
|
||||
response, err := c.Get("system/config")
|
||||
if err != nil {
|
||||
@@ -92,3 +139,24 @@ func prettyPrintResponse(c *cli.Context, response *http.Response) error {
|
||||
// TODO: Check flag for pretty print format
|
||||
return prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
func getDB() (backend.Backend, error) {
|
||||
return backend.OpenLevelDBRO(locations.Get(locations.Database))
|
||||
}
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func normalizePath(path string) string {
|
||||
return filepath.ToSlash(filepath.Clean(path))
|
||||
}
|
||||
|
||||
func getClientFactory(c *cli.Context) *apiClientFactory {
|
||||
return c.App.Metadata["clientFactory"].(*apiClientFactory)
|
||||
}
|
||||
cmd/syncthing/cmdutil/util.go (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

import (
	"errors"

	"github.com/syncthing/syncthing/lib/locations"
)

func SetConfigDataLocationsFromFlags(homeDir, confDir, dataDir string) error {
	homeSet := homeDir != ""
	confSet := confDir != ""
	dataSet := dataDir != ""
	switch {
	case dataSet != confSet:
		return errors.New("either both or none of -conf and -data must be given, use -home to set both at once")
	case homeSet && dataSet:
		return errors.New("-home must not be used together with -conf and -data")
	case homeSet:
		confDir = homeDir
		dataDir = homeDir
		fallthrough
	case dataSet:
		if err := locations.SetBaseDir(locations.ConfigBaseDir, confDir); err != nil {
			return err
		}
		return locations.SetBaseDir(locations.DataBaseDir, dataDir)
	}
	return nil
}
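A possible caller of SetConfigDataLocationsFromFlags, sketched with plain flag parsing (the flag wiring here is illustrative, not how the syncthing binary registers them): the helper requires -conf and -data as a pair, while -home sets both at once.

package main

import (
	"flag"
	"log"

	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
)

func main() {
	home := flag.String("home", "", "Set configuration and data directory")
	conf := flag.String("conf", "", "Set configuration directory (config and keys)")
	data := flag.String("data", "", "Set data directory (database and index)")
	flag.Parse()

	if err := cmdutil.SetConfigDataLocationsFromFlags(*home, *conf, *data); err != nil {
		log.Fatalln("Command line options:", err)
	}
}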
275
cmd/syncthing/decrypt/decrypt.go
Normal file
@@ -0,0 +1,275 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package decrypt implements the `syncthing decrypt` subcommand.
|
||||
package decrypt
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/scanner"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Path string `arg:"" required:"1" help:"Path to encrypted folder"`
|
||||
To string `xor:"mode" placeholder:"PATH" help:"Destination directory, when decrypting"`
|
||||
VerifyOnly bool `xor:"mode" help:"Don't write decrypted files to disk (but verify plaintext hashes)"`
|
||||
Password string `help:"Folder password for decryption / verification" env:"FOLDER_PASSWORD"`
|
||||
FolderID string `help:"Folder ID of the encrypted folder, if it cannot be determined automatically"`
|
||||
Continue bool `help:"Continue processing next file in case of error, instead of aborting"`
|
||||
Verbose bool `help:"Show verbose progress information"`
|
||||
TokenPath string `placeholder:"PATH" help:"Path to the token file within the folder (used to determine folder ID)"`
|
||||
|
||||
folderKey *[32]byte
|
||||
}
|
||||
|
||||
type storedEncryptionToken struct {
|
||||
FolderID string
|
||||
Token []byte
|
||||
}
|
||||
|
||||
func (c *CLI) Run() error {
|
||||
log.SetFlags(0)
|
||||
|
||||
if c.To == "" && !c.VerifyOnly {
|
||||
return fmt.Errorf("must set --to or --verify")
|
||||
}
|
||||
|
||||
if c.TokenPath == "" {
|
||||
// This is a bit long to show as default in --help
|
||||
c.TokenPath = filepath.Join(config.DefaultMarkerName, config.EncryptionTokenName)
|
||||
}
|
||||
|
||||
if c.FolderID == "" {
|
||||
// We should try to figure out the folder ID
|
||||
folderID, err := c.getFolderID()
|
||||
if err != nil {
|
||||
log.Println("No --folder-id given and couldn't read folder token")
|
||||
return fmt.Errorf("getting folder ID: %w", err)
|
||||
}
|
||||
|
||||
c.FolderID = folderID
|
||||
if c.Verbose {
|
||||
log.Println("Found folder ID:", c.FolderID)
|
||||
}
|
||||
}
|
||||
|
||||
c.folderKey = protocol.KeyFromPassword(c.FolderID, c.Password)
|
||||
|
||||
return c.walk()
|
||||
}
|
||||
|
||||
// walk finds and processes every file in the encrypted folder
|
||||
func (c *CLI) walk() error {
|
||||
srcFs := fs.NewFilesystem(fs.FilesystemTypeBasic, c.Path)
|
||||
var dstFs fs.Filesystem
|
||||
if c.To != "" {
|
||||
dstFs = fs.NewFilesystem(fs.FilesystemTypeBasic, c.To)
|
||||
}
|
||||
|
||||
return srcFs.Walk(".", func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsRegular() {
|
||||
return nil
|
||||
}
|
||||
if fs.IsInternal(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.withContinue(c.process(srcFs, dstFs, path))
|
||||
})
|
||||
}
|
||||
|
||||
// If --continue was set we just mention the error and return nil to
|
||||
// continue processing.
|
||||
func (c *CLI) withContinue(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if c.Continue {
|
||||
log.Println("Warning:", err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// getFolderID returns the folder ID found in the encrypted token, or an
|
||||
// error.
|
||||
func (c *CLI) getFolderID() (string, error) {
|
||||
tokenPath := filepath.Join(c.Path, c.TokenPath)
|
||||
bs, err := ioutil.ReadFile(tokenPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("reading folder token: %w", err)
|
||||
}
|
||||
|
||||
var tok storedEncryptionToken
|
||||
if err := json.Unmarshal(bs, &tok); err != nil {
|
||||
return "", fmt.Errorf("parsing folder token: %w", err)
|
||||
}
|
||||
|
||||
return tok.FolderID, nil
|
||||
}
|
||||
|
||||
// process handles the file named path in srcFs, decrypting it into dstFs
|
||||
// unless dstFs is nil.
|
||||
func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) error {
|
||||
if c.Verbose {
|
||||
log.Printf("Processing %q", path)
|
||||
}
|
||||
|
||||
encFd, err := srcFs.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer encFd.Close()
|
||||
|
||||
encFi, err := c.loadEncryptedFileInfo(encFd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: loading metadata trailer: %w", path, err)
|
||||
}
|
||||
|
||||
// Workaround for a bug in <= v1.15.0-rc.5 where we stored names
|
||||
// in native format, while protocol expects wire format (slashes).
|
||||
encFi.Name = osutil.NormalizedFilename(encFi.Name)
|
||||
|
||||
plainFi, err := protocol.DecryptFileInfo(*encFi, c.folderKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: decrypting metadata: %w", path, err)
|
||||
}
|
||||
|
||||
if c.Verbose {
|
||||
log.Printf("Plaintext filename is %q", plainFi.Name)
|
||||
}
|
||||
|
||||
var plainFd fs.File
|
||||
if dstFs != nil {
|
||||
if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0700); err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
|
||||
plainFd, err = dstFs.Create(plainFi.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
defer plainFd.Close() // also closed explicitly in the return
|
||||
}
|
||||
|
||||
if err := c.decryptFile(encFi, &plainFi, encFd, plainFd); err != nil {
|
||||
// Decrypting the file failed, leaving it in an inconsistent state.
|
||||
// Delete it. Even --continue currently doesn't mean "leave broken
|
||||
// stuff in place", it just means "try the next file instead of
|
||||
// aborting".
|
||||
if plainFd != nil {
|
||||
_ = dstFs.Remove(plainFd.Name())
|
||||
}
|
||||
return fmt.Errorf("%s: %s: %w", path, plainFi.Name, err)
|
||||
} else if c.Verbose {
|
||||
log.Printf("Data verified for %q", plainFi.Name)
|
||||
}
|
||||
|
||||
if plainFd != nil {
|
||||
return plainFd.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decryptFile reads, decrypts and verifies all the blocks in src, writing
|
||||
// it to dst if dst is non-nil. (If dst is nil it just becomes a
|
||||
// read-and-verify operation.)
|
||||
func (c *CLI) decryptFile(encFi *protocol.FileInfo, plainFi *protocol.FileInfo, src io.ReaderAt, dst io.WriterAt) error {
|
||||
// The encrypted and plaintext files must consist of an equal number of blocks
|
||||
if len(encFi.Blocks) != len(plainFi.Blocks) {
|
||||
return fmt.Errorf("block count mismatch: encrypted %d != plaintext %d", len(encFi.Blocks), len(plainFi.Blocks))
|
||||
}
|
||||
|
||||
fileKey := protocol.FileKey(plainFi.Name, c.folderKey)
|
||||
for i, encBlock := range encFi.Blocks {
|
||||
// Read the encrypted block
|
||||
buf := make([]byte, encBlock.Size)
|
||||
if _, err := src.ReadAt(buf, encBlock.Offset); err != nil {
|
||||
return fmt.Errorf("encrypted block %d (%d bytes): %w", i, encBlock.Size, err)
|
||||
}
|
||||
|
||||
// Decrypt it
|
||||
dec, err := protocol.DecryptBytes(buf, fileKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encrypted block %d (%d bytes): %w", i, encBlock.Size, err)
|
||||
}
|
||||
|
||||
// Verify the block size against the expected plaintext
|
||||
plainBlock := plainFi.Blocks[i]
|
||||
if i == len(plainFi.Blocks)-1 && len(dec) > plainBlock.Size {
|
||||
// The last block might be padded, which is fine (we skip the padding)
|
||||
dec = dec[:plainBlock.Size]
|
||||
} else if len(dec) != plainBlock.Size {
|
||||
return fmt.Errorf("plaintext block %d size mismatch, actual %d != expected %d", i, len(dec), plainBlock.Size)
|
||||
}
|
||||
|
||||
// Verify the hash against the plaintext block info
|
||||
if !scanner.Validate(dec, plainBlock.Hash, 0) {
|
||||
// The block decrypted correctly but fails the hash check. This
|
||||
// is odd and unexpected, but it's still a valid block from
|
||||
// the source. The file might have changed while we pulled it?
|
||||
err := fmt.Errorf("plaintext block %d (%d bytes) failed validation after decryption", i, plainBlock.Size)
|
||||
if c.Continue {
|
||||
log.Printf("Warning: %s: %s: %v", encFi.Name, plainFi.Name, err)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Write it to the destination, unless we're just verifying.
|
||||
if dst != nil {
|
||||
if _, err := dst.WriteAt(dec, plainBlock.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadEncryptedFileInfo loads the encrypted FileInfo trailer from a file on
|
||||
// disk.
|
||||
func (c *CLI) loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
// Seek to the size of the trailer block
|
||||
if _, err := fd.Seek(-4, io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var bs [4]byte
|
||||
if _, err := io.ReadFull(fd, bs[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
size := int64(binary.BigEndian.Uint32(bs[:]))
|
||||
|
||||
// Seek to the start of the trailer
|
||||
if _, err := fd.Seek(-(4 + size), io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trailer := make([]byte, size)
|
||||
if _, err := io.ReadFull(fd, trailer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &encFi, nil
|
||||
}
|
||||
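loadEncryptedFileInfo above expects the encrypted file to end with a metadata trailer followed by a 4-byte big-endian length of that trailer. A self-contained sketch of reading that framing (the JSON payload stands in for the marshalled protocol.FileInfo; this is an illustration, not the production reader):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"log"
)

// readTrailer reads the length-suffixed trailer from the end of r.
func readTrailer(r io.ReadSeeker) ([]byte, error) {
	// The last 4 bytes hold the trailer size.
	if _, err := r.Seek(-4, io.SeekEnd); err != nil {
		return nil, err
	}
	var szBuf [4]byte
	if _, err := io.ReadFull(r, szBuf[:]); err != nil {
		return nil, err
	}
	size := int64(binary.BigEndian.Uint32(szBuf[:]))

	// The trailer itself sits immediately before the size field.
	if _, err := r.Seek(-(4 + size), io.SeekEnd); err != nil {
		return nil, err
	}
	trailer := make([]byte, size)
	if _, err := io.ReadFull(r, trailer); err != nil {
		return nil, err
	}
	return trailer, nil
}

func main() {
	trailer := []byte(`{"name":"example"}`) // stand-in for a marshalled FileInfo
	var f bytes.Buffer
	f.WriteString("...encrypted block data...")
	f.Write(trailer)
	var szBuf [4]byte
	binary.BigEndian.PutUint32(szBuf[:], uint32(len(trailer)))
	f.Write(szBuf[:])

	got, err := readTrailer(bytes.NewReader(f.Bytes()))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(got))
}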
@@ -11,24 +11,17 @@ import (
	"os"
	"runtime"
	"runtime/pprof"
	"strconv"
	"syscall"
	"time"
)

func init() {
	if innerProcess && os.Getenv("STHEAPPROFILE") != "" {
		rate := 1
		if i, err := strconv.Atoi(os.Getenv("STHEAPPROFILE")); err == nil {
			rate = i
		}
		l.Debugln("Starting heap profiling")
		go func() {
			err := saveHeapProfiles(rate) // Only returns on error
			l.Warnln("Heap profiler failed:", err)
			panic("Heap profiler failed")
		}()
	}
func startHeapProfiler() {
	l.Debugln("Starting heap profiling")
	go func() {
		err := saveHeapProfiles(1) // Only returns on error
		l.Warnln("Heap profiler failed:", err)
		panic("Heap profiler failed")
	}()
}

func saveHeapProfiles(rate int) error {

@@ -21,11 +21,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/svcutil"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/syncthing"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -35,51 +36,58 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
countRestarts = 4
|
||||
loopThreshold = 60 * time.Second
|
||||
restartCounts = 4
|
||||
restartPause = 1 * time.Second
|
||||
restartLoopThreshold = 60 * time.Second
|
||||
logFileAutoCloseDelay = 5 * time.Second
|
||||
logFileMaxOpenTime = time.Minute
|
||||
panicUploadMaxWait = 30 * time.Second
|
||||
panicUploadNoticeWait = 10 * time.Second
|
||||
)
|
||||
|
||||
func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
func monitorMain(options serveOptions) {
|
||||
l.SetPrefix("[monitor] ")
|
||||
|
||||
var dst io.Writer = os.Stdout
|
||||
|
||||
logFile := runtimeOptions.logFile
|
||||
logFile := options.LogFile
|
||||
if logFile != "-" {
|
||||
if expanded, err := fs.ExpandTilde(logFile); err == nil {
|
||||
logFile = expanded
|
||||
}
|
||||
var fileDst io.Writer
|
||||
if runtimeOptions.logMaxSize > 0 {
|
||||
open := func(name string) (io.WriteCloser, error) {
|
||||
return newAutoclosedFile(name, logFileAutoCloseDelay, logFileMaxOpenTime), nil
|
||||
}
|
||||
fileDst = newRotatedFile(logFile, open, int64(runtimeOptions.logMaxSize), runtimeOptions.logMaxFiles)
|
||||
var err error
|
||||
open := func(name string) (io.WriteCloser, error) {
|
||||
return newAutoclosedFile(name, logFileAutoCloseDelay, logFileMaxOpenTime)
|
||||
}
|
||||
if options.LogMaxSize > 0 {
|
||||
fileDst, err = newRotatedFile(logFile, open, int64(options.LogMaxSize), options.LogMaxFiles)
|
||||
} else {
|
||||
fileDst = newAutoclosedFile(logFile, logFileAutoCloseDelay, logFileMaxOpenTime)
|
||||
fileDst, err = open(logFile)
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// Translate line breaks to Windows standard
|
||||
fileDst = osutil.ReplacingWriter{
|
||||
Writer: fileDst,
|
||||
From: '\n',
|
||||
To: []byte{'\r', '\n'},
|
||||
if err != nil {
|
||||
l.Warnln("Failed to setup logging to file, proceeding with logging to stdout only:", err)
|
||||
} else {
|
||||
if runtime.GOOS == "windows" {
|
||||
// Translate line breaks to Windows standard
|
||||
fileDst = osutil.ReplacingWriter{
|
||||
Writer: fileDst,
|
||||
From: '\n',
|
||||
To: []byte{'\r', '\n'},
|
||||
}
|
||||
}
|
||||
|
||||
// Log to both stdout and file.
|
||||
dst = io.MultiWriter(dst, fileDst)
|
||||
|
||||
l.Infof(`Log output saved to file "%s"`, logFile)
|
||||
}
|
||||
|
||||
// Log to both stdout and file.
|
||||
dst = io.MultiWriter(dst, fileDst)
|
||||
|
||||
l.Infof(`Log output saved to file "%s"`, logFile)
|
||||
}
|
||||
|
||||
args := os.Args
|
||||
var restarts [countRestarts]time.Time
|
||||
var restarts [restartCounts]time.Time
|
||||
|
||||
stopSign := make(chan os.Signal, 1)
|
||||
sigTerm := syscall.Signal(15)
|
||||
signal.Notify(stopSign, os.Interrupt, sigTerm)
|
||||
restartSign := make(chan os.Signal, 1)
|
||||
sigHup := syscall.Signal(1)
|
||||
@@ -90,9 +98,9 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
for {
|
||||
maybeReportPanics()
|
||||
|
||||
if t := time.Since(restarts[0]); t < loopThreshold {
|
||||
l.Warnf("%d restarts in %v; not retrying further", countRestarts, t)
|
||||
os.Exit(syncthing.ExitError.AsInt())
|
||||
if t := time.Since(restarts[0]); t < restartLoopThreshold {
|
||||
l.Warnf("%d restarts in %v; not retrying further", restartCounts, t)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
copy(restarts[0:], restarts[1:])
|
||||
@@ -111,7 +119,7 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
l.Infoln("Starting syncthing")
|
||||
l.Debugln("Starting syncthing")
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
l.Warnln("Error starting the main Syncthing process:", err)
|
||||
@@ -144,12 +152,13 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
exit <- cmd.Wait()
|
||||
}()
|
||||
|
||||
stopped := false
|
||||
select {
|
||||
case s := <-stopSign:
|
||||
l.Infof("Signal %d received; exiting", s)
|
||||
cmd.Process.Signal(sigTerm)
|
||||
<-exit
|
||||
return
|
||||
err = <-exit
|
||||
stopped = true
|
||||
|
||||
case s := <-restartSign:
|
||||
l.Infof("Signal %d received; restarting", s)
|
||||
@@ -157,24 +166,35 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
err = <-exit
|
||||
|
||||
case err = <-exit:
|
||||
if err == nil {
|
||||
// Successful exit indicates an intentional shutdown
|
||||
return
|
||||
} else if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
if exiterr.ExitCode() == syncthing.ExitUpgrade.AsInt() {
|
||||
// Restart the monitor process to release the .old
|
||||
// binary as part of the upgrade process.
|
||||
l.Infoln("Restarting monitor...")
|
||||
if err = restartMonitor(args); err != nil {
|
||||
l.Warnln("Restart:", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Successful exit indicates an intentional shutdown
|
||||
os.Exit(svcutil.ExitSuccess.AsInt())
|
||||
}
|
||||
|
||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
exitCode := exiterr.ExitCode()
|
||||
if stopped || options.NoRestart {
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
if exitCode == svcutil.ExitUpgrade.AsInt() {
|
||||
// Restart the monitor process to release the .old
|
||||
// binary as part of the upgrade process.
|
||||
l.Infoln("Restarting monitor...")
|
||||
if err = restartMonitor(args); err != nil {
|
||||
l.Warnln("Restart:", err)
|
||||
}
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
}
|
||||
|
||||
if options.NoRestart {
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
l.Infoln("Syncthing exited:", err)
|
||||
time.Sleep(1 * time.Second)
|
||||
time.Sleep(restartPause)
|
||||
|
||||
if first {
|
||||
// Let the next child process know that this is not the first time
|
||||
@@ -292,6 +312,11 @@ func copyStdout(stdout io.Reader, dst io.Writer) {
|
||||
}
|
||||
|
||||
func restartMonitor(args []string) error {
|
||||
// Set the STRESTART environment variable to indicate to the next
|
||||
// process that this is a restart and not initial start. This prevents
|
||||
// opening the browser on startup.
|
||||
os.Setenv("STRESTART", "yes")
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
// syscall.Exec is the cleanest way to restart on Unixes as it
|
||||
// replaces the current process with the new one, keeping the pid and
|
||||
@@ -338,16 +363,30 @@ type rotatedFile struct {
|
||||
currentSize int64
|
||||
}
|
||||
|
||||
// the createFn should act equivalently to os.Create
|
||||
type createFn func(name string) (io.WriteCloser, error)
|
||||
|
||||
func newRotatedFile(name string, create createFn, maxSize int64, maxFiles int) *rotatedFile {
|
||||
return &rotatedFile{
|
||||
name: name,
|
||||
create: create,
|
||||
maxSize: maxSize,
|
||||
maxFiles: maxFiles,
|
||||
func newRotatedFile(name string, create createFn, maxSize int64, maxFiles int) (*rotatedFile, error) {
|
||||
var size int64
|
||||
if info, err := os.Lstat(name); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
size = 0
|
||||
} else {
|
||||
size = info.Size()
|
||||
}
|
||||
writer, err := create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rotatedFile{
|
||||
name: name,
|
||||
create: create,
|
||||
maxSize: maxSize,
|
||||
maxFiles: maxFiles,
|
||||
currentFile: writer,
|
||||
currentSize: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
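Compared to the old constructor, newRotatedFile now opens the log eagerly and seeds currentSize from any existing file, so setup errors surface immediately and the rotation threshold keeps working across monitor restarts. A minimal usage sketch, assuming it compiles in the same package (the plain os.OpenFile createFn is illustrative; the monitor wires in newAutoclosedFile as shown above):

// Illustrative only; assumes "io" and "os" are imported in this package.
func setupRotatedLog() (*rotatedFile, error) {
	open := func(name string) (io.WriteCloser, error) {
		return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	}
	// 10 MiB per file, keep 3 rotated copies alongside the live log.
	return newRotatedFile("syncthing.log", open, 10<<20, 3)
}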
func (r *rotatedFile) Write(bs []byte) (int, error) {
|
||||
@@ -355,19 +394,13 @@ func (r *rotatedFile) Write(bs []byte) (int, error) {
|
||||
// file so we'll start on a new one.
|
||||
if r.currentSize+int64(len(bs)) > r.maxSize {
|
||||
r.currentFile.Close()
|
||||
r.currentFile = nil
|
||||
r.currentSize = 0
|
||||
}
|
||||
|
||||
// If we have no current log, rotate old files out of the way and create
|
||||
// a new one.
|
||||
if r.currentFile == nil {
|
||||
r.rotate()
|
||||
fd, err := r.create(r.name)
|
||||
f, err := r.create(r.name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
r.currentFile = fd
|
||||
r.currentFile = f
|
||||
}
|
||||
|
||||
n, err := r.currentFile.Write(bs)
|
||||
@@ -420,7 +453,7 @@ type autoclosedFile struct {
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) *autoclosedFile {
|
||||
func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) (*autoclosedFile, error) {
|
||||
f := &autoclosedFile{
|
||||
name: name,
|
||||
closeDelay: closeDelay,
|
||||
@@ -429,8 +462,13 @@ func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) *auto
|
||||
closed: make(chan struct{}),
|
||||
closeTimer: time.NewTimer(time.Minute),
|
||||
}
|
||||
f.mut.Lock()
|
||||
defer f.mut.Unlock()
|
||||
if err := f.ensureOpenLocked(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go f.closerLoop()
|
||||
return f
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *autoclosedFile) Write(bs []byte) (int, error) {
|
||||
@@ -438,7 +476,7 @@ func (f *autoclosedFile) Write(bs []byte) (int, error) {
|
||||
defer f.mut.Unlock()
|
||||
|
||||
// Make sure the file is open for appending
|
||||
if err := f.ensureOpen(); err != nil {
|
||||
if err := f.ensureOpenLocked(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -468,22 +506,14 @@ func (f *autoclosedFile) Close() error {
|
||||
}
|
||||
|
||||
// Must be called with f.mut held!
|
||||
func (f *autoclosedFile) ensureOpen() error {
|
||||
func (f *autoclosedFile) ensureOpenLocked() error {
|
||||
if f.fd != nil {
|
||||
// File is already open
|
||||
return nil
|
||||
}
|
||||
|
||||
// We open the file for write only, and create it if it doesn't exist.
|
||||
flags := os.O_WRONLY | os.O_CREATE
|
||||
if f.opened.IsZero() {
|
||||
// This is the first time we are opening the file. We should truncate
|
||||
// it to better emulate an os.Create() call.
|
||||
flags |= os.O_TRUNC
|
||||
} else {
|
||||
// The file was already opened once, so we should append to it.
|
||||
flags |= os.O_APPEND
|
||||
}
|
||||
flags := os.O_WRONLY | os.O_CREATE | os.O_APPEND
|
||||
|
||||
fd, err := os.OpenFile(f.name, flags, 0644)
|
||||
if err != nil {
|
||||
@@ -534,7 +564,7 @@ func childEnv() []string {
|
||||
// panicUploadMaxWait uploading panics...
|
||||
func maybeReportPanics() {
|
||||
// Try to get a config to see if/where panics should be reported.
|
||||
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger, true)
|
||||
if err != nil {
|
||||
l.Warnln("Couldn't load config; not reporting crash")
|
||||
return
|
||||
|
||||
@@ -33,7 +33,10 @@ func TestRotatedFile(t *testing.T) {
|
||||
maxSize := int64(len(testData) + len(testData)/2)
|
||||
|
||||
// We allow the log file plus two rotated copies.
|
||||
rf := newRotatedFile(logName, open, maxSize, 2)
|
||||
rf, err := newRotatedFile(logName, open, maxSize, 2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write some bytes.
|
||||
if _, err := rf.Write(testData); err != nil {
|
||||
@@ -140,7 +143,10 @@ func TestAutoClosedFile(t *testing.T) {
|
||||
data := []byte("hello, world\n")
|
||||
|
||||
// An autoclosed file that closes very quickly
|
||||
ac := newAutoclosedFile(file, time.Millisecond, time.Millisecond)
|
||||
ac, err := newAutoclosedFile(file, time.Millisecond, time.Millisecond)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write some data.
|
||||
if _, err := ac.Write(data); err != nil {
|
||||
@@ -182,21 +188,23 @@ func TestAutoClosedFile(t *testing.T) {
|
||||
}
|
||||
|
||||
// Open the file again.
|
||||
ac = newAutoclosedFile(file, time.Second, time.Second)
|
||||
ac, err = newAutoclosedFile(file, time.Second, time.Second)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Write something
|
||||
if _, err := ac.Write(data); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// It should now contain only one write, because the first open
|
||||
// should be a truncate.
|
||||
// It should now contain three writes, as the file is always opened for appending
|
||||
bs, err = ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(bs) != len(data) {
|
||||
t.Fatalf("Write failed, expected %d bytes, not %d", len(data), len(bs))
|
||||
if len(bs) != 3*len(data) {
|
||||
t.Fatalf("Write failed, expected %d bytes, not %d", 3*len(data), len(bs))
|
||||
}
|
||||
|
||||
// Close.
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package api
|
||||
// +build !windows
|
||||
|
||||
type mockedCPUService struct{}
|
||||
package main
|
||||
|
||||
func (*mockedCPUService) Rate() float64 {
|
||||
return 42
|
||||
type buildServeOptions struct {
|
||||
HideConsole bool `hidden:""`
|
||||
}
|
||||
cmd/syncthing/options_windows.go (new file, 11 lines)
@@ -0,0 +1,11 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

type buildServeOptions struct {
	HideConsole bool `name:"no-console" help:"Hide console window"`
}
|
||||
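The `hidden:""` and `name:"no-console" help:"..."` tag syntax matches the github.com/alecthomas/kong command-line parser; whether kong is what parses these structs is not visible in this hunk, so treat the following standalone sketch as an assumption about how such a flag would surface:

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

type cli struct {
	// Mirrors the Windows-only option above: exposed as --no-console.
	NoConsole bool `name:"no-console" help:"Hide console window"`
}

func main() {
	var opts cli
	kong.Parse(&opts)
	fmt.Println("no-console:", opts.NoConsole)
}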
@@ -18,10 +18,8 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STPERFSTATS") != "" {
|
||||
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
|
||||
}
|
||||
func startPerfStats() {
|
||||
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
|
||||
}
|
||||
|
||||
func savePerfStats(file string) {
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
// Copyright (C) 2017 The Syncthing Authors.
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build noupgrade
|
||||
// +build solaris windows
|
||||
|
||||
package build
|
||||
package main
|
||||
|
||||
func init() {
|
||||
Tags = append(Tags, "noupgrade")
|
||||
func startPerfStats() {
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
func optionTable(w io.Writer, rows [][]string) {
|
||||
tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0)
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i > 0 {
|
||||
tw.Write([]byte("\t"))
|
||||
}
|
||||
tw.Write([]byte(cell))
|
||||
}
|
||||
tw.Write([]byte("\n"))
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
func usageFor(fs *flag.FlagSet, usage string, extra string) func() {
|
||||
return func() {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("Usage:\n " + usage + "\n")
|
||||
|
||||
var options [][]string
|
||||
fs.VisitAll(func(f *flag.Flag) {
|
||||
var opt = " -" + f.Name
|
||||
|
||||
if f.DefValue != "false" {
|
||||
opt += "=" + fmt.Sprintf(`"%s"`, f.DefValue)
|
||||
}
|
||||
options = append(options, []string{opt, f.Usage})
|
||||
})
|
||||
|
||||
if len(options) > 0 {
|
||||
b.WriteString("\nOptions:\n")
|
||||
optionTable(&b, options)
|
||||
}
|
||||
|
||||
fmt.Println(b.String())
|
||||
|
||||
if len(extra) > 0 {
|
||||
fmt.Println(extra)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -135,30 +135,30 @@ func setupDB(db *sql.DB) error {
|
||||
|
||||
row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, err = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
|
||||
_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
|
||||
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'MovementDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX MovementDayIndex ON UserMovement (Day)`)
|
||||
_, _ = db.Exec(`CREATE INDEX MovementDayIndex ON UserMovement (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
|
||||
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
|
||||
_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
|
||||
}
|
||||
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func maxIndexedDay(db *sql.DB, table string) time.Time {
|
||||
@@ -175,13 +175,13 @@ func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO VersionSummary (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
SUBSTRING(Version FROM '^v\d.\d+') AS Ver,
|
||||
SUBSTRING(Report->>'version' FROM '^v\d.\d+') AS Ver,
|
||||
COUNT(*) AS Count
|
||||
FROM Reports
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
DATE_TRUNC('day', Received) > $1
|
||||
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
|
||||
AND Version like 'v_.%'
|
||||
AND Report->>'version' like 'v_.%'
|
||||
GROUP BY Day, Ver
|
||||
);
|
||||
`, since)
|
||||
@@ -195,11 +195,11 @@ func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
|
||||
func aggregateUserMovement(db *sql.DB) (int64, error) {
|
||||
rows, err := db.Query(`SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
UniqueID
|
||||
FROM Reports
|
||||
Report->>'uniqueID'
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
|
||||
AND Version like 'v_.%'
|
||||
AND Report->>'version' like 'v_.%'
|
||||
ORDER BY Day
|
||||
`)
|
||||
if err != nil {
|
||||
@@ -276,16 +276,16 @@ func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
|
||||
res, err := db.Exec(`INSERT INTO Performance (
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
AVG(TotFiles) As TotFiles,
|
||||
AVG(TotMiB) As TotMiB,
|
||||
AVG(SHA256Perf) As SHA256Perf,
|
||||
AVG(MemorySize) As MemorySize,
|
||||
AVG(MemoryUsageMiB) As MemoryUsageMiB
|
||||
FROM Reports
|
||||
AVG((Report->>'totFiles')::numeric) As TotFiles,
|
||||
AVG((Report->>'totMiB')::numeric) As TotMiB,
|
||||
AVG((Report->>'sha256Perf')::numeric) As SHA256Perf,
|
||||
AVG((Report->>'memorySize')::numeric) As MemorySize,
|
||||
AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
DATE_TRUNC('day', Received) > $1
|
||||
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
|
||||
AND Version like 'v_.%'
|
||||
AND Report->>'version' like 'v_.%'
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
@@ -303,22 +303,22 @@ func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
|
||||
SELECT
|
||||
DATE_TRUNC('day', Received) AS Day,
|
||||
COUNT(1) As Reports,
|
||||
SUM(BlocksTotal) AS Total,
|
||||
SUM(BlocksRenamed) AS Renamed,
|
||||
SUM(BlocksReused) AS Reused,
|
||||
SUM(BlocksPulled) AS Pulled,
|
||||
SUM(BlocksCopyOrigin) AS CopyOrigin,
|
||||
SUM(BlocksCopyOriginShifted) AS CopyOriginShifted,
|
||||
SUM(BlocksCopyElsewhere) AS CopyElsewhere
|
||||
FROM Reports
|
||||
SUM((Report->'blockStats'->>'total')::numeric) AS Total,
|
||||
SUM((Report->'blockStats'->>'renamed')::numeric) AS Renamed,
|
||||
SUM((Report->'blockStats'->>'reused')::numeric) AS Reused,
|
||||
SUM((Report->'blockStats'->>'pulled')::numeric) AS Pulled,
|
||||
SUM((Report->'blockStats'->>'copyOrigin')::numeric) AS CopyOrigin,
|
||||
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric) AS CopyOriginShifted,
|
||||
SUM((Report->'blockStats'->>'copyElsewhere')::numeric) AS CopyElsewhere
|
||||
FROM ReportsJson
|
||||
WHERE
|
||||
DATE_TRUNC('day', Received) > $1
|
||||
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
|
||||
AND ReportVersion = 3
|
||||
AND Version like 'v_.%'
|
||||
AND Version NOT LIKE 'v0.14.40%'
|
||||
AND Version NOT LIKE 'v0.14.39%'
|
||||
AND Version NOT LIKE 'v0.14.38%'
|
||||
AND (Report->>'urVersion')::numeric >= 3
|
||||
AND Report->>'version' like 'v_.%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.40%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.39%'
|
||||
AND Report->>'version' NOT LIKE 'v0.14.38%'
|
||||
GROUP BY Day
|
||||
);
|
||||
`, since)
|
||||
|
||||
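These aggregation queries switch from dedicated columns on Reports to extracting fields out of the JSONB Report column on ReportsJson; `->>` yields text, hence the `::numeric` casts before SUM and AVG. A minimal sketch of the same access pattern from Go (connection setup omitted; assumes the lib/pq driver is registered):

package sketch

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

// versionCounts tallies yesterday-and-newer reports per reported version,
// using the same JSONB extraction style as the queries above.
func versionCounts(db *sql.DB) error {
	rows, err := db.Query(`
		SELECT Report->>'version', COUNT(*)
		FROM ReportsJson
		WHERE Received > now() - '1 day'::INTERVAL
		GROUP BY 1`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var version string
		var count int
		if err := rows.Scan(&version, &count); err != nil {
			return err
		}
		fmt.Println(version, count)
	}
	return rows.Err()
}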
@@ -114,6 +114,23 @@ func statsForInts(data []int) [4]float64 {
|
||||
return res
|
||||
}
|
||||
|
||||
func statsForInt64s(data []int64) [4]float64 {
	var res [4]float64
	if len(data) == 0 {
		return res
	}

	sort.Slice(data, func(a, b int) bool {
		return data[a] < data[b]
	})

	res[0] = float64(data[int(float64(len(data))*0.05)])
	res[1] = float64(data[len(data)/2])
	res[2] = float64(data[int(float64(len(data))*0.95)])
	res[3] = float64(data[len(data)-1])
	return res
}
|
||||
|
||||
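statsForInt64s mirrors the existing statsForInts for the new int64 slices: after sorting it returns the 5th percentile, median, 95th percentile and maximum. For example, with made-up input (same package as the function above):

data := []int64{10, 20, 30, 40, 50, 60, 70, 80, 90, 100}
res := statsForInt64s(data)
// res[0] == 10  (5th percentile, index int(10*0.05) == 0)
// res[1] == 60  (median, index len/2 == 5)
// res[2] == 100 (95th percentile, index int(10*0.95) == 9)
// res[3] == 100 (maximum, last element)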
func statsForFloats(data []float64) [4]float64 {
|
||||
var res [4]float64
|
||||
if len(data) == 0 {
|
||||
|
||||
@@ -10,10 +10,7 @@ import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -23,13 +20,16 @@ import (
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/lib/pq"
|
||||
geoip2 "github.com/oschwald/geoip2-golang"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"github.com/syncthing/syncthing/lib/ur/contract"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,7 +55,8 @@ var (
|
||||
{regexp.MustCompile("jenkins@build.syncthing.net"), "GitHub"},
|
||||
{regexp.MustCompile("snap@build.syncthing.net"), "Snapcraft"},
|
||||
{regexp.MustCompile("android-.*vagrant@basebox-stretch64"), "F-Droid"},
|
||||
{regexp.MustCompile("builduser@svetlemodry"), "Arch (3rd party)"},
|
||||
{regexp.MustCompile("builduser@(archlinux|svetlemodry)"), "Arch (3rd party)"},
|
||||
{regexp.MustCompile("synology@kastelo.net"), "Synology (Kastelo)"},
|
||||
{regexp.MustCompile("@debian"), "Debian (3rd party)"},
|
||||
{regexp.MustCompile("@fedora"), "Fedora (3rd party)"},
|
||||
{regexp.MustCompile(`\bbrew@`), "Homebrew (3rd party)"},
|
||||
@@ -88,7 +89,7 @@ var funcs = map[string]interface{}{
|
||||
parts = append(parts, part)
|
||||
}
|
||||
if len(input) > 0 {
|
||||
parts = append(parts, input[:])
|
||||
parts = append(parts, input)
|
||||
}
|
||||
return parts[whichPart-1]
|
||||
},
|
||||
@@ -101,585 +102,44 @@ func getEnvDefault(key, def string) string {
|
||||
return def
|
||||
}
|
||||
|
||||
type IntMap map[string]int
|
||||
|
||||
func (p IntMap) Value() (driver.Value, error) {
|
||||
return json.Marshal(p)
|
||||
}
|
||||
|
||||
func (p *IntMap) Scan(src interface{}) error {
|
||||
source, ok := src.([]byte)
|
||||
if !ok {
|
||||
return errors.New("Type assertion .([]byte) failed.")
|
||||
}
|
||||
|
||||
var i map[string]int
|
||||
err := json.Unmarshal(source, &i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*p = i
|
||||
return nil
|
||||
}
|
||||
|
||||
type report struct {
|
||||
Received time.Time // Only from DB
|
||||
|
||||
UniqueID string
|
||||
Version string
|
||||
LongVersion string
|
||||
Platform string
|
||||
NumFolders int
|
||||
NumDevices int
|
||||
TotFiles int
|
||||
FolderMaxFiles int
|
||||
TotMiB int
|
||||
FolderMaxMiB int
|
||||
MemoryUsageMiB int
|
||||
SHA256Perf float64
|
||||
MemorySize int
|
||||
|
||||
// v2 fields
|
||||
|
||||
URVersion int
|
||||
NumCPU int
|
||||
FolderUses struct {
|
||||
SendOnly int
|
||||
ReceiveOnly int
|
||||
IgnorePerms int
|
||||
IgnoreDelete int
|
||||
AutoNormalize int
|
||||
SimpleVersioning int
|
||||
ExternalVersioning int
|
||||
StaggeredVersioning int
|
||||
TrashcanVersioning int
|
||||
}
|
||||
DeviceUses struct {
|
||||
Introducer int
|
||||
CustomCertName int
|
||||
CompressAlways int
|
||||
CompressMetadata int
|
||||
CompressNever int
|
||||
DynamicAddr int
|
||||
StaticAddr int
|
||||
}
|
||||
Announce struct {
|
||||
GlobalEnabled bool
|
||||
LocalEnabled bool
|
||||
DefaultServersDNS int
|
||||
DefaultServersIP int
|
||||
OtherServers int
|
||||
}
|
||||
Relays struct {
|
||||
Enabled bool
|
||||
DefaultServers int
|
||||
OtherServers int
|
||||
}
|
||||
UsesRateLimit bool
|
||||
UpgradeAllowedManual bool
|
||||
UpgradeAllowedAuto bool
|
||||
|
||||
// V2.5 fields (fields that were in v2 but never added to the database
|
||||
UpgradeAllowedPre bool
|
||||
RescanIntvs pq.Int64Array
|
||||
|
||||
// v3 fields
|
||||
|
||||
Uptime int
|
||||
NATType string
|
||||
AlwaysLocalNets bool
|
||||
CacheIgnoredFiles bool
|
||||
OverwriteRemoteDeviceNames bool
|
||||
ProgressEmitterEnabled bool
|
||||
CustomDefaultFolderPath bool
|
||||
WeakHashSelection string
|
||||
CustomTrafficClass bool
|
||||
CustomTempIndexMinBlocks bool
|
||||
TemporariesDisabled bool
|
||||
TemporariesCustom bool
|
||||
LimitBandwidthInLan bool
|
||||
CustomReleaseURL bool
|
||||
RestartOnWakeup bool
|
||||
CustomStunServers bool
|
||||
|
||||
FolderUsesV3 struct {
|
||||
ScanProgressDisabled int
|
||||
ConflictsDisabled int
|
||||
ConflictsUnlimited int
|
||||
ConflictsOther int
|
||||
DisableSparseFiles int
|
||||
DisableTempIndexes int
|
||||
AlwaysWeakHash int
|
||||
CustomWeakHashThreshold int
|
||||
FsWatcherEnabled int
|
||||
PullOrder IntMap
|
||||
FilesystemType IntMap
|
||||
FsWatcherDelays pq.Int64Array
|
||||
}
|
||||
|
||||
GUIStats struct {
|
||||
Enabled int
|
||||
UseTLS int
|
||||
UseAuth int
|
||||
InsecureAdminAccess int
|
||||
Debugging int
|
||||
InsecureSkipHostCheck int
|
||||
InsecureAllowFrameLoading int
|
||||
ListenLocal int
|
||||
ListenUnspecified int
|
||||
Theme IntMap
|
||||
}
|
||||
|
||||
BlockStats struct {
|
||||
Total int
|
||||
Renamed int
|
||||
Reused int
|
||||
Pulled int
|
||||
CopyOrigin int
|
||||
CopyOriginShifted int
|
||||
CopyElsewhere int
|
||||
}
|
||||
|
||||
TransportStats IntMap
|
||||
|
||||
IgnoreStats struct {
|
||||
Lines int
|
||||
Inverts int
|
||||
Folded int
|
||||
Deletable int
|
||||
Rooted int
|
||||
Includes int
|
||||
EscapedIncludes int
|
||||
DoubleStars int
|
||||
Stars int
|
||||
}
|
||||
|
||||
// V3 fields added late in the RC
|
||||
WeakHashEnabled bool
|
||||
|
||||
// Generated
|
||||
|
||||
Date string
|
||||
Address string
|
||||
}
|
||||
|
||||
func (r *report) Validate() error {
|
||||
if r.UniqueID == "" || r.Version == "" || r.Platform == "" {
|
||||
return fmt.Errorf("missing required field")
|
||||
}
|
||||
if len(r.Date) != 8 {
|
||||
return fmt.Errorf("date not initialized")
|
||||
}
|
||||
|
||||
// Some fields may not be null.
|
||||
if r.RescanIntvs == nil {
|
||||
r.RescanIntvs = []int64{}
|
||||
}
|
||||
if r.FolderUsesV3.FsWatcherDelays == nil {
|
||||
r.FolderUsesV3.FsWatcherDelays = []int64{}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *report) FieldPointers() []interface{} {
|
||||
// All the fields of the report, in the same order as the database fields.
|
||||
return []interface{}{
|
||||
&r.Received, &r.UniqueID, &r.Version, &r.LongVersion, &r.Platform,
|
||||
&r.NumFolders, &r.NumDevices, &r.TotFiles, &r.FolderMaxFiles,
|
||||
&r.TotMiB, &r.FolderMaxMiB, &r.MemoryUsageMiB, &r.SHA256Perf,
|
||||
&r.MemorySize, &r.Date,
|
||||
// V2
|
||||
&r.URVersion, &r.NumCPU, &r.FolderUses.SendOnly, &r.FolderUses.IgnorePerms,
|
||||
&r.FolderUses.IgnoreDelete, &r.FolderUses.AutoNormalize, &r.DeviceUses.Introducer,
|
||||
&r.DeviceUses.CustomCertName, &r.DeviceUses.CompressAlways,
|
||||
&r.DeviceUses.CompressMetadata, &r.DeviceUses.CompressNever,
|
||||
&r.DeviceUses.DynamicAddr, &r.DeviceUses.StaticAddr,
|
||||
&r.Announce.GlobalEnabled, &r.Announce.LocalEnabled,
|
||||
&r.Announce.DefaultServersDNS, &r.Announce.DefaultServersIP,
|
||||
&r.Announce.OtherServers, &r.Relays.Enabled, &r.Relays.DefaultServers,
|
||||
&r.Relays.OtherServers, &r.UsesRateLimit, &r.UpgradeAllowedManual,
|
||||
&r.UpgradeAllowedAuto, &r.FolderUses.SimpleVersioning,
|
||||
&r.FolderUses.ExternalVersioning, &r.FolderUses.StaggeredVersioning,
|
||||
&r.FolderUses.TrashcanVersioning,
|
||||
|
||||
// V2.5
|
||||
&r.UpgradeAllowedPre, &r.RescanIntvs,
|
||||
|
||||
// V3
|
||||
&r.Uptime, &r.NATType, &r.AlwaysLocalNets, &r.CacheIgnoredFiles,
|
||||
&r.OverwriteRemoteDeviceNames, &r.ProgressEmitterEnabled, &r.CustomDefaultFolderPath,
|
||||
&r.WeakHashSelection, &r.CustomTrafficClass, &r.CustomTempIndexMinBlocks,
|
||||
&r.TemporariesDisabled, &r.TemporariesCustom, &r.LimitBandwidthInLan,
|
||||
&r.CustomReleaseURL, &r.RestartOnWakeup, &r.CustomStunServers,
|
||||
|
||||
&r.FolderUsesV3.ScanProgressDisabled, &r.FolderUsesV3.ConflictsDisabled,
|
||||
&r.FolderUsesV3.ConflictsUnlimited, &r.FolderUsesV3.ConflictsOther,
|
||||
&r.FolderUsesV3.DisableSparseFiles, &r.FolderUsesV3.DisableTempIndexes,
|
||||
&r.FolderUsesV3.AlwaysWeakHash, &r.FolderUsesV3.CustomWeakHashThreshold,
|
||||
&r.FolderUsesV3.FsWatcherEnabled,
|
||||
|
||||
&r.FolderUsesV3.PullOrder, &r.FolderUsesV3.FilesystemType,
|
||||
&r.FolderUsesV3.FsWatcherDelays,
|
||||
|
||||
&r.GUIStats.Enabled, &r.GUIStats.UseTLS, &r.GUIStats.UseAuth,
|
||||
&r.GUIStats.InsecureAdminAccess,
|
||||
&r.GUIStats.Debugging, &r.GUIStats.InsecureSkipHostCheck,
|
||||
&r.GUIStats.InsecureAllowFrameLoading, &r.GUIStats.ListenLocal,
|
||||
&r.GUIStats.ListenUnspecified, &r.GUIStats.Theme,
|
||||
|
||||
&r.BlockStats.Total, &r.BlockStats.Renamed,
|
||||
&r.BlockStats.Reused, &r.BlockStats.Pulled, &r.BlockStats.CopyOrigin,
|
||||
&r.BlockStats.CopyOriginShifted, &r.BlockStats.CopyElsewhere,
|
||||
|
||||
&r.TransportStats,
|
||||
|
||||
&r.IgnoreStats.Lines, &r.IgnoreStats.Inverts, &r.IgnoreStats.Folded,
|
||||
&r.IgnoreStats.Deletable, &r.IgnoreStats.Rooted, &r.IgnoreStats.Includes,
|
||||
&r.IgnoreStats.EscapedIncludes, &r.IgnoreStats.DoubleStars, &r.IgnoreStats.Stars,
|
||||
|
||||
// V3 added late in the RC
|
||||
&r.WeakHashEnabled,
|
||||
&r.Address,
|
||||
|
||||
// Receive only folders
|
||||
&r.FolderUses.ReceiveOnly,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *report) FieldNames() []string {
|
||||
// The database fields that back this struct in PostgreSQL
|
||||
return []string{
|
||||
// V1
|
||||
"Received",
|
||||
"UniqueID",
|
||||
"Version",
|
||||
"LongVersion",
|
||||
"Platform",
|
||||
"NumFolders",
|
||||
"NumDevices",
|
||||
"TotFiles",
|
||||
"FolderMaxFiles",
|
||||
"TotMiB",
|
||||
"FolderMaxMiB",
|
||||
"MemoryUsageMiB",
|
||||
"SHA256Perf",
|
||||
"MemorySize",
|
||||
"Date",
|
||||
// V2
|
||||
"ReportVersion",
|
||||
"NumCPU",
|
||||
"FolderRO",
|
||||
"FolderIgnorePerms",
|
||||
"FolderIgnoreDelete",
|
||||
"FolderAutoNormalize",
|
||||
"DeviceIntroducer",
|
||||
"DeviceCustomCertName",
|
||||
"DeviceCompressAlways",
|
||||
"DeviceCompressMetadata",
|
||||
"DeviceCompressNever",
|
||||
"DeviceDynamicAddr",
|
||||
"DeviceStaticAddr",
|
||||
"AnnounceGlobalEnabled",
|
||||
"AnnounceLocalEnabled",
|
||||
"AnnounceDefaultServersDNS",
|
||||
"AnnounceDefaultServersIP",
|
||||
"AnnounceOtherServers",
|
||||
"RelayEnabled",
|
||||
"RelayDefaultServers",
|
||||
"RelayOtherServers",
|
||||
"RateLimitEnabled",
|
||||
"UpgradeAllowedManual",
|
||||
"UpgradeAllowedAuto",
|
||||
// v0.12.19+
|
||||
"FolderSimpleVersioning",
|
||||
"FolderExternalVersioning",
|
||||
"FolderStaggeredVersioning",
|
||||
"FolderTrashcanVersioning",
|
||||
// V2.5
|
||||
"UpgradeAllowedPre",
|
||||
"RescanIntvs",
|
||||
// V3
|
||||
"Uptime",
|
||||
"NATType",
|
||||
"AlwaysLocalNets",
|
||||
"CacheIgnoredFiles",
|
||||
"OverwriteRemoteDeviceNames",
|
||||
"ProgressEmitterEnabled",
|
||||
"CustomDefaultFolderPath",
|
||||
"WeakHashSelection",
|
||||
"CustomTrafficClass",
|
||||
"CustomTempIndexMinBlocks",
|
||||
"TemporariesDisabled",
|
||||
"TemporariesCustom",
|
||||
"LimitBandwidthInLan",
|
||||
"CustomReleaseURL",
|
||||
"RestartOnWakeup",
|
||||
"CustomStunServers",
|
||||
|
||||
"FolderScanProgressDisabled",
|
||||
"FolderConflictsDisabled",
|
||||
"FolderConflictsUnlimited",
|
||||
"FolderConflictsOther",
|
||||
"FolderDisableSparseFiles",
|
||||
"FolderDisableTempIndexes",
|
||||
"FolderAlwaysWeakHash",
|
||||
"FolderCustomWeakHashThreshold",
|
||||
"FolderFsWatcherEnabled",
|
||||
"FolderPullOrder",
|
||||
"FolderFilesystemType",
|
||||
"FolderFsWatcherDelays",
|
||||
|
||||
"GUIEnabled",
|
||||
"GUIUseTLS",
|
||||
"GUIUseAuth",
|
||||
"GUIInsecureAdminAccess",
|
||||
"GUIDebugging",
|
||||
"GUIInsecureSkipHostCheck",
|
||||
"GUIInsecureAllowFrameLoading",
|
||||
"GUIListenLocal",
|
||||
"GUIListenUnspecified",
|
||||
"GUITheme",
|
||||
|
||||
"BlocksTotal",
|
||||
"BlocksRenamed",
|
||||
"BlocksReused",
|
||||
"BlocksPulled",
|
||||
"BlocksCopyOrigin",
|
||||
"BlocksCopyOriginShifted",
|
||||
"BlocksCopyElsewhere",
|
||||
|
||||
"Transport",
|
||||
|
||||
"IgnoreLines",
|
||||
"IgnoreInverts",
|
||||
"IgnoreFolded",
|
||||
"IgnoreDeletable",
|
||||
"IgnoreRooted",
|
||||
"IgnoreIncludes",
|
||||
"IgnoreEscapedIncludes",
|
||||
"IgnoreDoubleStars",
|
||||
"IgnoreStars",
|
||||
|
||||
// V3 added late in the RC
|
||||
"WeakHashEnabled",
|
||||
"Address",
|
||||
|
||||
// Receive only folders
|
||||
"FolderRecvOnly",
|
||||
}
|
||||
}
|
||||
|
||||
func setupDB(db *sql.DB) error {
|
||||
_, err := db.Exec(`CREATE TABLE IF NOT EXISTS Reports (
|
||||
_, err := db.Exec(`CREATE TABLE IF NOT EXISTS ReportsJson (
|
||||
Received TIMESTAMP NOT NULL,
|
||||
UniqueID VARCHAR(32) NOT NULL,
|
||||
Version VARCHAR(32) NOT NULL,
|
||||
LongVersion VARCHAR(256) NOT NULL,
|
||||
Platform VARCHAR(32) NOT NULL,
|
||||
NumFolders INTEGER NOT NULL,
|
||||
NumDevices INTEGER NOT NULL,
|
||||
TotFiles INTEGER NOT NULL,
|
||||
FolderMaxFiles INTEGER NOT NULL,
|
||||
TotMiB INTEGER NOT NULL,
|
||||
FolderMaxMiB INTEGER NOT NULL,
|
||||
MemoryUsageMiB INTEGER NOT NULL,
|
||||
SHA256Perf DOUBLE PRECISION NOT NULL,
|
||||
MemorySize INTEGER NOT NULL,
|
||||
Date VARCHAR(8) NOT NULL
|
||||
Report JSONB NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var t string
|
||||
row := db.QueryRow(`SELECT 'UniqueIDIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE UNIQUE INDEX UniqueIDIndex ON Reports (Date, UniqueID)`); err != nil {
|
||||
if err := db.QueryRow(`SELECT 'UniqueIDJsonIndex'::regclass`).Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE UNIQUE INDEX UniqueIDJsonIndex ON ReportsJson ((Report->>'date'), (Report->>'uniqueID'))`); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'ReceivedIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE INDEX ReceivedIndex ON Reports (Received)`); err != nil {
|
||||
if err := db.QueryRow(`SELECT 'ReceivedJsonIndex'::regclass`).Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE INDEX ReceivedJsonIndex ON ReportsJson (Received)`); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// V2
|
||||
|
||||
row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'reportversion'`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
// The ReportVersion column doesn't exist; add the new columns.
|
||||
_, err = db.Exec(`ALTER TABLE Reports
|
||||
ADD COLUMN ReportVersion INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN NumCPU INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderRO INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderIgnorePerms INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderIgnoreDelete INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderAutoNormalize INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceIntroducer INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceCustomCertName INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceCompressAlways INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceCompressMetadata INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceCompressNever INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceDynamicAddr INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN DeviceStaticAddr INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN AnnounceGlobalEnabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN AnnounceLocalEnabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN AnnounceDefaultServersDNS INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN AnnounceDefaultServersIP INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN AnnounceOtherServers INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN RelayEnabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN RelayDefaultServers INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN RelayOtherServers INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN RateLimitEnabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN UpgradeAllowedManual BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN UpgradeAllowedAuto BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN FolderSimpleVersioning INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderExternalVersioning INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderStaggeredVersioning INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderTrashcanVersioning INTEGER NOT NULL DEFAULT 0
|
||||
`)
|
||||
if err != nil {
|
||||
if err := db.QueryRow(`SELECT 'ReportVersionJsonIndex'::regclass`).Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE INDEX ReportVersionJsonIndex ON ReportsJson (cast((Report->>'urVersion') as numeric))`); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'ReportVersionIndex'::regclass`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
if _, err = db.Exec(`CREATE INDEX ReportVersionIndex ON Reports (ReportVersion)`); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// V2.5
|
||||
|
||||
row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'upgradeallowedpre'`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
// The ReportVersion column doesn't exist; add the new columns.
|
||||
_, err = db.Exec(`ALTER TABLE Reports
|
||||
ADD COLUMN UpgradeAllowedPre BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN RescanIntvs INT[] NOT NULL DEFAULT '{}'
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// V3
|
||||
|
||||
row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'uptime'`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
// The Uptime column doesn't exist; add the new columns.
|
||||
_, err = db.Exec(`ALTER TABLE Reports
|
||||
ADD COLUMN Uptime INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN NATType VARCHAR(32) NOT NULL DEFAULT '',
|
||||
ADD COLUMN AlwaysLocalNets BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN CacheIgnoredFiles BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN OverwriteRemoteDeviceNames BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN ProgressEmitterEnabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN CustomDefaultFolderPath BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN WeakHashSelection VARCHAR(32) NOT NULL DEFAULT '',
|
||||
ADD COLUMN CustomTrafficClass BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN CustomTempIndexMinBlocks BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN TemporariesDisabled BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN TemporariesCustom BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN LimitBandwidthInLan BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN CustomReleaseURL BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN RestartOnWakeup BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
ADD COLUMN CustomStunServers BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
|
||||
ADD COLUMN FolderScanProgressDisabled INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderConflictsDisabled INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderConflictsUnlimited INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderConflictsOther INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderDisableSparseFiles INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderDisableTempIndexes INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderAlwaysWeakHash INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderCustomWeakHashThreshold INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderFsWatcherEnabled INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN FolderPullOrder JSONB NOT NULL DEFAULT '{}',
|
||||
ADD COLUMN FolderFilesystemType JSONB NOT NULL DEFAULT '{}',
|
||||
ADD COLUMN FolderFsWatcherDelays INT[] NOT NULL DEFAULT '{}',
|
||||
|
||||
ADD COLUMN GUIEnabled INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIUseTLS INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIUseAuth INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIInsecureAdminAccess INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIDebugging INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIInsecureSkipHostCheck INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIInsecureAllowFrameLoading INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIListenLocal INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUIListenUnspecified INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN GUITheme JSONB NOT NULL DEFAULT '{}',
|
||||
|
||||
ADD COLUMN BlocksTotal INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksRenamed INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksReused INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksPulled INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksCopyOrigin INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksCopyOriginShifted INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN BlocksCopyElsewhere INTEGER NOT NULL DEFAULT 0,
|
||||
|
||||
ADD COLUMN Transport JSONB NOT NULL DEFAULT '{}',
|
||||
|
||||
ADD COLUMN IgnoreLines INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreInverts INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreFolded INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreDeletable INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreRooted INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreIncludes INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreEscapedIncludes INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreDoubleStars INTEGER NOT NULL DEFAULT 0,
|
||||
ADD COLUMN IgnoreStars INTEGER NOT NULL DEFAULT 0
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// V3 added late in the RC
|
||||
|
||||
row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'weakhashenabled'`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
// The WeakHashEnabled column doesn't exist; add the new columns.
|
||||
_, err = db.Exec(`ALTER TABLE Reports
|
||||
ADD COLUMN WeakHashEnabled BOOLEAN NOT NULL DEFAULT FALSE
|
||||
ADD COLUMN Address VARCHAR(45) NOT NULL DEFAULT ''
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Receive only added ad-hoc
|
||||
|
||||
row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'folderrecvonly'`)
|
||||
if err := row.Scan(&t); err != nil {
|
||||
// The RecvOnly column doesn't exist; add it.
|
||||
_, err = db.Exec(`ALTER TABLE Reports
|
||||
ADD COLUMN FolderRecvOnly INTEGER NOT NULL DEFAULT 0
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Migrate from old schema to new schema if the table exists.
|
||||
if err := migrate(db); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func insertReport(db *sql.DB, r report) error {
|
||||
r.Received = time.Now().UTC()
|
||||
fields := r.FieldPointers()
|
||||
params := make([]string, len(fields))
|
||||
for i := range params {
|
||||
params[i] = fmt.Sprintf("$%d", i+1)
|
||||
}
|
||||
query := "INSERT INTO Reports (" + strings.Join(r.FieldNames(), ", ") + ") VALUES (" + strings.Join(params, ", ") + ")"
|
||||
_, err := db.Exec(query, fields...)
|
||||
func insertReport(db *sql.DB, r contract.Report) error {
|
||||
_, err := db.Exec("INSERT INTO ReportsJson (Report, Received) VALUES ($1, $2)", r, time.Now().UTC())
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -687,9 +147,9 @@ func insertReport(db *sql.DB, r report) error {
|
||||
type withDBFunc func(*sql.DB, http.ResponseWriter, *http.Request)
|
||||
|
||||
func withDB(db *sql.DB, f withDBFunc) http.HandlerFunc {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
f(db, w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -753,6 +213,7 @@ func main() {
|
||||
http.HandleFunc("/movement.json", withDB(db, movementHandler))
|
||||
http.HandleFunc("/performance.json", withDB(db, performanceHandler))
|
||||
http.HandleFunc("/blockstats.json", withDB(db, blockStatsHandler))
|
||||
http.HandleFunc("/locations.json", withDB(db, locationsHandler))
|
||||
http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
||||
|
||||
go cacheRefresher(db)
|
||||
@@ -764,9 +225,10 @@ func main() {
|
||||
}
|
||||
|
||||
var (
|
||||
cacheData []byte
|
||||
cacheTime time.Time
|
||||
cacheMut sync.Mutex
|
||||
cachedIndex []byte
|
||||
cachedLocations []byte
|
||||
cacheTime time.Time
|
||||
cacheMut sync.Mutex
|
||||
)
|
||||
|
||||
const maxCacheTime = 15 * time.Minute
|
||||
@@ -774,7 +236,7 @@ const maxCacheTime = 15 * time.Minute
|
||||
func cacheRefresher(db *sql.DB) {
|
||||
ticker := time.NewTicker(maxCacheTime - time.Minute)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
for ; true; <-ticker.C {
|
||||
cacheMut.Lock()
|
||||
if err := refreshCacheLocked(db); err != nil {
|
||||
log.Println(err)
|
||||
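The loop change from `for range ticker.C` to `for ; true; <-ticker.C` makes the first cache refresh run immediately instead of waiting a full tick interval (maxCacheTime minus one minute); the channel receive sits in the for post statement, so it only blocks between iterations. A self-contained illustration of the idiom:

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	// Body runs once right away, then again after every tick.
	for n := 0; true; <-ticker.C {
		fmt.Println("refresh", n)
		if n++; n == 3 {
			break
		}
	}
}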
@@ -790,8 +252,15 @@ func refreshCacheLocked(db *sql.DB) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cacheData = buf.Bytes()
|
||||
cachedIndex = buf.Bytes()
|
||||
cacheTime = time.Now()
|
||||
|
||||
locs := rep["locations"].(map[location]int)
|
||||
wlocs := make([]weightedLocation, 0, len(locs))
|
||||
for loc, w := range locs {
|
||||
wlocs = append(wlocs, weightedLocation{loc, w})
|
||||
}
|
||||
cachedLocations, _ = json.Marshal(wlocs)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -809,13 +278,29 @@ func rootHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
w.Write(cacheData)
|
||||
w.Write(cachedIndex)
|
||||
} else {
|
||||
http.Error(w, "Not found", 404)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func locationsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
cacheMut.Lock()
|
||||
defer cacheMut.Unlock()
|
||||
|
||||
if time.Since(cacheTime) > maxCacheTime {
|
||||
if err := refreshCacheLocked(db); err != nil {
|
||||
log.Println(err)
|
||||
http.Error(w, "Template Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Write(cachedLocations)
|
||||
}
|
||||
|
||||
func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
|
||||
@@ -834,7 +319,7 @@ func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
addr = ""
|
||||
}
|
||||
|
||||
var rep report
|
||||
var rep contract.Report
|
||||
rep.Date = time.Now().UTC().Format("20060102")
|
||||
rep.Address = addr
|
||||
|
||||
@@ -859,7 +344,7 @@ func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := insertReport(db, rep); err != nil {
|
||||
if err.Error() == `pq: duplicate key value violates unique constraint "uniqueidindex"` {
|
||||
if err.Error() == `pq: duplicate key value violates unique constraint "uniqueidjsonindex"` {
|
||||
// We already have a report today for the same unique ID; drop
|
||||
// this one without complaining.
|
||||
return
|
||||
@@ -874,7 +359,8 @@ func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func summaryHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
||||
s, err := getSummary(db)
|
||||
min, _ := strconv.Atoi(r.URL.Query().Get("min"))
|
||||
s, err := getSummary(db, min)
|
||||
if err != nil {
|
||||
log.Println("summaryHandler:", err)
|
||||
http.Error(w, "Database Error", http.StatusInternalServerError)
|
||||
@@ -1015,8 +501,13 @@ func inc(storage map[string]int, key string, i interface{}) {
|
||||
}
|
||||
|
||||
type location struct {
	Latitude  float64
	Longitude float64
	Latitude  float64 `json:"lat"`
	Longitude float64 `json:"lon"`
}

type weightedLocation struct {
	location
	Weight int `json:"weight"`
}
|
||||
|
||||
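With the json tags and the new weightedLocation wrapper, the cached /locations.json payload becomes a flat array of objects; encoding/json flattens the embedded location fields into the top-level object, so a single entry marshals roughly as shown (coordinates invented):

wl := weightedLocation{location: location{Latitude: 59.33, Longitude: 18.07}, Weight: 42}
bs, _ := json.Marshal(wl)
// string(bs) == `{"lat":59.33,"lon":18.07,"weight":42}`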
func getReport(db *sql.DB) map[string]interface{} {
|
||||
@@ -1036,11 +527,11 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
var numDevices []int
|
||||
var totFiles []int
|
||||
var maxFiles []int
|
||||
var totMiB []int
|
||||
var maxMiB []int
|
||||
var memoryUsage []int
|
||||
var totMiB []int64
|
||||
var maxMiB []int64
|
||||
var memoryUsage []int64
|
||||
var sha256Perf []float64
|
||||
var memorySize []int
|
||||
var memorySize []int64
|
||||
var uptime []int
|
||||
var compilers []string
|
||||
var builders []string
|
||||
@@ -1079,9 +570,9 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
|
||||
var numCPU []int
|
||||
|
||||
var rep report
|
||||
var rep contract.Report
|
||||
|
||||
rows, err := db.Query(`SELECT ` + strings.Join(rep.FieldNames(), ",") + ` FROM Reports WHERE Received > now() - '1 day'::INTERVAL`)
|
||||
rows, err := db.Query(`SELECT Received, Report FROM ReportsJson WHERE Received > now() - '1 day'::INTERVAL`)
|
||||
if err != nil {
|
||||
log.Println("sql:", err)
|
||||
return nil
|
||||
@@ -1089,7 +580,7 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
err := rows.Scan(rep.FieldPointers()...)
|
||||
err := rows.Scan(&rep.Received, &rep)
|
||||
|
||||
if err != nil {
|
||||
log.Println("sql:", err)
|
||||
@@ -1140,19 +631,19 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
maxFiles = append(maxFiles, rep.FolderMaxFiles)
|
||||
}
|
||||
if rep.TotMiB > 0 {
|
||||
totMiB = append(totMiB, rep.TotMiB*(1<<20))
|
||||
totMiB = append(totMiB, int64(rep.TotMiB)*(1<<20))
|
||||
}
|
||||
if rep.FolderMaxMiB > 0 {
|
||||
maxMiB = append(maxMiB, rep.FolderMaxMiB*(1<<20))
|
||||
maxMiB = append(maxMiB, int64(rep.FolderMaxMiB)*(1<<20))
|
||||
}
|
||||
if rep.MemoryUsageMiB > 0 {
|
||||
memoryUsage = append(memoryUsage, rep.MemoryUsageMiB*(1<<20))
|
||||
memoryUsage = append(memoryUsage, int64(rep.MemoryUsageMiB)*(1<<20))
|
||||
}
|
||||
if rep.SHA256Perf > 0 {
|
||||
sha256Perf = append(sha256Perf, rep.SHA256Perf*(1<<20))
|
||||
}
|
||||
if rep.MemorySize > 0 {
|
||||
memorySize = append(memorySize, rep.MemorySize*(1<<20))
|
||||
memorySize = append(memorySize, int64(rep.MemorySize)*(1<<20))
|
||||
}
|
||||
if rep.Uptime > 0 {
|
||||
uptime = append(uptime, rep.Uptime)
|
||||
@@ -1234,8 +725,8 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
|
||||
if rep.NATType != "" {
|
||||
natType := rep.NATType
|
||||
natType = strings.Replace(natType, "unknown", "Unknown", -1)
|
||||
natType = strings.Replace(natType, "Symetric", "Symmetric", -1)
|
||||
natType = strings.ReplaceAll(natType, "unknown", "Unknown")
|
||||
natType = strings.ReplaceAll(natType, "Symetric", "Symmetric")
|
||||
add(featureGroups["Various"]["v3"], "NAT Type", natType, 1)
|
||||
}
|
||||
|
||||
@@ -1253,6 +744,8 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
inc(features["Folder"]["v3"], "Weak hash, always", rep.FolderUsesV3.AlwaysWeakHash)
|
||||
inc(features["Folder"]["v3"], "Weak hash, custom threshold", rep.FolderUsesV3.CustomWeakHashThreshold)
|
||||
inc(features["Folder"]["v3"], "Filesystem watcher", rep.FolderUsesV3.FsWatcherEnabled)
|
||||
inc(features["Folder"]["v3"], "Case sensitive FS", rep.FolderUsesV3.CaseSensitiveFS)
|
||||
inc(features["Folder"]["v3"], "Mode, receive encrypted", rep.FolderUsesV3.ReceiveEncrypted)
|
||||
|
||||
add(featureGroups["Folder"]["v3"], "Conflicts", "Disabled", rep.FolderUsesV3.ConflictsDisabled)
|
||||
add(featureGroups["Folder"]["v3"], "Conflicts", "Unlimited", rep.FolderUsesV3.ConflictsUnlimited)
|
||||
@@ -1262,6 +755,8 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
add(featureGroups["Folder"]["v3"], "Pull Order", prettyCase(key), value)
|
||||
}
|
||||
|
||||
inc(features["Device"]["v3"], "Untrusted", rep.DeviceUsesV3.Untrusted)
|
||||
|
||||
totals["GUI"] += rep.GUIStats.Enabled
|
||||
|
||||
inc(features["GUI"]["v3"], "Auth Enabled", rep.GUIStats.UseAuth)
|
||||
@@ -1303,14 +798,14 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
})
|
||||
|
||||
categories = append(categories, category{
|
||||
Values: statsForInts(totMiB),
|
||||
Values: statsForInt64s(totMiB),
|
||||
Descr: "Data Managed per Device",
|
||||
Unit: "B",
|
||||
Type: NumberBinary,
|
||||
})
|
||||
|
||||
categories = append(categories, category{
|
||||
Values: statsForInts(maxMiB),
|
||||
Values: statsForInt64s(maxMiB),
|
||||
Descr: "Data in Largest Folder",
|
||||
Unit: "B",
|
||||
Type: NumberBinary,
|
||||
@@ -1327,14 +822,14 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
})
|
||||
|
||||
categories = append(categories, category{
|
||||
Values: statsForInts(memoryUsage),
|
||||
Values: statsForInt64s(memoryUsage),
|
||||
Descr: "Memory Usage",
|
||||
Unit: "B",
|
||||
Type: NumberBinary,
|
||||
})
|
||||
|
||||
categories = append(categories, category{
|
||||
Values: statsForInts(memorySize),
|
||||
Values: statsForInt64s(memorySize),
|
||||
Descr: "System Memory",
|
||||
Unit: "B",
|
||||
Type: NumberBinary,
|
||||
@@ -1419,7 +914,7 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
r["platforms"] = group(byPlatform, analyticsFor(platforms, 2000), 10)
|
||||
r["compilers"] = group(byCompiler, analyticsFor(compilers, 2000), 5)
|
||||
r["builders"] = analyticsFor(builders, 12)
|
||||
r["distributions"] = analyticsFor(distributions, 10)
|
||||
r["distributions"] = analyticsFor(distributions, len(knownDistributions))
|
||||
r["featureOrder"] = featureOrder
|
||||
r["locations"] = locations
|
||||
r["contries"] = countryList
|
||||
@@ -1428,7 +923,7 @@ func getReport(db *sql.DB) map[string]interface{} {
|
||||
}
|
||||
|
||||
var (
|
||||
plusRe = regexp.MustCompile(`\+.*$`)
|
||||
plusRe = regexp.MustCompile(`(\+.*|\.dev\..*)$`)
|
||||
plusStr = "(+dev)"
|
||||
)
|
||||
|
||||
@@ -1487,7 +982,9 @@ func (s *summary) MarshalJSON() ([]byte, error) {
|
||||
for v := range s.versions {
|
||||
versions = append(versions, v)
|
||||
}
|
||||
sort.Strings(versions)
|
||||
sort.Slice(versions, func(a, b int) bool {
|
||||
return upgrade.CompareVersions(versions[a], versions[b]) < 0
|
||||
})
|
||||
|
||||
var filtered []string
|
||||
for _, v := range versions {
|
||||
@@ -1527,7 +1024,21 @@ func (s *summary) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(table)
|
||||
}
|
||||
|
||||
func getSummary(db *sql.DB) (summary, error) {
|
||||
// filter removes versions that never reach the specified min count.
|
||||
func (s *summary) filter(min int) {
|
||||
// We cheat and just remove the versions from the "index" and leave the
|
||||
// data points alone. The version index is used to build the table when
|
||||
// we do the serialization, so at that point the data points are
|
||||
// filtered out as well.
|
||||
for ver := range s.versions {
|
||||
if s.max[ver] < min {
|
||||
delete(s.versions, ver)
|
||||
delete(s.max, ver)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
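filter prunes the version index by peak daily count: any version whose maximum never reaches min disappears from the rendered table, while the underlying data points are left in place, exactly as the comment says. An invented-numbers illustration using the calls already shown in getSummary below:

s := newSummary()
s.setCount("2021-05-01", "v1.18.0", 31204)
s.setCount("2021-05-01", "v0.14.51", 7)
s.filter(50)
// Only "v1.18.0" survives in s.versions/s.max; "v0.14.51" never reaches the
// minimum of 50 and is therefore omitted when the table is marshalled.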
func getSummary(db *sql.DB, min int) (summary, error) {
|
||||
s := newSummary()
|
||||
|
||||
rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '2 year'::INTERVAL;`)
|
||||
@@ -1558,6 +1069,7 @@ func getSummary(db *sql.DB) (summary, error) {
|
||||
s.setCount(day.Format("2006-01-02"), ver, num)
|
||||
}
|
||||
|
||||
s.filter(min)
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
||||
143
cmd/ursrv/migration.go
Normal file
@@ -0,0 +1,143 @@
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"database/sql"
"database/sql/driver"
"encoding/json"
"errors"
"log"
"strings"

"github.com/lib/pq"

"github.com/syncthing/syncthing/lib/ur/contract"
)

func migrate(db *sql.DB) error {
var count uint64
log.Println("Checking old table row count, this might take a while...")
if err := db.QueryRow(`SELECT COUNT(1) FROM Reports`).Scan(&count); err != nil || count == 0 {
// err != nil most likely means table does not exist.
return nil
}
log.Printf("Found %d records, will perform migration.", count)

tx, err := db.Begin()
if err != nil {
log.Println("sql:", err)
return err
}
defer tx.Rollback()

// These must be lower case, because we don't quote them when creating, so postgres creates them lower case.
// Yet pg.CopyIn quotes them, which makes them case sensitive.
stmt, err := tx.Prepare(pq.CopyIn("reportsjson", "received", "report"))
if err != nil {
log.Println("sql:", err)
return err
}

// Custom types used in the old struct.
var rep contract.Report
var rescanIntvs pq.Int64Array
var fsWatcherDelay pq.Int64Array
pullOrder := make(IntMap)
fileSystemType := make(IntMap)
themes := make(IntMap)
transportStats := make(IntMap)

rows, err := db.Query(`SELECT ` + strings.Join(rep.FieldNames(), ", ") + `, FolderFsWatcherDelays, RescanIntvs, FolderPullOrder, FolderFilesystemType, GUITheme, Transport FROM Reports`)
if err != nil {
log.Println("sql:", err)
return err
}
defer rows.Close()

var done uint64
pct := count / 100

for rows.Next() {
err := rows.Scan(append(rep.FieldPointers(), &fsWatcherDelay, &rescanIntvs, &pullOrder, &fileSystemType, &themes, &transportStats)...)
if err != nil {
log.Println("sql scan:", err)
return err
}
// Patch up parts that used to use custom types
rep.RescanIntvs = make([]int, len(rescanIntvs))
for i := range rescanIntvs {
rep.RescanIntvs[i] = int(rescanIntvs[i])
}
rep.FolderUsesV3.FsWatcherDelays = make([]int, len(fsWatcherDelay))
for i := range fsWatcherDelay {
rep.FolderUsesV3.FsWatcherDelays[i] = int(fsWatcherDelay[i])
}
rep.FolderUsesV3.PullOrder = pullOrder
rep.FolderUsesV3.FilesystemType = fileSystemType
rep.GUIStats.Theme = themes
rep.TransportStats = transportStats

_, err = stmt.Exec(rep.Received, rep)
if err != nil {
log.Println("sql insert:", err)
return err
}
done++
if done%pct == 0 {
log.Printf("Migration progress %d/%d (%d%%)", done, count, (100*done)/count)
}
}

// Tell the driver bulk copy is finished
_, err = stmt.Exec()
if err != nil {
log.Println("sql stmt exec:", err)
return err
}

err = stmt.Close()
if err != nil {
log.Println("sql stmt close:", err)
return err
}

_, err = tx.Exec("DROP TABLE Reports")
if err != nil {
log.Println("sql drop:", err)
return err
}

err = tx.Commit()
if err != nil {
log.Println("sql commit:", err)
return err
}
return nil
}

type IntMap map[string]int

func (p IntMap) Value() (driver.Value, error) {
return json.Marshal(p)
}

func (p *IntMap) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("Type assertion .([]byte) failed.")
}

var i map[string]int
err := json.Unmarshal(source, &i)
if err != nil {
return err
}

*p = i
return nil
}
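A minimal sketch of the JSON round trip that `Value` and `Scan` perform; it assumes the `IntMap` type from migration.go above plus an `fmt` import, and the map keys are made-up examples:

```go
// intMapRoundTrip is a hypothetical helper, not part of the patch.
func intMapRoundTrip() error {
	m := IntMap{"random": 3, "alphabetic": 1} // made-up pull-order counts

	v, err := m.Value() // driver.Valuer: marshals the map to JSON bytes
	if err != nil {
		return err
	}

	var out IntMap
	if err := out.Scan(v); err != nil { // sql.Scanner: unmarshals the bytes back
		return err
	}

	fmt.Printf("%v\n", out) // map[alphabetic:1 random:3]
	return nil
}
```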
Image file changed: size 6.4 KiB before, 4.8 KiB after.
@@ -17,7 +17,11 @@ found in the LICENSE file.
<link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?libraries=visualization&key=AIzaSyDk5WJ8s7ueLKb99X5DbQ-vkWtPDAKqYs0"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.js"></script>
<script src="https://cdn.jsdelivr.net/npm/heatmapjs@2.0.2/heatmap.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/leaflet-heatmap@1.0.0/leaflet-heatmap.js"></script>

<style type="text/css">
body {
margin: 40px;
@@ -36,23 +40,25 @@ found in the LICENSE file.
}
</style>
<script type="text/javascript"
src="https://www.google.com/jsapi?autoload={
'modules':[{
'name':'visualization',
'version':'1',
'packages':['corechart']
src='https://www.google.com/jsapi?autoload={
"modules":[{
"name":"visualization",
"version":"1",
"packages":["corechart"]
}]
}"></script>
}'></script>

<script type="text/javascript">
google.setOnLoadCallback(drawVersionChart);
google.setOnLoadCallback(drawMovementChart);
google.setOnLoadCallback(drawBlockStatsChart);
google.setOnLoadCallback(drawPerformanceCharts);
google.setOnLoadCallback(drawHeatMap);

function drawVersionChart() {
var jsonData = $.ajax({url: "summary.json", dataType:"json", async: false}).responseText;
// Summary version chart for versions that at some point in the chart
// reaches 250 devices. This filters out versions that are old and
// uninteresting yet linger forever with like four users.
var jsonData = $.ajax({url: "summary.json?min=250", dataType:"json", async: false}).responseText;
var rows = JSON.parse(jsonData);

var data = new google.visualization.DataTable();
@@ -211,47 +217,46 @@ found in the LICENSE file.

var locations = [];
{{range $location, $weight := .locations}}
locations.push({location: new google.maps.LatLng({{- $location.Latitude -}}, {{- $location.Longitude -}}), weight: {{- $weight -}}});
locations.push({lat:{{- $location.Latitude -}},lng:{{- $location.Longitude -}},count:Math.min(100, {{- $weight -}})});
{{- end}}

function drawHeatMap() {
if (locations.length == 0) {
return;
}
var mapBounds = new google.maps.LatLngBounds();
var map = new google.maps.Map(document.getElementById('map'), {
zoom: 1,
mapTypeId: google.maps.MapTypeId.ROADMAP
});
var heatmap = new google.maps.visualization.HeatmapLayer({
var testData = {
data: locations
};

var baseLayer = L.tileLayer(
'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',{
attribution: '...',
maxZoom: 18
}
);
var cfg = {
"radius": 1,
"minOpacity": .25,
"maxOpacity": .8,
"scaleRadius": true,
"useLocalExtrema": true,
latField: 'lat',
lngField: 'lng',
valueField: 'count',
gradient: {
'.1': 'cyan',
'.8': 'blue',
'.95': 'red'
}
};
var heatmapLayer = new HeatmapOverlay(cfg);

var map = new L.Map('map', {
center: new L.LatLng(25, 0),
zoom: 1,
layers: [baseLayer, heatmapLayer]
});
heatmap.set('radius', 10);
heatmap.set('maxIntensity', 20);
heatmap.set('gradient', [
'rgba(0, 255, 255, 0)',
'rgba(0, 255, 255, 1)',
'rgba(0, 191, 255, 1)',
'rgba(0, 127, 255, 1)',
'rgba(0, 63, 255, 1)',
'rgba(0, 0, 255, 1)',
'rgba(0, 0, 223, 1)',
'rgba(0, 0, 191, 1)',
'rgba(0, 0, 159, 1)',
'rgba(0, 0, 127, 1)',
'rgba(63, 0, 91, 1)',
'rgba(127, 0, 63, 1)',
'rgba(191, 0, 31, 1)',
'rgba(255, 0, 0, 1)'
]);
heatmap.setMap(map);
for (var x = 0; x < locations.length; x++) {
mapBounds.extend(locations[x].location);
}
map.fitBounds(mapBounds);
if (locations.length == 1) {
map.setZoom(13);
}
heatmapLayer.setData(testData);
}
</script>
</head>
@@ -296,7 +301,7 @@ found in the LICENSE file.
{{if .locations}}
<div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
<p class="text-muted">
Heatmap max intensity is capped at 20 reports within a location.
Heatmap max intensity is capped at 100 reports within a location.
</p>
<div class="panel panel-default">
<div class="panel-heading">
@@ -651,6 +656,7 @@ found in the LICENSE file.
</p>
<script type="text/javascript">
$('[data-toggle="tooltip"]').tooltip({html:true});
drawHeatMap();
</script>
</body>
</html>

@@ -1,7 +1,7 @@
[syncthing]
title=Syncthing
description=Syncthing file synchronisation
ports=22000/tcp|21027/udp
ports=22000|21027/udp

[syncthing-gui]
title=Syncthing-GUI

@@ -38,13 +38,14 @@ syncthing_group=${syncthing_group:-$syncthing_user}

command=/usr/local/bin/syncthing
pidfile=/var/run/syncthing.pid
syncthing_flags="${syncthing_home:+-home=${syncthing_home}} ${syncthing_log_file:+-logfile=${syncthing_log_file}}"
syncthing_cmd=serve
syncthing_flags="${syncthing_home:+--home=${syncthing_home}} ${syncthing_log_file:+--logfile=${syncthing_log_file}}"

syncthing_start() {
echo "Starting syncthing"
touch ${pidfile} && chown ${syncthing_user} ${pidfile}
touch ${syncthing_log_file} && chown ${syncthing_user} ${syncthing_log_file}
/usr/sbin/daemon -cf -p ${pidfile} -u ${syncthing_user} ${command} ${syncthing_flags}
/usr/sbin/daemon -cf -p ${pidfile} -u ${syncthing_user} ${command} ${syncthing_cmd} ${syncthing_flags}
}

syncthing_cleanup() {

@@ -2,7 +2,7 @@
Name=Start Syncthing
GenericName=File synchronization
Comment=Starts the main syncthing process in the background.
Exec=/usr/bin/syncthing -no-browser
Exec=/usr/bin/syncthing serve --no-browser --logfile=default
Icon=syncthing
Terminal=false
Type=Application

3
etc/linux-runit/run
Normal file → Executable file
@@ -5,5 +5,4 @@ export HOME="/home/$USERNAME"
export SYNCTHING="$HOME/bin/syncthing"

exec 2>&1
exec chpst -u "$USERNAME" "$SYNCTHING" -logflags 0

exec chpst -u "$USERNAME" "$SYNCTHING" serve --logflags 0

3
etc/linux-sysctl/30-syncthing.conf
Normal file
@@ -0,0 +1,3 @@
# Increase maximum receive socket buffer size to 2MiB for QUIC connections
# see https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
net.core.rmem_max = 2097152
21
etc/linux-sysctl/README.md
Normal file
@@ -0,0 +1,21 @@
sysctl configuration to raise UDP buffer size
===================
Installation
-----------
**Please note:** If you installed syncthing using the official deb package, you can skip the copying.

Copy the file `30-syncthing.conf` to `/etc/sysctl.d/` (root permissions required).

In a terminal, run
```
sudo sysctl -q --system
```
to apply the sysctl changes.


Verification
----------
You can verify that the new limit is active using
```
sysctl net.core.rmem_max
```
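With `30-syncthing.conf` in place, the command should report the value configured above:
```
net.core.rmem_max = 2097152
```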