Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-24 06:28:10 -05:00
Compare commits
805 Commits
15 .codecov.yml
@@ -1,5 +1,20 @@
comment: false

coverage:
  range: "40...100"
  precision: 1
  status:
    patch:
      default:
        informational: true
    project:
      default:
        informational: true

github_checks:
  annotations: false

ignore:
  - "**.pb.go"
  - "**_mocked.go"
  - "**/mocks/*"
12 .deepsource.toml Normal file
@@ -0,0 +1,12 @@
version = 1

exclude_patterns = ["**/*.pb.go"]
test_patterns = ["**/*_test.go"]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/syncthing/syncthing"]
build_tags = ["noassets"]
13 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,13 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 10

  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: weekly
    open-pull-requests-limit: 10
382 .github/workflows/build-syncthing.yaml vendored Normal file
@@ -0,0 +1,382 @@
|
||||
name: Build Syncthing
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
|
||||
env:
|
||||
# The go version to use for builds.
|
||||
GO_VERSION: "1.19.6"
|
||||
|
||||
# Optimize compatibility on the slow archictures.
|
||||
GO386: softfloat
|
||||
GOARM: "5"
|
||||
GOMIPS: softfloat
|
||||
|
||||
# Avoid hilarious amounts of obscuring log output when running tests.
|
||||
LOGGER_DISCARD: "1"
|
||||
|
||||
# Our build metadata
|
||||
BUILD_USER: builder
|
||||
BUILD_HOST: github.syncthing.net
|
||||
|
||||
# A note on actions and third party code... The actions under actions/ (like
|
||||
# `uses: actions/checkout`) are maintained by GitHub, and we need to trust
|
||||
# GitHub to maintain their code and infrastructure or we're in deep shit in
|
||||
# general. The same doesn't necessarily apply to other actions authors, so
|
||||
# some care needs to be taken when adding steps, especially in the paths
|
||||
# that lead up to code being packaged and signed.
|
||||
|
||||
jobs:
|
||||
|
||||
#
|
||||
# Tests for all platforms. Runs a matrix build on Windows, Linux and Mac,
|
||||
# with the list of expected supported Go versions (current, previous).
|
||||
#
|
||||
|
||||
build-test:
|
||||
name: Build and test
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
runner: ["windows-latest", "ubuntu-latest", "macos-latest"]
|
||||
# The oldest version in this list should match what we have in our go.mod.
|
||||
# Variables don't seem to be supported here, or we could have done something nice.
|
||||
go: ["1.19"] # Skip Go 1.20 for now, https://github.com/syncthing/syncthing/issues/8799
|
||||
runs-on: ${{ matrix.runner }}
|
||||
steps:
|
||||
- name: Set git to use LF
|
||||
if: matrix.runner == 'windows-latest'
|
||||
# Without this, the Windows checkout will happen with CRLF line
|
||||
# endings, which is fine for the source code but messes up tests
|
||||
# that depend on data on disk being as expected. Ideally, those
|
||||
# tests should be fixed, but not today.
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go }}
|
||||
cache: true
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
go run build.go
|
||||
|
||||
- name: Test
|
||||
# Our Windows tests currently don't work on Go 1.20
|
||||
# https://github.com/syncthing/syncthing/issues/8779
|
||||
# https://github.com/syncthing/syncthing/issues/8778
|
||||
if: "!(matrix.go == '1.20' && matrix.runner == 'windows-latest')"
|
||||
run: |
|
||||
go run build.go test
|
||||
|
||||
#
|
||||
# Meta checks for formatting, copyright, etc
|
||||
#
|
||||
|
||||
correctness:
|
||||
name: Check correctness
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Check correctness
|
||||
run: |
|
||||
go test -v ./meta
|
||||
|
||||
#
|
||||
# Windows
|
||||
#
|
||||
|
||||
package-windows:
|
||||
name: Package for Windows
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/release'
|
||||
environment: signing
|
||||
needs:
|
||||
- build-test
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- name: Set git to use LF
|
||||
# Without this, the checkout will happen with CRLF line endings,
|
||||
# which is fine for the source code but messes up tests that depend
|
||||
# on data on disk being as expected. Ideally, those tests should be
|
||||
# fixed, but not today.
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~\AppData\Local\go-build
|
||||
~\go\pkg\mod
|
||||
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0
|
||||
|
||||
- name: Create packages
|
||||
run: |
|
||||
go run build.go -goarch amd64 zip
|
||||
go run build.go -goarch arm zip
|
||||
go run build.go -goarch arm64 zip
|
||||
go run build.go -goarch 386 zip
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
CODESIGN_SIGNTOOL: ${{ secrets.CODESIGN_SIGNTOOL }}
|
||||
CODESIGN_CERTIFICATE_BASE64: ${{ secrets.CODESIGN_CERTIFICATE_BASE64 }}
|
||||
CODESIGN_CERTIFICATE_PASSWORD: ${{ secrets.CODESIGN_CERTIFICATE_PASSWORD }}
|
||||
CODESIGN_TIMESTAMP_SERVER: ${{ secrets.CODESIGN_TIMESTAMP_SERVER }}
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: packages
|
||||
path: syncthing-windows-*.zip
|
||||
|
||||
#
|
||||
# Linux
|
||||
#
|
||||
|
||||
package-linux:
|
||||
name: Package for Linux
|
||||
needs:
|
||||
- build-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Create packages
|
||||
run: |
|
||||
archs=$(go tool dist list | grep linux | sed 's#linux/##')
|
||||
for goarch in $archs ; do
|
||||
go run build.go -goarch "$goarch" tar
|
||||
done
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: packages
|
||||
path: syncthing-linux-*.tar.gz
|
||||
|
||||
#
|
||||
# macOS
|
||||
#
|
||||
|
||||
package-macos:
|
||||
name: Package for macOS
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/release'
|
||||
environment: signing
|
||||
needs:
|
||||
- build-test
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-package-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Import signing certificate
|
||||
run: |
|
||||
# Set up a run-specific keychain, making it available for the
|
||||
# `codesign` tool.
|
||||
umask 066
|
||||
KEYCHAIN_PATH=$RUNNER_TEMP/codesign.keychain
|
||||
KEYCHAIN_PASSWORD=$(uuidgen)
|
||||
security create-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
|
||||
security default-keychain -s "$KEYCHAIN_PATH"
|
||||
security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH"
|
||||
security set-keychain-settings -lut 21600 "$KEYCHAIN_PATH"
|
||||
|
||||
# Import the certificate
|
||||
CERTIFICATE_PATH=$RUNNER_TEMP/codesign.p12
|
||||
echo "$DEVELOPER_ID_CERTIFICATE_BASE64" | base64 -d -o "$CERTIFICATE_PATH"
|
||||
security import "$CERTIFICATE_PATH" -k "$KEYCHAIN_PATH" -P "$DEVELOPER_ID_CERTIFICATE_PASSWORD" -T /usr/bin/codesign -T /usr/bin/productsign
|
||||
security set-key-partition-list -S apple-tool:,apple: -s -k actions "$KEYCHAIN_PATH"
|
||||
|
||||
# Set the codesign identity for following steps
|
||||
echo "CODESIGN_IDENTITY=$CODESIGN_IDENTITY" >> $GITHUB_ENV
|
||||
env:
|
||||
DEVELOPER_ID_CERTIFICATE_BASE64: ${{ secrets.DEVELOPER_ID_CERTIFICATE_BASE64 }}
|
||||
DEVELOPER_ID_CERTIFICATE_PASSWORD: ${{ secrets.DEVELOPER_ID_CERTIFICATE_PASSWORD }}
|
||||
CODESIGN_IDENTITY: ${{ secrets.CODESIGN_IDENTITY }}
|
||||
|
||||
- name: Create package (amd64)
|
||||
run: |
|
||||
go run build.go -goarch amd64 zip
|
||||
env:
|
||||
CGO_ENABLED: "1"
|
||||
|
||||
- name: Create package (arm64 cross)
|
||||
run: |
|
||||
cat <<EOT > xgo.sh
|
||||
#!/bin/bash
|
||||
CGO_ENABLED=1 \
|
||||
CGO_CFLAGS="-target arm64-apple-macos10.15" \
|
||||
CGO_LDFLAGS="-target arm64-apple-macos10.15" \
|
||||
go "\$@"
|
||||
EOT
|
||||
chmod 755 xgo.sh
|
||||
go run build.go -gocmd ./xgo.sh -goarch arm64 zip
|
||||
env:
|
||||
CGO_ENABLED: "1"
|
||||
|
||||
- name: Create package (universal)
|
||||
run: |
|
||||
rm -rf _tmp
|
||||
mkdir _tmp
|
||||
pushd _tmp
|
||||
|
||||
unzip ../syncthing-macos-amd64-*.zip
|
||||
unzip ../syncthing-macos-arm64-*.zip
|
||||
lipo -create syncthing-macos-amd64-*/syncthing syncthing-macos-arm64-*/syncthing -o syncthing
|
||||
|
||||
amd64=(syncthing-macos-amd64-*)
|
||||
universal="${amd64/amd64/universal}"
|
||||
mv "$amd64" "$universal"
|
||||
mv syncthing "$universal"
|
||||
zip -r "../$universal.zip" "$universal"
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: packages
|
||||
path: syncthing-*.zip
|
||||
|
||||
#
|
||||
# Cross compile other unixes
|
||||
#
|
||||
|
||||
package-cross:
|
||||
name: Package cross compiled
|
||||
needs:
|
||||
- build-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-cross-${{ hashFiles('**/go.sum') }}
|
||||
|
||||
- name: Create packages
|
||||
run: |
|
||||
platforms=$(go tool dist list \
|
||||
| grep -v aix/ppc64 \
|
||||
| grep -v android/ \
|
||||
| grep -v darwin/ \
|
||||
| grep -v ios/ \
|
||||
| grep -v js/ \
|
||||
| grep -v linux/ \
|
||||
| grep -v nacl/ \
|
||||
| grep -v openbsd/arm\$ \
|
||||
| grep -v openbsd/arm64 \
|
||||
| grep -v openbsd/mips \
|
||||
| grep -v plan9/ \
|
||||
| grep -v windows/ \
|
||||
)
|
||||
|
||||
for plat in $platforms; do
|
||||
goos="${plat%/*}"
|
||||
goarch="${plat#*/}"
|
||||
go run build.go -goos "$goos" -goarch "$goarch" tar
|
||||
done
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: packages
|
||||
path: syncthing-*.tar.gz
|
||||
|
||||
#
|
||||
# Source
|
||||
#
|
||||
|
||||
package-source:
|
||||
name: Package source code
|
||||
needs:
|
||||
- build-test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Package source
|
||||
run: |
|
||||
version=$(go run build.go version)
|
||||
echo "$version" > RELEASE
|
||||
|
||||
go mod vendor
|
||||
go run build.go assets
|
||||
|
||||
cd ..
|
||||
|
||||
tar c -z -f "syncthing-source-$version.tar.gz" \
|
||||
--exclude .git \
|
||||
syncthing
|
||||
|
||||
mv "syncthing-source-$version.tar.gz" syncthing
|
||||
|
||||
- name: Archive artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: packages
|
||||
path: syncthing-source-*.tar.gz
|
||||
28 .github/workflows/update-docs-translations.yaml vendored Normal file
@@ -0,0 +1,28 @@
name: Update translations and documentation
on:
  workflow_dispatch:
  schedule:
    - cron: '42 3 * * 1'

jobs:

  update_transifex_docs:
    runs-on: ubuntu-latest
    name: Update translations and documentation
    steps:
      - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
        with:
          fetch-depth: 0
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
      - uses: actions/setup-go@268d8c0ca0432bb2cf416faae41297df9d262d7f # v2
        with:
          go-version: ^1.18.4
      - run: |
          set -euo pipefail
          git config --global user.name 'Syncthing Release Automation'
          git config --global user.email 'release@syncthing.net'
          bash build.sh translate
          bash build.sh prerelease
          git push
        env:
          WEBLATE_TOKEN: ${{ secrets.WEBLATE_TOKEN }}
2 .gitignore vendored
@@ -18,3 +18,5 @@ deb
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
.idea
4 .yamlfmt Normal file
@@ -0,0 +1,4 @@
line_ending: lf
formatter:
  type: basic
  retain_line_breaks: true
56 AUTHORS
@@ -18,15 +18,19 @@ Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.nor
|
||||
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
|
||||
Alan Pope <alan@popey.com>
|
||||
Alberto Donato <albertodonato@users.noreply.github.com>
|
||||
Aleksey Vasenev <margtu-fivt@ya.ru>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
|
||||
Alex Xu <alex.hello71@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
Alexandre Alves <alexandrealvesdb.contact@gmail.com>
|
||||
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Aman Gupta <aman@tmm1.net>
|
||||
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
|
||||
Andreas Sommer <andreas.sommer87@googlemail.com>
|
||||
andresvia <andres.via@gmail.com>
|
||||
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
|
||||
Andrew Meyer <andrewm.bpi@gmail.com>
|
||||
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
|
||||
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
|
||||
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
|
||||
@@ -34,8 +38,10 @@ andyleap <andyleap@gmail.com>
|
||||
Anjan Momi <anjan@momi.ca>
|
||||
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
|
||||
Antony Male (canton7) <antony.male@gmail.com>
|
||||
Anur <anurnomeru@163.com>
|
||||
Aranjedeath <Aranjedeath@users.noreply.github.com>
|
||||
Arkadiusz Tymiński <gevleeog@gmail.com>
|
||||
Aroun <login@b-vo.fr>
|
||||
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
|
||||
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
|
||||
@@ -48,6 +54,7 @@ Ben Shepherd (benshep) <bjashepherd@gmail.com>
|
||||
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
|
||||
Benedikt Heine (bebehei) <bebe@bebehei.de>
|
||||
Benedikt Morbach <benedikt.morbach@googlemail.com>
|
||||
Benjamin Nater <17193640+bn4t@users.noreply.github.com>
|
||||
Benno Fünfstück <benno.fuenfstueck@gmail.com>
|
||||
Benny Ng (tpng) <benny.tpng@gmail.com>
|
||||
boomsquared <54829195+boomsquared@users.noreply.github.com>
|
||||
@@ -56,11 +63,13 @@ Boris Rybalkin <ribalkin@gmail.com>
|
||||
Brandon Philips (philips) <brandon@ifup.org>
|
||||
Brendan Long (brendanlong) <self@brendanlong.com>
|
||||
Brian R. Becker (brbecker) <brbecker@gmail.com>
|
||||
bt90 <btom1990@googlemail.com>
|
||||
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
|
||||
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
|
||||
Cedric Staniewski (xduugu) <cedric@gmx.ca>
|
||||
chenrui <rui@meetup.com>
|
||||
Chih-Hsuan Yen <yan12125@gmail.com>
|
||||
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
|
||||
Chris Howie (cdhowie) <me@chrishowie.com>
|
||||
Chris Joel (cdata) <chris@scriptolo.gy>
|
||||
@@ -69,28 +78,36 @@ Christian Prescott <me@christianprescott.com>
|
||||
chucic <chucic@seznam.cz>
|
||||
Colin Kennedy (moshen) <moshen.colin@gmail.com>
|
||||
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
|
||||
cui fliter <imcusg@gmail.com>
|
||||
Cyprien Devillez <cypx@users.noreply.github.com>
|
||||
Dale Visser <dale.visser@live.com>
|
||||
Dan <benda.daniel@gmail.com>
|
||||
Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com>
|
||||
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí (mvdan) <mvdan@mvdan.cc>
|
||||
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
|
||||
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
|
||||
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
|
||||
Denis A. (dva) <denisva@gmail.com>
|
||||
Dennis Wilson (snnd) <dw@risu.io>
|
||||
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com>
|
||||
dependabot[bot] <dependabot[bot]@users.noreply.github.com>
|
||||
dependabot[bot] <dependabot[bot]@users.noreply.github.com> <49699333+dependabot[bot]@users.noreply.github.com>
|
||||
derekriemer <derek.riemer@colorado.edu>
|
||||
desbma <desbma@users.noreply.github.com>
|
||||
Devon G. Redekopp <devon@redekopp.com>
|
||||
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
|
||||
Domenic Horner <domenic@tgxn.net>
|
||||
Dominik Heidler (asdil12) <dominik@heidler.eu>
|
||||
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
|
||||
Elliot Huffman <thelich2@gmail.com>
|
||||
Emil Hessman (ceh) <emil@hessman.se>
|
||||
Eng Zer Jun <engzerjun@gmail.com>
|
||||
entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain>
|
||||
Eric Lesiuta <elesiuta@gmail.com>
|
||||
Eric P <eric@kastelo.net>
|
||||
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
|
||||
Evan Spensley <94762716+0evan@users.noreply.github.com>
|
||||
Evgeny Kuznetsov <evgeny@kuznetsov.md>
|
||||
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
|
||||
Felix Ableitner (Nutomic) <me@nutomic.com>
|
||||
@@ -98,12 +115,14 @@ Felix Lampe <mail@flampe.de>
|
||||
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
Gahl Saraf <saraf.gahl@gmail.com> <gahl@raftt.io>
|
||||
georgespatton <georgespatton@users.noreply.github.com>
|
||||
ghjklw <malo@jaffre.info>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Gleb Sinyavskiy <zhulik.gleb@gmail.com>
|
||||
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
|
||||
greatroar <61184462+greatroar@users.noreply.github.com>
|
||||
Greg <gco@jazzhaiku.com>
|
||||
Han Boetes <han@boetes.org>
|
||||
HansK-p <42314815+HansK-p@users.noreply.github.com>
|
||||
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
|
||||
@@ -111,6 +130,8 @@ Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Hugo Locurcio <hugo.locurcio@hugo.pro>
|
||||
Iain Barnett <iainspeed@gmail.com>
|
||||
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
|
||||
ignacy123 <ignacy.buczek@onet.pl>
|
||||
Ikko Ashimine <eltociear@gmail.com>
|
||||
Ilya Brin <464157+ilyabrin@users.noreply.github.com>
|
||||
Iskander Sharipov (Alex) <quasilyte@gmail.com>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
@@ -119,12 +140,16 @@ Jack Croft <jccroft1@users.noreply.github.com>
|
||||
Jacob <jyundt@gmail.com>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
|
||||
James O'Beirne <wild-github@au92.org>
|
||||
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
janost <janost@tuta.io>
|
||||
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
|
||||
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
|
||||
jaseg <githubaccount@jaseg.net>
|
||||
Jauder Ho <jauderho@users.noreply.github.com>
|
||||
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
|
||||
Jaya Kumar <jaya.kumar@ict.nl>
|
||||
Jeffery To <jeffery.to@gmail.com>
|
||||
jelle van der Waa <jelle@vdwaa.nl>
|
||||
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
|
||||
@@ -134,13 +159,16 @@ Johan Andersson <j@i19.se>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
|
||||
Jonas Thelemann <e-mail@jonas-thelemann.de>
|
||||
Jonathan <artback@protonmail.com>
|
||||
Jonathan <artback@protonmail.com> <jonagn@gmail.com>
|
||||
Jonathan Cross <jcross@gmail.com>
|
||||
Jonta <359397+Jonta@users.noreply.github.com>
|
||||
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
|
||||
jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat>
|
||||
Jörg Thalheim <Mic92@users.noreply.github.com>
|
||||
Jędrzej Kula <kula.jedrek@gmail.com>
|
||||
Kalle Laine <pahakalle@protonmail.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Kebin Liu <lkebin@gmail.com>
|
||||
Keith Turner <kturner@apache.org>
|
||||
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
|
||||
@@ -150,13 +178,16 @@ Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
|
||||
klemens <ka7@github.com>
|
||||
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
|
||||
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
|
||||
Lars Lehtonen <lars.lehtonen@gmail.com>
|
||||
Laurent Arnoud <laurent@spkdev.net>
|
||||
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Leo Arias (elopio) <yo@elopio.net>
|
||||
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
|
||||
Lode Hoste (Zillode) <zillode@zillode.be>
|
||||
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
|
||||
LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
|
||||
Lukas Lihotzki <lukas@lihotzki.de>
|
||||
luzpaz <luzpaz@users.noreply.github.com>
|
||||
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
|
||||
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol (kilburn) <kilburn@la3.org>
|
||||
@@ -165,6 +196,7 @@ marco-m <marco.molteni@laposte.net>
|
||||
Marcus Legendre <marcus.legendre@gmail.com>
|
||||
Mario Majila <mariustshipichik@gmail.com>
|
||||
Mark Pulford (mpx) <mark@kyne.com.au>
|
||||
Martchus <martchus@gmx.net>
|
||||
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
|
||||
Mateusz Ż <thedead4fun@live.com>
|
||||
Matic Potočnik <hairyfotr@gmail.com>
|
||||
@@ -172,20 +204,25 @@ Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Matt Robenolt <matt@ydekproductions.com>
|
||||
Matteo Ruina <matteo.ruina@gmail.com>
|
||||
Maurizio Tomasi <ziotom78@gmail.com>
|
||||
Max <github@germancoding.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
MaximAL <almaximal@ya.ru>
|
||||
Maxime Thirouin <m@moox.io>
|
||||
mclang <1721600+mclang@users.noreply.github.com>
|
||||
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov (plouj) <ploujj@gmail.com>
|
||||
Michael Rienstra <mrienstra@gmail.com>
|
||||
Michael Tilli (pyfisch) <pyfisch@gmail.com>
|
||||
MichaIng <micha@dietpi.com>
|
||||
Mike Boone <mike@boonedocks.net>
|
||||
MikeLund <MikeLund@users.noreply.github.com>
|
||||
MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
|
||||
Mingxuan Lin <gdlmx@users.noreply.github.com>
|
||||
mv1005 <49659413+mv1005@users.noreply.github.com>
|
||||
Nate Morrison (nrm21) <natemorrison@gmail.com>
|
||||
Naveen <172697+naveensrinivasan@users.noreply.github.com>
|
||||
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
|
||||
Nick Busey <NickBusey@users.noreply.github.com>
|
||||
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
|
||||
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
|
||||
Nicolas Perraut <n.perraut@gmail.com>
|
||||
@@ -197,6 +234,7 @@ NoLooseEnds <jon.koslung@gmail.com>
|
||||
Oliver Freyermuth <o.freyermuth@googlemail.com>
|
||||
otbutz <tbutz@optitool.de>
|
||||
Otiel <Otiel@users.noreply.github.com>
|
||||
overkill <22098433+0verk1ll@users.noreply.github.com>
|
||||
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
|
||||
Pablo <pbaeyens31+github@gmail.com>
|
||||
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
@@ -217,6 +255,7 @@ Piotr Bejda (piobpl) <piotrb10@gmail.com>
|
||||
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
|
||||
Quentin Hibon <qh.public@yahoo.com>
|
||||
Rahmi Pruitt <rjpruitt16@gmail.com>
|
||||
red_led <red-led@users.noreply.github.com>
|
||||
Richard Hartmann <RichiH@users.noreply.github.com>
|
||||
Robert Carosi (nov1n) <robert@carosi.nl>
|
||||
Roberto Santalla <roobre@users.noreply.github.com>
|
||||
@@ -225,17 +264,22 @@ Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
|
||||
Ross Smith II (rasa) <ross@smithii.com>
|
||||
rubenbe <github-com-00ff86@vandamme.email>
|
||||
Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
|
||||
Ryan Qian <i@bitbili.net>
|
||||
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
sec65 <106604020+sec65@users.noreply.github.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com>
|
||||
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Simon Mwepu <simonmwepu@gmail.com>
|
||||
Sly_tom_cat <slytomcat@mail.ru>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
|
||||
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
|
||||
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
|
||||
Syncthing Automation <automation@syncthing.net>
|
||||
Syncthing Release Automation <release@syncthing.net>
|
||||
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
|
||||
Thomas Hipp <thomashipp@gmail.com>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
@@ -252,13 +296,17 @@ Tyler Kropp <kropptyler@gmail.com>
|
||||
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
|
||||
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky (buinsky) <vix_booja@tut.by>
|
||||
Vik <63919734+ViktorOn@users.noreply.github.com>
|
||||
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
|
||||
Vladimir Rusinov <vrusinov@google.com>
|
||||
villekalliomaki <53118179+villekalliomaki@users.noreply.github.com>
|
||||
Vladimir Rusinov <vrusinov@google.com> <vladimir.rusinov@gmail.com>
|
||||
wangguoliang <liangcszzu@163.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
wouter bolsterlee <wouter@bolsterl.ee>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
|
||||
xarx00 <xarx00@users.noreply.github.com>
|
||||
Xavier O. (damajor) <damajor@gmail.com>
|
||||
xjtdy888 (xjtdy888) <xjtdy888@163.com>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
佛跳墙 <daoquan@qq.com>
|
||||
落心 <luoxin.ttt@gmail.com>
|
||||
|
||||
@@ -1,6 +1,6 @@
## Reporting Bugs

Please file bugs in the [Github Issue
Please file bugs in the [GitHub Issue
Tracker](https://github.com/syncthing/syncthing/issues). Include at
least the following:

@@ -24,10 +24,15 @@ too much information will never get you yelled at. :)
## Contributing Translations

All translations are done via
[Transifex](https://www.transifex.com/projects/p/syncthing/). If you
wish to contribute to a translation, just head over there and sign up.
[Weblate](https://hosted.weblate.org/projects/syncthing/). If you wish
to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the
latest info on Transifex.
latest info on Weblate.

Note that the previously used service at
[Transifex](https://www.transifex.com/projects/p/syncthing/) is being
retired and we kindly ask you to sign up on Weblate for continued
involvement.

## Contributing Code
|
||||
|
||||
@@ -11,11 +11,11 @@ RUN rm -f syncthing && go run build.go -no-upgrade build syncthing

FROM alpine

EXPOSE 8384 22000 21027/udp
EXPOSE 8384 22000/tcp 22000/udp 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates su-exec tzdata
RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata

COPY --from=builder /src/syncthing /bin/syncthing
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
@@ -23,7 +23,8 @@ COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/syncthing

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z 127.0.0.1 8384 || exit 1
  CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1

ENV STGUIADDRESS=0.0.0.0:8384
RUN chmod 755 /bin/entrypoint.sh
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]
|
||||
@@ -6,4 +6,4 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    locales rubygems ruby-dev build-essential git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* \
    && gem install --no-ri --no-rdoc fpm
    && gem install fpm
|
||||
@@ -1,11 +1,11 @@
FROM alpine
ARG TARGETARCH

EXPOSE 8384 22000 21027/udp
EXPOSE 8384 22000/tcp 22000/udp 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates su-exec tzdata
RUN apk add --no-cache ca-certificates curl libcap su-exec tzdata

COPY ./syncthing-linux-$TARGETARCH /bin/syncthing
COPY ./script/docker-entrypoint.sh /bin/entrypoint.sh
@@ -13,7 +13,7 @@ COPY ./script/docker-entrypoint.sh /bin/entrypoint.sh
ENV PUID=1000 PGID=1000 HOME=/var/syncthing

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z 127.0.0.1 8384 || exit 1
  CMD curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1

ENV STGUIADDRESS=0.0.0.0:8384
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]
|
||||
18 Dockerfile.stcrashreceiver Normal file
@@ -0,0 +1,18 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f stcrashreceiver && go run build.go build stcrashreceiver

FROM alpine

EXPOSE 8080

COPY --from=builder /src/stcrashreceiver /bin/stcrashreceiver

ENTRYPOINT [ "/bin/stcrashreceiver" ]
26 Dockerfile.strelaypoolsrv Normal file
@@ -0,0 +1,26 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f strelaysrv && go run build.go -no-upgrade build strelaypoolsrv

FROM alpine

EXPOSE 8080

RUN apk add --no-cache ca-certificates su-exec curl
ENV PUID=1000 PGID=1000 MAXMIND_KEY=

RUN mkdir /var/strelaypoolsrv && chown 1000 /var/strelaypoolsrv
USER 1000

COPY --from=builder /src/strelaypoolsrv /bin/strelaypoolsrv
COPY --from=builder /src/script/strelaypoolsrv-entrypoint.sh /bin/entrypoint.sh

WORKDIR /var/strelaypoolsrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaypoolsrv", "-listen", ":8080"]
23 Dockerfile.stupgrades Normal file
@@ -0,0 +1,23 @@
ARG GOVERSION=latest
FROM golang:$GOVERSION AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f stupgrades && go run build.go build stupgrades

FROM alpine

EXPOSE 8080

COPY --from=builder /src/stupgrades /bin/stupgrades

ENTRYPOINT [ \
    "/bin/stupgrades", \
    "-f", "/nightly.json->https://build.syncthing.net/guestAuth/repository/download/Release_Nightly/.lastSuccessful/nightly.json", \
    "-f", "/syncthing-macos/appcast.xml->https://build.syncthing.net/guestAuth/repository/download/SyncthingMacOS_CreateAppcastXml/.lastSuccessful/appcast.xml" \
]
2 GOALS.md
@@ -57,7 +57,7 @@ latest technology is not always available to any given individual.
> Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats
> and so on may include computing capabitilies but it is not our goal for
> and so on may include computing capabilities but it is not our goal for
> Syncthing to run smoothly on these devices.

### 6. For Individuals
|
||||
@@ -7,25 +7,57 @@ Use the `/var/syncthing` volume to have the synchronized files available on the
host. You can add more folders and map them as you prefer.

Note that Syncthing runs as UID 1000 and GID 1000 by default. These may be
altered with the ``PUID`` and ``PGID`` environment variables.
altered with the `PUID` and `PGID` environment variables. In addition
the name of the Syncthing instance can be optionally defined by using
`--hostname=syncthing` parameter.

To grant Syncthing additional capabilities without running as root, use the
`PCAP` environment variable with the same syntax as that for `setcap(8)`.
For example, `PCAP=cap_chown,cap_fowner+ep`.

## Example Usage

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run -p 8384:8384 -p 22000:22000 \
$ docker run -p 8384:8384 -p 22000:22000/tcp -p 22000:22000/udp -p 21027:21027/udp \
    -v /wherever/st-sync:/var/syncthing \
    --hostname=my-syncthing \
    syncthing/syncthing:latest
```

**Docker compose**
```yml
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    ports:
      - 8384:8384 # Web UI
      - 22000:22000/tcp # TCP file transfers
      - 22000:22000/udp # QUIC file transfers
      - 21027:21027/udp # Receive local discovery broadcasts
    restart: unless-stopped
```

## Discovery

Note that local device discovery will not work with the above command,
resulting in poor local transfer rates if local device addresses are not
manually configured.
Note that Docker's default network mode prevents local IP addresses from
being discovered, as Syncthing is only able to see the internal IP of the
container on the `172.17.0.0/16` subnet. This will result in poor transfer rates
if local device addresses are not manually configured.

To allow local discovery, the docker host network can be used instead:
It is therefore advisable to use the [host network mode](https://docs.docker.com/network/host/) instead:

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run --network=host \
@@ -33,6 +65,24 @@ $ docker run --network=host \
    syncthing/syncthing:latest
```

**Docker compose**
```yml
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    network_mode: host
    restart: unless-stopped
```

Be aware that syncthing alone is now in control of what interfaces and ports it
listens on. You can edit the syncthing configuration to change the defaults if
there are conflicts.
|
||||
17 README.md
@@ -73,15 +73,16 @@ This helps the team understand what are the biggest pain points for our users, a

## Getting in Touch

The first and best point of contact is the [Forum][8]. There is also an IRC
channel, `#syncthing` on [freenode][4] (with a [web client][9]), for talking
directly to developers and users. If you've found something that is clearly a
The first and best point of contact is the [Forum][8].
If you've found something that is clearly a
bug, feel free to report it in the [GitHub issue tracker][10].

## Building

Building Syncthing from source is easy, and there's a [guide][5]
that describes it for both Unix and Windows systems.
Building Syncthing from source is easy. After extracting the source bundle from
a release or checking out git, you just need to run `go run build.go` and the
binaries are created in `./bin`. There's [a guide][5] with more details on the
build process.

## Signed Releases

@@ -95,19 +96,17 @@ binaries are also properly code signed.

## Documentation

Please see the [Syncthing documentation site][6].
Please see the Syncthing [documentation site][6] [[source]][17].

All code is licensed under the [MPLv2 License][7].

[1]: https://docs.syncthing.net/specs/bep-v1.html
[2]: https://docs.syncthing.net/intro/getting-started.html
[3]: https://github.com/syncthing/syncthing/blob/main/etc
[4]: https://www.freenode.net/
[5]: https://docs.syncthing.net/dev/building.html
[6]: https://docs.syncthing.net/
[7]: https://github.com/syncthing/syncthing/blob/main/LICENSE
[8]: https://forum.syncthing.net/
[9]: https://kiwiirc.com/client/irc.freenode.net/#syncthing
[10]: https://github.com/syncthing/syncthing/issues
[11]: https://docs.syncthing.net/users/contrib.html#gui-wrappers
[12]: https://www.bountysource.com/teams/syncthing/issues
@@ -115,4 +114,4 @@ All code is licensed under the [MPLv2 License][7].
[14]: assets/logo-text-128.png
[15]: https://syncthing.net/
[16]: https://github.com/syncthing/syncthing/blob/main/README-Docker.md

[17]: https://github.com/syncthing/docs
|
||||
346 build.go
@@ -4,6 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build ignore
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
@@ -14,13 +15,12 @@ import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -30,27 +30,34 @@ import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
buildpkg "github.com/syncthing/syncthing/lib/build"
|
||||
)
|
||||
|
||||
var (
|
||||
goarch string
|
||||
goos string
|
||||
noupgrade bool
|
||||
version string
|
||||
goCmd string
|
||||
race bool
|
||||
debug = os.Getenv("BUILDDEBUG") != ""
|
||||
extraTags string
|
||||
installSuffix string
|
||||
pkgdir string
|
||||
cc string
|
||||
run string
|
||||
benchRun string
|
||||
debugBinary bool
|
||||
coverage bool
|
||||
timeout = "120s"
|
||||
numVersions = 5
|
||||
goarch string
|
||||
goos string
|
||||
noupgrade bool
|
||||
version string
|
||||
goCmd string
|
||||
race bool
|
||||
debug = os.Getenv("BUILDDEBUG") != ""
|
||||
extraTags string
|
||||
installSuffix string
|
||||
pkgdir string
|
||||
cc string
|
||||
run string
|
||||
benchRun string
|
||||
buildOut string
|
||||
debugBinary bool
|
||||
coverage bool
|
||||
long bool
|
||||
timeout = "120s"
|
||||
longTimeout = "600s"
|
||||
numVersions = 5
|
||||
withNextGenGUI = os.Getenv("BUILD_NEXT_GEN_GUI") != ""
|
||||
)
|
||||
|
||||
type target struct {
|
||||
@@ -58,12 +65,11 @@ type target struct {
|
||||
debname string
|
||||
debdeps []string
|
||||
debpre string
|
||||
debpost string
|
||||
description string
|
||||
buildPkgs []string
|
||||
binaryName string
|
||||
archiveFiles []archiveFile
|
||||
systemdServices []string
|
||||
systemdService string
|
||||
installationFiles []archiveFile
|
||||
tags []string
|
||||
}
|
||||
@@ -85,7 +91,6 @@ var targets = map[string]target{
|
||||
name: "syncthing",
|
||||
debname: "syncthing",
|
||||
debdeps: []string{"libc6", "procps"},
|
||||
debpost: "script/post-upgrade",
|
||||
description: "Open Source Continuous File Synchronization",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
|
||||
binaryName: "syncthing", // .exe will be added automatically for Windows builds
|
||||
@@ -96,6 +101,7 @@ var targets = map[string]target{
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
// All files from etc/ and extra/ added automatically in init().
|
||||
},
|
||||
systemdService: "syncthing@*.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
|
||||
@@ -114,6 +120,7 @@ var targets = map[string]target{
|
||||
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
|
||||
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
|
||||
{src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
|
||||
{src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
|
||||
{src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
|
||||
@@ -139,15 +146,14 @@ var targets = map[string]target{
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
systemdServices: []string{
|
||||
"cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service",
|
||||
},
|
||||
systemdService: "stdiscosrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
|
||||
},
|
||||
@@ -168,9 +174,7 @@ var targets = map[string]target{
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
systemdServices: []string{
|
||||
"cmd/strelaysrv/etc/linux-systemd/strelaysrv.service",
|
||||
},
|
||||
systemdService: "strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
@@ -178,6 +182,7 @@ var targets = map[string]target{
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
|
||||
},
|
||||
@@ -202,21 +207,21 @@ var targets = map[string]target{
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
},
|
||||
"stupgrades": {
|
||||
name: "stupgrades",
|
||||
description: "Syncthing Upgrade Check Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
|
||||
binaryName: "stupgrades",
|
||||
},
|
||||
"stcrashreceiver": {
|
||||
name: "stcrashreceiver",
|
||||
description: "Syncthing Crash Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
|
||||
binaryName: "stcrashreceiver",
|
||||
},
|
||||
}
|
||||
|
||||
// These are repos we need to clone to run "go generate"
|
||||
|
||||
type dependencyRepo struct {
|
||||
path string
|
||||
repo string
|
||||
commit string
|
||||
}
|
||||
|
||||
var dependencyRepos = []dependencyRepo{
|
||||
{path: "xdr", repo: "https://github.com/calmh/xdr.git", commit: "08e072f9cb16"},
|
||||
}
|
||||
|
||||
func init() {
|
||||
func initTargets() {
|
||||
all := targets["all"]
|
||||
pkgs, _ := filepath.Glob("cmd/*")
|
||||
for _, pkg := range pkgs {
|
||||
@@ -225,6 +230,9 @@ func init() {
|
||||
// ignore dotfiles
|
||||
continue
|
||||
}
|
||||
if noupgrade && pkg == "stupgrades" {
|
||||
continue
|
||||
}
|
||||
all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
|
||||
}
|
||||
targets["all"] = all
|
||||
@@ -256,6 +264,8 @@ func main() {
|
||||
}()
|
||||
}
|
||||
|
||||
initTargets()
|
||||
|
||||
// Invoking build.go with no parameters at all builds everything (incrementally),
|
||||
// which is what you want for maximum error checking during development.
|
||||
if flag.NArg() == 0 {
|
||||
@@ -307,15 +317,24 @@ func runCommand(cmd string, target target) {
|
||||
case "assets":
|
||||
rebuildAssets()
|
||||
|
||||
case "update-deps":
|
||||
updateDependencies()
|
||||
|
||||
case "proto":
|
||||
proto()
|
||||
|
||||
case "testmocks":
|
||||
testmocks()
|
||||
|
||||
case "translate":
|
||||
translate()
|
||||
|
||||
case "transifex":
|
||||
transifex()
|
||||
|
||||
case "weblate":
|
||||
weblate()
|
||||
|
||||
case "tar":
|
||||
buildTar(target, tags)
|
||||
|
||||
@@ -369,9 +388,12 @@ func parseFlags() {
|
||||
flag.StringVar(&cc, "cc", os.Getenv("CC"), "Set CC environment variable for `go build`")
|
||||
flag.BoolVar(&debugBinary, "debug-binary", debugBinary, "Create unoptimized binary to use with delve, set -gcflags='-N -l' and omit -ldflags")
|
||||
flag.BoolVar(&coverage, "coverage", coverage, "Write coverage profile of tests to coverage.txt")
|
||||
flag.BoolVar(&long, "long", long, "Run tests without the -short flag")
|
||||
flag.IntVar(&numVersions, "num-versions", numVersions, "Number of versions for changelog command")
|
||||
flag.StringVar(&run, "run", "", "Specify which tests to run")
|
||||
flag.StringVar(&benchRun, "bench", "", "Specify which benchmarks to run")
|
||||
flag.BoolVar(&withNextGenGUI, "with-next-gen-gui", withNextGenGUI, "Also build 'newgui'")
|
||||
flag.StringVar(&buildOut, "build-out", "", "Set the '-o' value for 'go build'")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
@@ -379,11 +401,17 @@ func test(tags []string, pkgs ...string) {
|
||||
lazyRebuildAssets()
|
||||
|
||||
tags = append(tags, "purego")
|
||||
args := []string{"test", "-short", "-timeout", timeout, "-tags", strings.Join(tags, " ")}
|
||||
args := []string{"test", "-tags", strings.Join(tags, " ")}
|
||||
if long {
|
||||
timeout = longTimeout
|
||||
} else {
|
||||
args = append(args, "-short")
|
||||
}
|
||||
args = append(args, "-timeout", timeout)
|
||||
|
||||
if runtime.GOARCH == "amd64" {
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "linux", "freebsd": // , "windows": # See https://github.com/golang/go/issues/27089
|
||||
case buildpkg.Darwin, buildpkg.Linux, buildpkg.FreeBSD: // , "windows": # See https://github.com/golang/go/issues/27089
|
||||
args = append(args, "-race")
|
||||
}
|
||||
}
|
||||
@@ -438,6 +466,10 @@ func benchArgs() []string {
|
||||
}
|
||||
|
||||
func install(target target, tags []string) {
|
||||
if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
|
||||
log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
|
||||
}
|
||||
|
||||
lazyRebuildAssets()
|
||||
|
||||
tags = append(target.tags, tags...)
|
||||
@@ -461,12 +493,16 @@ func install(target target, tags []string) {
|
||||
defer shouldCleanupSyso(sysoPath)
|
||||
}
|
||||
|
||||
args := []string{"install", "-v", "-trimpath"}
|
||||
args := []string{"install", "-v"}
|
||||
args = appendParameters(args, tags, target.buildPkgs...)
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
func build(target target, tags []string) {
|
||||
if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
|
||||
log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
|
||||
}
|
||||
|
||||
lazyRebuildAssets()
|
||||
tags = append(target.tags, tags...)
|
||||
|
||||
@@ -489,7 +525,10 @@ func build(target target, tags []string) {
|
||||
defer shouldCleanupSyso(sysoPath)
|
||||
}
|
||||
|
||||
args := []string{"build", "-v", "-trimpath"}
|
||||
args := []string{"build", "-v"}
|
||||
if buildOut != "" {
|
||||
args = append(args, "-o", buildOut)
|
||||
}
|
||||
args = appendParameters(args, tags, target.buildPkgs...)
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
@@ -523,13 +562,13 @@ func appendParameters(args []string, tags []string, pkgs ...string) []string {
|
||||
|
||||
if !debugBinary {
|
||||
// Regular binaries get version tagged and skip some debug symbols
|
||||
args = append(args, "-ldflags", ldflags(tags))
|
||||
args = append(args, "-trimpath", "-ldflags", ldflags(tags))
|
||||
} else {
|
||||
// -gcflags to disable optimizations and inlining. Skip -ldflags
|
||||
// because `Could not launch program: decoding dwarf section info at
|
||||
// offset 0x0: too short` on 'dlv exec ...' see
|
||||
// https://github.com/go-delve/delve/issues/79
|
||||
args = append(args, "-gcflags", "-N -l")
|
||||
args = append(args, "-gcflags", "all=-N -l")
|
||||
}
|
||||
|
||||
return append(args, pkgs...)
|
||||
@@ -635,11 +674,13 @@ func buildDeb(target target) {
|
||||
for _, dep := range target.debdeps {
|
||||
args = append(args, "-d", dep)
|
||||
}
|
||||
for _, service := range target.systemdServices {
|
||||
args = append(args, "--deb-systemd", service)
|
||||
}
|
||||
if target.debpost != "" {
|
||||
args = append(args, "--after-upgrade", target.debpost)
|
||||
if target.systemdService != "" {
|
||||
debpost, err := createPostInstScript(target)
|
||||
defer os.Remove(debpost)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
args = append(args, "--after-upgrade", debpost)
|
||||
}
|
||||
if target.debpre != "" {
|
||||
args = append(args, "--before-install", target.debpre)
|
||||
@@ -647,6 +688,28 @@ func buildDeb(target target) {
|
||||
runPrint("fpm", args...)
|
||||
}
|
||||
|
||||
func createPostInstScript(target target) (string, error) {
|
||||
scriptname := filepath.Join("script", "deb-post-inst.template")
|
||||
t, err := template.ParseFiles(scriptname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
scriptname = strings.TrimSuffix(scriptname, ".template")
|
||||
w, err := os.Create(scriptname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer w.Close()
|
||||
if err = t.Execute(w, struct {
|
||||
Service, Command string
|
||||
}{
|
||||
target.systemdService, target.binaryName,
|
||||
}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return scriptname, nil
|
||||
}
|
||||
|
||||
func shouldBuildSyso(dir string) (string, error) {
|
||||
type M map[string]interface{}
|
||||
version := getVersion()
|
||||
@@ -682,7 +745,7 @@ func shouldBuildSyso(dir string) (string, error) {
|
||||
}
|
||||
|
||||
jsonPath := filepath.Join(dir, "versioninfo.json")
|
||||
err = ioutil.WriteFile(jsonPath, bs, 0644)
|
||||
err = os.WriteFile(jsonPath, bs, 0644)
|
||||
if err != nil {
|
||||
return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
|
||||
}
|
||||
@@ -695,7 +758,13 @@ func shouldBuildSyso(dir string) (string, error) {
|
||||
|
||||
sysoPath := filepath.Join(dir, "cmd", "syncthing", "resource.syso")
|
||||
|
||||
if _, err := runError("goversioninfo", "-o", sysoPath); err != nil {
|
||||
// See https://github.com/josephspurrier/goversioninfo#command-line-flags
|
||||
armOption := ""
|
||||
if strings.Contains(goarch, "arm") {
|
||||
armOption = "-arm=true"
|
||||
}
|
||||
|
||||
if _, err := runError("goversioninfo", "-o", sysoPath, armOption); err != nil {
|
||||
return "", errors.New("failed to create " + sysoPath + ": " + err.Error())
|
||||
}
|
||||
|
||||
@@ -715,12 +784,12 @@ func shouldCleanupSyso(sysoFilePath string) {
|
||||
// exists. The permission bits are copied as well. If dst already exists and
|
||||
// the contents are identical to src the modification time is not updated.
|
||||
func copyFile(src, dst string, perm os.FileMode) error {
|
||||
in, err := ioutil.ReadFile(src)
|
||||
in, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := ioutil.ReadFile(dst)
|
||||
out, err := os.ReadFile(dst)
|
||||
if err != nil {
|
||||
// The destination probably doesn't exist, we should create
|
||||
// it.
|
||||
@@ -736,7 +805,7 @@ func copyFile(src, dst string, perm os.FileMode) error {
|
||||
|
||||
copy:
|
||||
os.MkdirAll(filepath.Dir(dst), 0777)
|
||||
if err := ioutil.WriteFile(dst, in, perm); err != nil {
|
||||
if err := os.WriteFile(dst, in, perm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -764,11 +833,46 @@ func rebuildAssets() {
|
||||
}
|
||||
|
||||
func lazyRebuildAssets() {
|
||||
if shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui") {
|
||||
shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
|
||||
shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")
|
||||
|
||||
if withNextGenGUI {
|
||||
shouldRebuild = buildNextGenGUI() || shouldRebuild
|
||||
}
|
||||
|
||||
if shouldRebuild {
|
||||
rebuildAssets()
|
||||
}
|
||||
}
|
||||
|
||||
func buildNextGenGUI() bool {
|
||||
// Check if we need to run the npm process, and if so also set the flag
|
||||
// to rebuild Go assets afterwards. The index.html is regenerated every
|
||||
// time by the build process. This assumes the new GUI ends up in
|
||||
// next-gen-gui/dist/next-gen-gui.
|
||||
|
||||
if !shouldRebuildAssets("gui/next-gen-gui/index.html", "next-gen-gui") {
|
||||
// The GUI is up to date.
|
||||
return false
|
||||
}
|
||||
|
||||
runPrintInDir("next-gen-gui", "npm", "install")
|
||||
runPrintInDir("next-gen-gui", "npm", "run", "build", "--", "--prod", "--subresource-integrity")
|
||||
|
||||
rmr("gui/tech-ui")
|
||||
|
||||
for _, src := range listFiles("next-gen-gui/dist") {
|
||||
rel, _ := filepath.Rel("next-gen-gui/dist", src)
|
||||
dst := filepath.Join("gui", rel)
|
||||
if err := copyFile(src, dst, 0644); err != nil {
|
||||
fmt.Println("copy:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func shouldRebuildAssets(target, srcdir string) bool {
|
||||
info, err := os.Stat(target)
|
||||
if err != nil {
|
||||
@@ -795,27 +899,59 @@ func shouldRebuildAssets(target, srcdir string) bool {
|
||||
return assetsAreNewer
|
||||
}
|
||||
|
||||
func updateDependencies() {
|
||||
// Figure out desired Go version
|
||||
bs, err := os.ReadFile("go.mod")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
re := regexp.MustCompile(`(?m)^go\s+([0-9.]+)`)
|
||||
matches := re.FindSubmatch(bs)
|
||||
if len(matches) != 2 {
|
||||
log.Fatal("failed to parse go.mod")
|
||||
}
|
||||
goVersion := string(matches[1])
|
||||
|
||||
runPrint(goCmd, "get", "-u", "./...")
|
||||
runPrint(goCmd, "mod", "tidy", "-go="+goVersion, "-compat="+goVersion)
|
||||
|
||||
// We might have updated the protobuf package and should regenerate to match.
|
||||
proto()
|
||||
}
|
||||
|
||||
func proto() {
|
||||
pv := protobufVersion()
|
||||
dependencyRepos = append(dependencyRepos,
|
||||
dependencyRepo{path: "protobuf", repo: "https://github.com/gogo/protobuf.git", commit: pv},
|
||||
)
|
||||
repo := "https://github.com/gogo/protobuf.git"
|
||||
path := filepath.Join("repos", "protobuf")
|
||||
|
||||
runPrint(goCmd, "get", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
|
||||
runPrint(goCmd, "install", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
|
||||
os.MkdirAll("repos", 0755)
|
||||
for _, dep := range dependencyRepos {
|
||||
path := filepath.Join("repos", dep.path)
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
runPrintInDir("repos", "git", "clone", dep.repo, dep.path)
|
||||
} else {
|
||||
runPrintInDir(path, "git", "fetch")
|
||||
}
|
||||
runPrintInDir(path, "git", "checkout", dep.commit)
|
||||
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
runPrint("git", "clone", repo, path)
|
||||
} else {
|
||||
runPrintInDir(path, "git", "fetch")
|
||||
}
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/stdiscosrv")
|
||||
runPrintInDir(path, "git", "checkout", pv)
|
||||
|
||||
runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
|
||||
runPrint(goCmd, "generate", "proto/generate.go")
|
||||
}
|
||||
|
||||
func testmocks() {
|
||||
args := []string{
|
||||
"generate",
|
||||
"github.com/syncthing/syncthing/lib/config",
|
||||
"github.com/syncthing/syncthing/lib/connections",
|
||||
"github.com/syncthing/syncthing/lib/discover",
|
||||
"github.com/syncthing/syncthing/lib/events",
|
||||
"github.com/syncthing/syncthing/lib/logger",
|
||||
"github.com/syncthing/syncthing/lib/model",
|
||||
"github.com/syncthing/syncthing/lib/protocol",
|
||||
}
|
||||
runPrint(goCmd, args...)
|
||||
}
|
||||
|
||||
func translate() {
|
||||
os.Chdir("gui/default/assets/lang")
|
||||
runPipe("lang-en-new.json", goCmd, "run", "../../../../script/translate.go", "lang-en.json", "../../../")
|
||||
@@ -832,6 +968,11 @@ func transifex() {
|
||||
runPrint(goCmd, "run", "../../../../script/transifexdl.go")
|
||||
}
|
||||
|
||||
func weblate() {
|
||||
os.Chdir("gui/default/assets/lang")
|
||||
runPrint(goCmd, "run", "../../../../script/weblatedl.go")
|
||||
}
|
||||
|
||||
func ldflags(tags []string) string {
|
||||
b := new(strings.Builder)
|
||||
b.WriteString("-w")
|
||||
@@ -856,7 +997,7 @@ func rmr(paths ...string) {
|
||||
}
|
||||
|
||||
func getReleaseVersion() (string, error) {
|
||||
bs, err := ioutil.ReadFile("RELEASE")
|
||||
bs, err := os.ReadFile("RELEASE")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -879,7 +1020,7 @@ func getGitVersion() (string, error) {
|
||||
v0 := string(bs)
|
||||
|
||||
// To be more semantic-versionish and ensure proper ordering in our
|
||||
// upgrade process, we make sure there's only one hypen in the version.
|
||||
// upgrade process, we make sure there's only one hyphen in the version.
|
||||
|
||||
versionRe := regexp.MustCompile(`-([0-9]{1,3}-g[0-9a-f]{5,10}(-dirty)?)`)
|
||||
if m := versionRe.FindStringSubmatch(vcur); len(m) > 0 {
|
||||
@@ -1188,11 +1329,11 @@ func zipFile(out string, files []archiveFile) {
|
||||
|
||||
if strings.HasSuffix(f.dst, ".txt") {
|
||||
// Text file. Read it and convert line endings.
|
||||
bs, err := ioutil.ReadAll(sf)
|
||||
bs, err := io.ReadAll(sf)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\n', '\r'}, -1)
|
||||
bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\r', '\n'}, -1)
|
||||
fh.UncompressedSize = uint32(len(bs))
|
||||
fh.UncompressedSize64 = uint64(len(bs))
|
||||
|
||||
@@ -1263,6 +1404,33 @@ func windowsCodesign(file string) {
|
||||
args := []string{"sign", "/fd", algo}
|
||||
if f := os.Getenv("CODESIGN_CERTIFICATE_FILE"); f != "" {
|
||||
args = append(args, "/f", f)
|
||||
} else if b := os.Getenv("CODESIGN_CERTIFICATE_BASE64"); b != "" {
|
||||
// Decode the PFX certificate from base64.
|
||||
bs, err := base64.RawStdEncoding.DecodeString(b)
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed: decoding base64:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Write it to a temporary file
|
||||
f, err := os.CreateTemp("", "codesign-*.pfx")
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed: creating temp file:", err)
|
||||
return
|
||||
}
|
||||
_ = f.Chmod(0600) // best effort remove other users' access
|
||||
defer os.Remove(f.Name())
|
||||
if _, err := f.Write(bs); err != nil {
|
||||
log.Println("Codesign: signing failed: writing temp file:", err)
|
||||
return
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
log.Println("Codesign: signing failed: closing temp file:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Use that when signing
|
||||
args = append(args, "/f", f.Name())
|
||||
}
|
||||
if p := os.Getenv("CODESIGN_CERTIFICATE_PASSWORD"); p != "" {
|
||||
args = append(args, "/p", p)
|
||||
@@ -1282,7 +1450,7 @@ func windowsCodesign(file string) {
|
||||
|
||||
bs, err := runError(st, args...)
|
||||
if err != nil {
|
||||
log.Println("Codesign: signing failed:", string(bs))
|
||||
log.Printf("Codesign: signing failed: %v: %s", err, string(bs))
|
||||
return
|
||||
}
|
||||
log.Println("Codesign: successfully signed", file, "using", algo)
|
||||
@@ -1299,32 +1467,6 @@ func metalintShort() {
|
||||
runPrint(goCmd, "test", "-short", "-run", "Metalint", "./meta")
|
||||
}
|
||||
|
||||
func temporaryBuildDir() (string, error) {
|
||||
// The base of our temp dir is "syncthing-xxxxxxxx" where the x:es
|
||||
// are eight bytes from the sha256 of our working directory. We do
|
||||
// this because we want a name in the global temp dir that doesn't
|
||||
// conflict with someone else building syncthing on the same
|
||||
// machine, yet is persistent between runs from the same source
|
||||
// directory.
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hash := sha256.Sum256([]byte(wd))
|
||||
base := fmt.Sprintf("syncthing-%x", hash[:4])
|
||||
|
||||
// The temp dir is taken from $STTMPDIR if set, otherwise the system
|
||||
// default (potentially influenced by $TMPDIR on unixes).
|
||||
var tmpDir string
|
||||
if t := os.Getenv("STTMPDIR"); t != "" {
|
||||
tmpDir = t
|
||||
} else {
|
||||
tmpDir = os.TempDir()
|
||||
}
|
||||
|
||||
return filepath.Join(tmpDir, base), nil
|
||||
}
|
||||
|
||||
func (t target) BinaryName() string {
|
||||
if goos == "windows" {
|
||||
return t.binaryName + ".exe"
|
||||
|
||||
2 build.sh
@@ -23,7 +23,7 @@ case "${1:-default}" in
|
||||
|
||||
prerelease)
|
||||
script authors
|
||||
build transifex
|
||||
build weblate
|
||||
pushd man ; ./refresh.sh ; popd
|
||||
git add -A gui man AUTHORS
|
||||
git commit -m 'gui, man, authors: Update docs, translations, and contributors'
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type APIClient struct {
|
||||
http.Client
|
||||
cfg config.GUIConfiguration
|
||||
apikey string
|
||||
}
|
||||
|
||||
func getClient(cfg config.GUIConfiguration) *APIClient {
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial(cfg.Network(), cfg.Address())
|
||||
},
|
||||
},
|
||||
}
|
||||
return &APIClient{
|
||||
Client: httpClient,
|
||||
cfg: cfg,
|
||||
apikey: cfg.APIKey,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *APIClient) Endpoint() string {
|
||||
if c.cfg.Network() == "unix" {
|
||||
return "http://unix/"
|
||||
}
|
||||
url := c.cfg.URL()
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
func (c *APIClient) Do(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("X-API-Key", c.apikey)
|
||||
resp, err := c.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, checkResponse(resp)
|
||||
}
|
||||
|
||||
func (c *APIClient) Get(url string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("GET", c.Endpoint()+"rest/"+url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func (c *APIClient) Post(url, body string) (*http.Response, error) {
|
||||
request, err := http.NewRequest("POST", c.Endpoint()+"rest/"+url, bytes.NewBufferString(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func checkResponse(response *http.Response) error {
|
||||
if response.StatusCode == 404 {
|
||||
return errors.New("invalid endpoint or API call")
|
||||
} else if response.StatusCode == 403 {
|
||||
return errors.New("invalid API key")
|
||||
} else if response.StatusCode != 200 {
|
||||
data, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := strings.TrimSpace(string(data))
|
||||
return fmt.Errorf("unexpected HTTP status returned: %s\n%s", response.Status, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,192 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/AudriusButkevicius/recli"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// This is somewhat a hack around a chicken and egg problem.
|
||||
// We need to set the home directory and potentially other flags to know where the syncthing instance is running
|
||||
// in order to get its config ... which we then use to construct the actual CLI ... at which point it's too late
|
||||
// to add flags there...
|
||||
homeBaseDir := locations.GetBaseDir(locations.ConfigBaseDir)
|
||||
guiCfg := config.GUIConfiguration{}
|
||||
|
||||
flags := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
flags.StringVar(&guiCfg.RawAddress, "gui-address", guiCfg.RawAddress, "Override GUI address (e.g. \"http://192.0.2.42:8443\")")
|
||||
flags.StringVar(&guiCfg.APIKey, "gui-apikey", guiCfg.APIKey, "Override GUI API key")
|
||||
flags.StringVar(&homeBaseDir, "home", homeBaseDir, "Set configuration directory")
|
||||
|
||||
// Implement the same flags at the lower CLI, with the same default values (pre-parse), but do nothing with them.
|
||||
// This is so that we could reuse os.Args
|
||||
fakeFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "gui-address",
|
||||
Value: guiCfg.RawAddress,
|
||||
Usage: "Override GUI address (e.g. \"http://192.0.2.42:8443\")",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "gui-apikey",
|
||||
Value: guiCfg.APIKey,
|
||||
Usage: "Override GUI API key",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "home",
|
||||
Value: homeBaseDir,
|
||||
Usage: "Set configuration directory",
|
||||
},
|
||||
}
|
||||
|
||||
// Do not print usage of these flags, and ignore errors as this can't understand plenty of things
|
||||
flags.Usage = func() {}
|
||||
_ = flags.Parse(os.Args[1:])
|
||||
|
||||
// Now if the API key and address is not provided (we are not connecting to a remote instance),
|
||||
// try to rip it out of the config.
|
||||
if guiCfg.RawAddress == "" && guiCfg.APIKey == "" {
|
||||
// Update the base directory
|
||||
err := locations.SetBaseDir(locations.ConfigBaseDir, homeBaseDir)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "setting home"))
|
||||
}
|
||||
|
||||
// Load the certs and get the ID
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "reading device ID"))
|
||||
}
|
||||
|
||||
myID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
|
||||
// Load the config
|
||||
cfg, _, err := config.Load(locations.Get(locations.ConfigFile), myID, events.NoopLogger)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "loading config"))
|
||||
}
|
||||
|
||||
guiCfg = cfg.GUI()
|
||||
} else if guiCfg.Address() == "" || guiCfg.APIKey == "" {
|
||||
log.Fatalln("Both -gui-address and -gui-apikey should be specified")
|
||||
}
|
||||
|
||||
if guiCfg.Address() == "" {
|
||||
log.Fatalln("Could not find GUI Address")
|
||||
}
|
||||
|
||||
if guiCfg.APIKey == "" {
|
||||
log.Fatalln("Could not find GUI API key")
|
||||
}
|
||||
|
||||
client := getClient(guiCfg)
|
||||
|
||||
cfg, err := getConfig(client)
|
||||
original := cfg.Copy()
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "getting config"))
|
||||
}
|
||||
|
||||
// Copy the config and set the default flags
|
||||
recliCfg := recli.DefaultConfig
|
||||
recliCfg.IDTag.Name = "xml"
|
||||
recliCfg.SkipTag.Name = "json"
|
||||
|
||||
commands, err := recli.New(recliCfg).Construct(&cfg)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "config reflect"))
|
||||
}
|
||||
|
||||
// Construct the actual CLI
|
||||
app := cli.NewApp()
|
||||
app.Name = "stcli"
|
||||
app.HelpName = app.Name
|
||||
app.Author = "The Syncthing Authors"
|
||||
app.Usage = "Syncthing command line interface"
|
||||
app.Version = build.Version
|
||||
app.Flags = fakeFlags
|
||||
app.Metadata = map[string]interface{}{
|
||||
"client": client,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "config",
|
||||
HideHelp: true,
|
||||
Usage: "Configuration modification command group",
|
||||
Subcommands: commands,
|
||||
},
|
||||
showCommand,
|
||||
operationCommand,
|
||||
errorsCommand,
|
||||
}
|
||||
|
||||
tty := isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd())
|
||||
if !tty {
|
||||
// Not a TTY, consume from stdin
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "parsing input"))
|
||||
}
|
||||
if len(input) == 0 {
|
||||
continue
|
||||
}
|
||||
err = app.Run(append(os.Args, input...))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
err = scanner.Err()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
err = app.Run(os.Args)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg, original) {
|
||||
body, err := json.MarshalIndent(cfg, "", " ")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
resp, err := client.Post("system/config", string(body))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
body, err := responseToBArray(resp)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
log.Fatalln(string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func responseToBArray(response *http.Response) ([]byte, error) {
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, response.Body.Close()
|
||||
}
|
||||
|
||||
func emptyPost(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
_, err := client.Post(url, "")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func dumpOutput(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
response, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return prettyPrintResponse(c, response)
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(c *APIClient) (config.Configuration, error) {
|
||||
cfg := config.Configuration{}
|
||||
response, err := c.Get("system/config")
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
err = json.Unmarshal(bytes, &cfg)
|
||||
if err == nil {
|
||||
return cfg, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func expects(n int, actionFunc cli.ActionFunc) cli.ActionFunc {
|
||||
return func(ctx *cli.Context) error {
|
||||
if ctx.NArg() != n {
|
||||
plural := ""
|
||||
if n != 1 {
|
||||
plural = "s"
|
||||
}
|
||||
return fmt.Errorf("expected %d argument%s, got %d", n, plural, ctx.NArg())
|
||||
}
|
||||
return actionFunc(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func prettyPrintJSON(data interface{}) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(data)
|
||||
}
|
||||
|
||||
func prettyPrintResponse(c *cli.Context, response *http.Response) error {
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data interface{}
|
||||
if err := json.Unmarshal(bytes, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: Check flag for pretty print format
|
||||
return prettyPrintJSON(data)
|
||||
}
|
||||
@@ -7,7 +7,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -15,6 +14,8 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -74,7 +75,7 @@ type fileInfo struct {
|
||||
name string
|
||||
mode os.FileMode
|
||||
mod int64
|
||||
hash [16]byte
|
||||
hash [sha256.Size]byte
|
||||
}
|
||||
|
||||
func (f fileInfo) String() string {
|
||||
@@ -106,11 +107,7 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := md5.New()
|
||||
h.Write([]byte(tgt))
|
||||
hash := h.Sum(nil)
|
||||
|
||||
copy(f.hash[:], hash)
|
||||
f.hash = sha256.Sum256([]byte(tgt))
|
||||
} else if info.IsDir() {
|
||||
f = fileInfo{
|
||||
name: rn,
|
||||
@@ -123,7 +120,7 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
mode: info.Mode(),
|
||||
mod: info.ModTime().Unix(),
|
||||
}
|
||||
sum, err := md5file(path)
|
||||
sum, err := sha256file(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -150,14 +147,14 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
return errc
|
||||
}
|
||||
|
||||
func md5file(fname string) (hash [16]byte, err error) {
|
||||
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := md5.New()
|
||||
h := sha256.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
187 cmd/stcrashreceiver/diskstore.go Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright (C) 2023 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
type diskStore struct {
|
||||
dir string
|
||||
inbox chan diskEntry
|
||||
maxBytes int64
|
||||
maxFiles int
|
||||
|
||||
currentFiles []currentFile
|
||||
currentSize int64
|
||||
}
|
||||
|
||||
type diskEntry struct {
|
||||
path string
|
||||
data []byte
|
||||
}
|
||||
|
||||
type currentFile struct {
|
||||
path string
|
||||
size int64
|
||||
mtime int64
|
||||
}
|
||||
|
||||
func (d *diskStore) Serve(ctx context.Context) {
|
||||
if err := os.MkdirAll(d.dir, 0750); err != nil {
|
||||
log.Println("Creating directory:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := d.inventory(); err != nil {
|
||||
log.Println("Failed to inventory disk store:", err)
|
||||
}
|
||||
d.clean()
|
||||
|
||||
cleanTimer := time.NewTicker(time.Minute)
|
||||
inventoryTimer := time.NewTicker(24 * time.Hour)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
gw := gzip.NewWriter(buf)
|
||||
for {
|
||||
select {
|
||||
case entry := <-d.inbox:
|
||||
path := d.fullPath(entry.path)
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
log.Println("Creating directory:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
gw.Reset(buf)
|
||||
if _, err := gw.Write(entry.data); err != nil {
|
||||
log.Println("Failed to compress crash report:", err)
|
||||
continue
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
log.Println("Failed to compress crash report:", err)
|
||||
continue
|
||||
}
|
||||
if err := os.WriteFile(path, buf.Bytes(), 0644); err != nil {
|
||||
log.Printf("Failed to write %s: %v", entry.path, err)
|
||||
_ = os.Remove(path)
|
||||
continue
|
||||
}
|
||||
|
||||
d.currentSize += int64(buf.Len())
|
||||
d.currentFiles = append(d.currentFiles, currentFile{
|
||||
size: int64(len(entry.data)),
|
||||
path: path,
|
||||
})
|
||||
|
||||
case <-cleanTimer.C:
|
||||
d.clean()
|
||||
|
||||
case <-inventoryTimer.C:
|
||||
if err := d.inventory(); err != nil {
|
||||
log.Println("Failed to inventory disk store:", err)
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *diskStore) Put(path string, data []byte) bool {
|
||||
select {
|
||||
case d.inbox <- diskEntry{
|
||||
path: path,
|
||||
data: data,
|
||||
}:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *diskStore) Get(path string) ([]byte, error) {
|
||||
path = d.fullPath(path)
|
||||
bs, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gr, err := gzip.NewReader(bytes.NewReader(bs))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer gr.Close()
|
||||
return io.ReadAll(gr)
|
||||
}
|
||||
|
||||
func (d *diskStore) Exists(path string) bool {
|
||||
path = d.fullPath(path)
|
||||
_, err := os.Lstat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (d *diskStore) clean() {
|
||||
for len(d.currentFiles) > 0 && (len(d.currentFiles) > d.maxFiles || d.currentSize > d.maxBytes) {
|
||||
f := d.currentFiles[0]
|
||||
log.Println("Removing", f.path)
|
||||
if err := os.Remove(f.path); err != nil {
|
||||
log.Println("Failed to remove file:", err)
|
||||
}
|
||||
d.currentFiles = d.currentFiles[1:]
|
||||
d.currentSize -= f.size
|
||||
}
|
||||
var oldest time.Duration
|
||||
if len(d.currentFiles) > 0 {
|
||||
oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
|
||||
}
|
||||
log.Printf("Clean complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
|
||||
}
|
||||
|
||||
func (d *diskStore) inventory() error {
|
||||
d.currentFiles = nil
|
||||
d.currentSize = 0
|
||||
err := filepath.Walk(d.dir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if filepath.Ext(path) != ".gz" {
|
||||
return nil
|
||||
}
|
||||
d.currentSize += info.Size()
|
||||
d.currentFiles = append(d.currentFiles, currentFile{
|
||||
path: path,
|
||||
size: info.Size(),
|
||||
mtime: info.ModTime().Unix(),
|
||||
})
|
||||
return nil
|
||||
})
|
||||
sort.Slice(d.currentFiles, func(i, j int) bool {
|
||||
return d.currentFiles[i].mtime < d.currentFiles[j].mtime
|
||||
})
|
||||
var oldest time.Duration
|
||||
if len(d.currentFiles) > 0 {
|
||||
oldest = time.Since(time.Unix(d.currentFiles[0].mtime, 0)).Truncate(time.Minute)
|
||||
}
|
||||
log.Printf("Inventory complete: %d files, %d MB, oldest is %v ago", len(d.currentFiles), d.currentSize>>20, oldest)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *diskStore) fullPath(path string) string {
|
||||
return filepath.Join(d.dir, path[0:2], path[2:]) + ".gz"
|
||||
}
|
||||
@@ -13,17 +13,17 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/lib/ur"
|
||||
|
||||
@@ -32,34 +32,58 @@ import (
|
||||
|
||||
const maxRequestSize = 1 << 20 // 1 MiB
|
||||
|
||||
type cli struct {
|
||||
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
|
||||
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
|
||||
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
|
||||
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
|
||||
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
|
||||
CleanInterval time.Duration `help:"Interval between cleaning up old reports" default:"12h" env:"CLEAN_INTERVAL"`
|
||||
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
|
||||
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
dir := flag.String("dir", ".", "Directory to store reports in")
|
||||
dsn := flag.String("dsn", "", "Sentry DSN")
|
||||
listen := flag.String("listen", ":22039", "HTTP listen address")
|
||||
flag.Parse()
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
|
||||
cr := &crashReceiver{
|
||||
dir: *dir,
|
||||
dsn: *dsn,
|
||||
ds := &diskStore{
|
||||
dir: filepath.Join(params.Dir, "crash_reports"),
|
||||
inbox: make(chan diskEntry, params.DiskQueue),
|
||||
maxFiles: params.MaxDiskFiles,
|
||||
maxBytes: params.MaxDiskSizeMB << 20,
|
||||
}
|
||||
go ds.Serve(context.Background())
|
||||
|
||||
ss := &sentryService{
|
||||
dsn: params.DSN,
|
||||
inbox: make(chan sentryRequest, params.SentryQueue),
|
||||
}
|
||||
go ss.Serve(context.Background())
|
||||
|
||||
cr := &crashReceiver{
|
||||
store: ds,
|
||||
sentry: ss,
|
||||
}
|
||||
|
||||
mux.Handle("/", cr)
|
||||
|
||||
if *dsn != "" {
|
||||
mux.HandleFunc("/newcrash/failure", handleFailureFn(*dsn))
|
||||
if params.DSN != "" {
|
||||
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports")))
|
||||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
if err := http.ListenAndServe(*listen, mux); err != nil {
|
||||
if err := http.ListenAndServe(params.Listen, mux); err != nil {
|
||||
log.Fatalln("HTTP serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func handleFailureFn(dsn string) func(w http.ResponseWriter, req *http.Request) {
|
||||
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
bs, err := ioutil.ReadAll(lr)
|
||||
bs, err := io.ReadAll(lr)
|
||||
req.Body.Close()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
@@ -84,12 +108,25 @@ func handleFailureFn(dsn string) func(w http.ResponseWriter, req *http.Request)
|
||||
return
|
||||
}
|
||||
for _, r := range reports {
|
||||
pkt := packet(version)
|
||||
pkt := packet(version, "failure")
|
||||
pkt.Message = r.Description
|
||||
pkt.Extra = raven.Extra{
|
||||
"count": r.Count,
|
||||
}
|
||||
pkt.Fingerprint = []string{r.Description}
|
||||
for k, v := range r.Extra {
|
||||
pkt.Extra[k] = v
|
||||
}
|
||||
if r.Goroutines != "" {
|
||||
url, err := saveFailureWithGoroutines(r.FailureData, failureDir)
|
||||
if err != nil {
|
||||
log.Println("Saving failure report:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
pkt.Extra["goroutinesURL"] = url
|
||||
}
|
||||
message := sanitizeMessageLDB(r.Description)
|
||||
pkt.Fingerprint = []string{message}
|
||||
|
||||
if err := sendReport(dsn, pkt, userIDFor(req)); err != nil {
|
||||
log.Println("Failed to send failure report:", err)
|
||||
@@ -100,19 +137,15 @@ func handleFailureFn(dsn string) func(w http.ResponseWriter, req *http.Request)
|
||||
}
|
||||
}
|
||||
|
||||
// userIDFor returns a string we can use as the user ID for the purpose of
|
||||
// counting affected users. It's the truncated hash of a salt, the user
|
||||
// remote IP, and the current month.
|
||||
func userIDFor(req *http.Request) string {
|
||||
addr := req.RemoteAddr
|
||||
if fwd := req.Header.Get("x-forwarded-for"); fwd != "" {
|
||||
addr = fwd
|
||||
func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string, error) {
|
||||
bs := make([]byte, len(data.Description)+len(data.Goroutines))
|
||||
copy(bs, data.Description)
|
||||
copy(bs[len(data.Description):], data.Goroutines)
|
||||
id := fmt.Sprintf("%x", sha256.Sum256(bs))
|
||||
path := fullPathCompressed(failureDir, id)
|
||||
err := compressAndWrite(bs, path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
now := time.Now().Format("200601")
|
||||
salt := "stcrashreporter"
|
||||
hash := sha256.Sum256([]byte(salt + addr + now))
|
||||
return fmt.Sprintf("%x", hash[:8])
|
||||
return reportServer + path, nil
|
||||
}
|
||||
|
||||
@@ -8,14 +8,16 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
raven "github.com/getsentry/raven-go"
|
||||
"github.com/maruel/panicparse/stack"
|
||||
"github.com/maruel/panicparse/v2/stack"
|
||||
)
|
||||
|
||||
const reportServer = "https://crash.syncthing.net/report/"
|
||||
@@ -31,6 +33,44 @@ var (
|
||||
clientsMut sync.Mutex
|
||||
)
|
||||
|
||||
type sentryService struct {
|
||||
dsn string
|
||||
inbox chan sentryRequest
|
||||
}
|
||||
|
||||
type sentryRequest struct {
|
||||
reportID string
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (s *sentryService) Serve(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case req := <-s.inbox:
|
||||
pkt, err := parseCrashReport(req.reportID, req.data)
|
||||
if err != nil {
|
||||
log.Println("Failed to parse crash report:", err)
|
||||
continue
|
||||
}
|
||||
if err := sendReport(s.dsn, pkt, req.reportID); err != nil {
|
||||
log.Println("Failed to send crash report:", err)
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sentryService) Send(reportID string, data []byte) bool {
|
||||
select {
|
||||
case s.inbox <- sentryRequest{reportID, data}:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func sendReport(dsn string, pkt *raven.Packet, userID string) error {
|
||||
pkt.Interfaces = append(pkt.Interfaces, &raven.User{ID: userID})
|
||||
|
||||
@@ -93,10 +133,13 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
}
|
||||
|
||||
r := bytes.NewReader(report)
|
||||
ctx, err := stack.ParseDump(r, ioutil.Discard, false)
|
||||
if err != nil {
|
||||
ctx, _, err := stack.ScanSnapshot(r, io.Discard, stack.DefaultOpts())
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
if ctx == nil || len(ctx.Goroutines) == 0 {
|
||||
return nil, errors.New("no goroutines found")
|
||||
}
|
||||
|
||||
// Lock the source code loader to the version we are processing here.
|
||||
if version.commit != "" {
|
||||
@@ -116,13 +159,13 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
|
||||
if gr.First {
|
||||
trace.Frames = make([]*raven.StacktraceFrame, len(gr.Stack.Calls))
|
||||
for i, sc := range gr.Stack.Calls {
|
||||
trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name(), sc.SrcPath, sc.Line, 3, nil)
|
||||
trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name, sc.RemoteSrcPath, sc.Line, 3, nil)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
pkt := packet(version)
|
||||
pkt := packet(version, "crash")
|
||||
pkt.Message = string(subjectLine)
|
||||
pkt.Extra = raven.Extra{
|
||||
"url": reportServer + path,
|
||||
@@ -143,15 +186,20 @@ var (
|
||||
ldbPathRe = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)
|
||||
)
|
||||
|
||||
func crashReportFingerprint(message string) []string {
|
||||
// Do not fingerprint on the stack in case of db corruption or fatal
|
||||
// db io error - where it occurs doesn't matter.
|
||||
orig := message
|
||||
func sanitizeMessageLDB(message string) string {
|
||||
message = ldbPosRe.ReplaceAllString(message, "${1}x)")
|
||||
message = ldbFileRe.ReplaceAllString(message, "${1}x${3}")
|
||||
message = ldbChecksumRe.ReplaceAllString(message, "${1}X${3}X")
|
||||
message = ldbInternalKeyRe.ReplaceAllString(message, "${1}x${2}x")
|
||||
message = ldbPathRe.ReplaceAllString(message, "$1 x: ")
|
||||
return message
|
||||
}
|
||||
|
||||
func crashReportFingerprint(message string) []string {
|
||||
// Do not fingerprint on the stack in case of db corruption or fatal
|
||||
// db io error - where it occurs doesn't matter.
|
||||
orig := message
|
||||
message = sanitizeMessageLDB(message)
|
||||
if message != orig {
|
||||
return []string{message}
|
||||
}
|
||||
@@ -229,7 +277,7 @@ func parseVersion(line string) (version, error) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func packet(version version) *raven.Packet {
|
||||
func packet(version version, reportType string) *raven.Packet {
|
||||
pkt := &raven.Packet{
|
||||
Platform: "go",
|
||||
Release: version.tag,
|
||||
@@ -242,6 +290,7 @@ func packet(version version) *raven.Packet {
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
raven.Tag{Key: "report_type", Value: reportType},
|
||||
},
|
||||
}
|
||||
if version.commit != "" {
|
||||
|
||||
@@ -8,7 +8,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -59,7 +59,7 @@ func TestParseVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestParseReport(t *testing.T) {
|
||||
bs, err := ioutil.ReadFile("_testdata/panic.log")
|
||||
bs, err := os.ReadFile("_testdata/panic.log")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -80,7 +80,7 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b
|
||||
fmt.Println("Loading source:", resp.Status)
|
||||
return nil, 0
|
||||
}
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
if err != nil {
|
||||
fmt.Println("Loading source:", err.Error())
|
||||
|
||||
@@ -7,21 +7,16 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type crashReceiver struct {
|
||||
dir string
|
||||
dsn string
|
||||
store *diskStore
|
||||
sentry *sentryService
|
||||
}
|
||||
|
||||
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
@@ -44,99 +39,55 @@ func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// The location of the report on disk, compressed
|
||||
fullPath := filepath.Join(r.dir, r.dirFor(reportID), reportID) + ".gz"
|
||||
|
||||
switch req.Method {
|
||||
case http.MethodGet:
|
||||
r.serveGet(fullPath, w, req)
|
||||
r.serveGet(reportID, w, req)
|
||||
case http.MethodHead:
|
||||
r.serveHead(fullPath, w, req)
|
||||
r.serveHead(reportID, w, req)
|
||||
case http.MethodPut:
|
||||
r.servePut(reportID, fullPath, w, req)
|
||||
r.servePut(reportID, w, req)
|
||||
default:
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
// serveGet responds to GET requests by serving the uncompressed report.
|
||||
func (r *crashReceiver) serveGet(fullPath string, w http.ResponseWriter, _ *http.Request) {
|
||||
fd, err := os.Open(fullPath)
|
||||
func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http.Request) {
|
||||
bs, err := r.store.Get(reportID)
|
||||
if err != nil {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
defer fd.Close()
|
||||
gr, err := gzip.NewReader(fd)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_, _ = io.Copy(w, gr) // best effort
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
// serveHead responds to HEAD requests by checking if the named report
|
||||
// already exists in the system.
|
||||
func (r *crashReceiver) serveHead(fullPath string, w http.ResponseWriter, _ *http.Request) {
|
||||
if _, err := os.Lstat(fullPath); err != nil {
|
||||
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
|
||||
if !r.store.Exists(reportID) {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// servePut accepts and stores the given report.
|
||||
func (r *crashReceiver) servePut(reportID, fullPath string, w http.ResponseWriter, req *http.Request) {
|
||||
// Ensure the destination directory exists
|
||||
if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
|
||||
log.Println("Creating directory:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *http.Request) {
|
||||
// Read at most maxRequestSize of report data.
|
||||
log.Println("Receiving report", reportID)
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
bs, err := ioutil.ReadAll(lr)
|
||||
bs, err := io.ReadAll(lr)
|
||||
if err != nil {
|
||||
log.Println("Reading report:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Compress the report for storage
|
||||
buf := new(bytes.Buffer)
|
||||
gw := gzip.NewWriter(buf)
|
||||
_, _ = gw.Write(bs) // can't fail
|
||||
gw.Close()
|
||||
|
||||
// Create an output file with the compressed report
|
||||
err = ioutil.WriteFile(fullPath, buf.Bytes(), 0644)
|
||||
if err != nil {
|
||||
log.Println("Saving report:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
// Store the report
|
||||
if !r.store.Put(reportID, bs) {
|
||||
log.Println("Failed to store report (queue full):", reportID)
|
||||
}
|
||||
|
||||
// Send the report to Sentry
|
||||
if r.dsn != "" {
|
||||
// Remote ID
|
||||
user := userIDFor(req)
|
||||
|
||||
go func() {
|
||||
// There's no need for the client to have to wait for this part.
|
||||
pkt, err := parseCrashReport(reportID, bs)
|
||||
if err != nil {
|
||||
log.Println("Failed to parse crash report:", err)
|
||||
return
|
||||
}
|
||||
if err := sendReport(r.dsn, pkt, user); err != nil {
|
||||
log.Println("Failed to send crash report:", err)
|
||||
}
|
||||
}()
|
||||
if !r.sentry.Send(reportID, bs) {
|
||||
log.Println("Failed to send report to sentry (queue full):", reportID)
|
||||
}
|
||||
}
|
||||
|
||||
// 01234567890abcdef... => 01/23
|
||||
func (r *crashReceiver) dirFor(base string) string {
|
||||
return filepath.Join(base[0:2], base[2:4])
|
||||
}
|
||||
|
||||
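The store.Put and sentry.Send calls above report false when an internal queue is full instead of blocking the HTTP handler. A minimal sketch of that pattern using a buffered channel; the type and method names here are assumptions for illustration, not the actual diskStore or sentryService implementation.

package main

import "fmt"

// queue accepts items without blocking; Put reports whether the item
// was enqueued or dropped because the queue was full.
type queue struct {
	ch chan []byte
}

func newQueue(size int) *queue {
	return &queue{ch: make(chan []byte, size)}
}

func (q *queue) Put(item []byte) bool {
	select {
	case q.ch <- item:
		return true
	default:
		return false // queue full, caller logs and moves on
	}
}

func main() {
	q := newQueue(1)
	fmt.Println(q.Put([]byte("a"))) // true
	fmt.Println(q.Put([]byte("b"))) // false, queue full
}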
cmd/stcrashreceiver/util.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
// userIDFor returns a string we can use as the user ID for the purpose of
|
||||
// counting affected users. It's the truncated hash of a salt, the user
|
||||
// remote IP, and the current month.
|
||||
func userIDFor(req *http.Request) string {
|
||||
addr := req.RemoteAddr
|
||||
if fwd := req.Header.Get("x-forwarded-for"); fwd != "" {
|
||||
addr = fwd
|
||||
}
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
now := time.Now().Format("200601")
|
||||
salt := "stcrashreporter"
|
||||
hash := sha256.Sum256([]byte(salt + addr + now))
|
||||
return fmt.Sprintf("%x", hash[:8])
|
||||
}
|
||||
|
||||
// 01234567890abcdef... => 01/23
|
||||
func dirFor(base string) string {
|
||||
return filepath.Join(base[0:2], base[2:4])
|
||||
}
|
||||
|
||||
func fullPathCompressed(root, reportID string) string {
|
||||
return filepath.Join(root, dirFor(reportID), reportID) + ".gz"
|
||||
}
|
||||
|
||||
func compressAndWrite(bs []byte, fullPath string) error {
|
||||
// Compress the report for storage
|
||||
buf := new(bytes.Buffer)
|
||||
gw := gzip.NewWriter(buf)
|
||||
_, _ = gw.Write(bs) // can't fail
|
||||
gw.Close()
|
||||
|
||||
// Create an output file with the compressed report
|
||||
return os.WriteFile(fullPath, buf.Bytes(), 0644)
|
||||
}
|
||||
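A hypothetical test exercising the helpers in this new file could look like the sketch below. It is assumed to live in the same package; the expected paths assume a Unix-style separator, and the report IDs are made up.

package main

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestUtilHelpers(t *testing.T) {
	req := httptest.NewRequest(http.MethodPut, "/newcrashbug", nil)
	req.Header.Set("x-forwarded-for", "192.0.2.1")

	// 8 bytes of hash, hex encoded, gives a 16 character user ID.
	if id := userIDFor(req); len(id) != 16 {
		t.Errorf("unexpected user ID: %q", id)
	}
	if d := dirFor("0123456789abcdef"); d != "01/23" {
		t.Errorf("unexpected dir: %q", d)
	}
	if p := fullPathCompressed("/tmp/reports", "0123456789abcdef"); p != "/tmp/reports/01/23/0123456789abcdef.gz" {
		t.Errorf("unexpected path: %q", p)
	}
}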
@@ -10,8 +10,10 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
@@ -66,7 +68,7 @@ func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve(ctx context.Context) error {
|
||||
func (s *apiSrv) Serve(_ context.Context) error {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
@@ -229,10 +231,10 @@ func (s *apiSrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http
|
||||
func (s *apiSrv) handlePOST(ctx context.Context, remoteAddr *net.TCPAddr, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
rawCert := certificateBytes(req)
|
||||
if rawCert == nil {
|
||||
rawCert, err := certificateBytes(req)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "no certificates")
|
||||
log.Println(reqID, "no certificates:", err)
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("no_certificate").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
@@ -300,13 +302,13 @@ func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string)
|
||||
return s.db.merge(key, dbAddrs, seen)
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
func handlePing(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) []byte {
|
||||
func certificateBytes(req *http.Request) ([]byte, error) {
|
||||
if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
return req.TLS.PeerCertificates[0].Raw
|
||||
return req.TLS.PeerCertificates[0].Raw, nil
|
||||
}
|
||||
|
||||
var bs []byte
|
||||
@@ -319,7 +321,7 @@ func certificateBytes(req *http.Request) []byte {
|
||||
hdr, err := url.QueryUnescape(hdr)
|
||||
if err != nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bs = []byte(hdr)
|
||||
@@ -338,6 +340,15 @@ func certificateBytes(req *http.Request) []byte {
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if hdr := req.Header.Get("X-Tls-Client-Cert-Der-Base64"); hdr != "" {
|
||||
// Caddy {tls_client_certificate_der_base64}
|
||||
hdr, err := base64.StdEncoding.DecodeString(hdr)
|
||||
if err != nil {
|
||||
// Decoding failed
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
|
||||
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
|
||||
// Traefik 2 passtlsclientcert
|
||||
// The certificate is in PEM format with url encoding but without newlines
|
||||
@@ -346,7 +357,7 @@ func certificateBytes(req *http.Request) []byte {
|
||||
hdr, err := url.QueryUnescape(hdr)
|
||||
if err != nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 64; i < len(hdr); i += 65 {
|
||||
@@ -359,16 +370,16 @@ func certificateBytes(req *http.Request) []byte {
|
||||
}
|
||||
|
||||
if bs == nil {
|
||||
return nil
|
||||
return nil, errors.New("empty certificate header")
|
||||
}
|
||||
|
||||
block, _ := pem.Decode(bs)
|
||||
if block == nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
return nil, errors.New("certificate decode result is empty")
|
||||
}
|
||||
|
||||
return block.Bytes
|
||||
return block.Bytes, nil
|
||||
}
|
||||
|
||||
// fixupAddresses checks the list of addresses, removing invalid ones and
|
||||
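For context, a minimal end-to-end sketch of the new Caddy header path: base64-decoding a DER certificate forwarded by a reverse proxy and parsing it. The self-signed certificate is generated on the fly purely so the example runs; the production code instead re-wraps the DER in PEM so it can share the common pem.Decode path shown above.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Throwaway self-signed certificate standing in for the client
	// certificate a proxy would forward.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example-device"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}

	// What Caddy would put in X-Tls-Client-Cert-Der-Base64.
	hdr := base64.StdEncoding.EncodeToString(der)

	// Server side: decode the header back into raw DER and parse it.
	raw, err := base64.StdEncoding.DecodeString(hdr)
	if err != nil {
		fmt.Println("decoding failed:", err)
		return
	}
	cert, err := x509.ParseCertificate(raw)
	if err != nil {
		fmt.Println("parsing failed:", err)
		return
	}
	fmt.Println("client certificate CN:", cert.Subject.CommonName)
}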
@@ -419,7 +430,7 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
|
||||
|
||||
// If zero port was specified, use remote port.
|
||||
if port == "0" && remote.Port > 0 {
|
||||
port = fmt.Sprintf("%d", remote.Port)
|
||||
port = strconv.Itoa(remote.Port)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -510,10 +510,7 @@ func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
@@ -648,10 +645,7 @@ func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
@@ -752,10 +746,7 @@ func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
|
||||
@@ -116,8 +116,11 @@ func main() {
|
||||
var replicationDestinations []string
|
||||
parts := strings.Split(replicationPeers, ",")
|
||||
for _, part := range parts {
|
||||
fields := strings.Split(part, "@")
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fields := strings.Split(part, "@")
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// This is an id@address specification. Grab the address for the
|
||||
@@ -137,6 +140,9 @@ func main() {
|
||||
if err != nil {
|
||||
log.Fatalln("Parsing device ID:", err)
|
||||
}
|
||||
if id == protocol.EmptyDeviceID {
|
||||
log.Fatalf("Missing device ID for peer in %q", part)
|
||||
}
|
||||
allowedReplicationPeers = append(allowedReplicationPeers, id)
|
||||
|
||||
default:
|
||||
|
||||
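A compact sketch of the id@address parsing shown above, using strings.Cut instead of strings.Split. The peer IDs are placeholders; the real code additionally validates them with protocol.DeviceIDFromString and rejects protocol.EmptyDeviceID, as the new hunk shows.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical -replication-peers value with a trailing comma.
	peers := "PEERID1@192.0.2.10:19200,PEERID2@,"

	for _, part := range strings.Split(peers, ",") {
		if part == "" {
			continue // trailing or doubled comma
		}
		id, addr, found := strings.Cut(part, "@")
		switch {
		case !found:
			fmt.Printf("%q: bare value, handled separately\n", part)
		case addr == "":
			fmt.Printf("%q: allow peer %s, no fixed address\n", part, id)
		default:
			fmt.Printf("%q: replicate to peer %s at %s\n", part, id, addr)
		}
	}
}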
@@ -120,7 +120,7 @@ func (s *replicationSender) Serve(ctx context.Context) error {
|
||||
if _, err := conn.Write(buf[:4+n]); err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are loosing the replication event here.
|
||||
// Yes, we are losing the replication event here.
|
||||
return err
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
@@ -135,7 +135,7 @@ func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -109,7 +110,7 @@ func init() {
|
||||
databaseKeys, databaseStatisticsSeconds,
|
||||
databaseOperations, databaseOperationSeconds)
|
||||
|
||||
processCollectorOpts := prometheus.ProcessCollectorOpts{
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_discovery",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
@@ -117,7 +118,7 @@ func init() {
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
prometheus.NewProcessCollector(processCollectorOpts),
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
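The change above swaps the deprecated prometheus.NewProcessCollector constructor for the collectors package. A standalone sketch of the same registration; the namespace and listen address here are arbitrary examples.

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// collectors.ProcessCollectorOpts is an alias of the old options type,
	// so the existing fields carry over unchanged.
	prometheus.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
		Namespace: "example_service",
		PidFn:     func() (int, error) { return os.Getpid(), nil },
	}))

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe("127.0.0.1:9090", nil))
}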
@@ -84,7 +84,7 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
}
|
||||
|
||||
func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger)
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger, nil)
|
||||
if err != nil {
|
||||
return checkResult{error: err}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Commmand stfindignored lists ignored files under a given folder root.
|
||||
// Command stfindignored lists ignored files under a given folder root.
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var mode string
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
flag.StringVar(&mode, "mode", "dump", "Mode of operation: dump, dumpsize, idxck")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
path := flag.Arg(0)
|
||||
if path == "" {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
var ldb backend.Backend
|
||||
var err error
|
||||
if looksLikeBadger(path) {
|
||||
ldb, err = backend.OpenBadger(path)
|
||||
} else {
|
||||
ldb, err = backend.OpenLevelDBRO(path)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
switch mode {
|
||||
case "dump":
|
||||
dump(ldb)
|
||||
case "dumpsize":
|
||||
dumpsize(ldb)
|
||||
case "idxck":
|
||||
if !idxck(ldb) {
|
||||
os.Exit(1)
|
||||
}
|
||||
case "account":
|
||||
account(ldb)
|
||||
default:
|
||||
fmt.Println("Unknown mode")
|
||||
}
|
||||
}
|
||||
|
||||
func looksLikeBadger(path string) bool {
|
||||
_, err := os.Stat(filepath.Join(path, "KEYREGISTRY"))
|
||||
return err == nil
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func defaultConfigDir() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if p := os.Getenv("LocalAppData"); p != "" {
|
||||
return filepath.Join(p, "Syncthing")
|
||||
}
|
||||
return filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
|
||||
case "darwin":
|
||||
dir, err := fs.ExpandTilde("~/Library/Application Support/Syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
|
||||
default:
|
||||
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
|
||||
return filepath.Join(xdgCfg, "syncthing")
|
||||
}
|
||||
dir, err := fs.ExpandTilde("~/.config/syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
}
|
||||
cmd/strelaypoolsrv/auto/noassets.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build noassets
|
||||
// +build noassets
|
||||
|
||||
package auto
|
||||
|
||||
import "github.com/syncthing/syncthing/lib/assets"
|
||||
|
||||
func Assets() map[string]assets.Asset {
|
||||
return nil
|
||||
}
|
||||
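This stub is only compiled when the noassets tag is set; a non-stub counterpart presumably carries the inverse constraint. A tiny, hypothetical pair illustrating the mechanism (package and function names are made up):

// variant_off.go, selected by: go build -tags noassets
//go:build noassets

package variant

func Enabled() bool { return false }

// variant_on.go, the default build
//go:build !noassets

package variant

func Enabled() bool { return true }

The new file keeps the legacy // +build line alongside //go:build so that toolchains older than Go 1.17 still honour the constraint.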
@@ -46,18 +46,18 @@
|
||||
<h1>Relay Pool Data</h1>
|
||||
<div ng-if="relays === undefined" class="text-center">
|
||||
<img src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif" alt=""/>
|
||||
<p>Please wait while we gather data</p>
|
||||
<p>Please wait while we gather data…</p>
|
||||
</div>
|
||||
<div>
|
||||
<div ng-show="relays !== undefined" class="ng-hide">
|
||||
<p>
|
||||
The relays listed on this page are not managed or vetted by the Syncthing project.
|
||||
Each relay is the responsibility of the relay operator.
|
||||
Currently {{ relays.length }} relays online.
|
||||
Currently {{ relays.length }} relays are online.
|
||||
</p>
|
||||
</div>
|
||||
<div id="map"></div> <!-- Can't hide the map, otherwise it freaks out -->
|
||||
<p>The circle size represents how much bytes the relay transferred relative to other relays</p>
|
||||
<p>The circle size represents how much bytes the relay has transferred relatively to other relays.</p>
|
||||
</div>
|
||||
<div>
|
||||
<table class="table table-striped table-condensed table">
|
||||
@@ -188,7 +188,7 @@
|
||||
<hr>
|
||||
<p>
|
||||
This product includes GeoLite2 data created by MaxMind, available from
|
||||
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
|
||||
<a href="https://www.maxmind.com">https://www.maxmind.com</a>.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -237,7 +237,7 @@
|
||||
uptimeSeconds: 0,
|
||||
};
|
||||
$scope.map = L.map('map').setView([40.90296, 1.90925], 2);
|
||||
L.tileLayer('http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
|
||||
L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
|
||||
{
|
||||
attribution: 'Leaflet',
|
||||
maxZoom: 17
|
||||
|
||||
@@ -3,16 +3,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -21,9 +17,13 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
@@ -33,7 +33,6 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type location struct {
|
||||
@@ -99,27 +98,13 @@ var (
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 2
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 2
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
statsRefresh = time.Minute / 2
|
||||
requestQueueLen = 10
|
||||
requestProcessors = 1
|
||||
|
||||
getMut = sync.NewMutex()
|
||||
getLRUCache *lru.Cache
|
||||
|
||||
postMut = sync.NewMutex()
|
||||
postLRUCache *lru.Cache
|
||||
statsRefresh = time.Minute
|
||||
requestQueueLen = 64
|
||||
requestProcessors = 8
|
||||
|
||||
requests chan request
|
||||
|
||||
@@ -127,6 +112,7 @@ var (
|
||||
knownRelays = make([]*relay, 0)
|
||||
permanentRelays = make([]*relay, 0)
|
||||
evictionTimers = make(map[string]*time.Timer)
|
||||
globalBlocklist = newErrorTracker(1000)
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -141,13 +127,8 @@ func main() {
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", getLimitAvg, "Allowed average get request rate, per 10 s")
|
||||
flag.IntVar(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", postLimitAvg, "Allowed average post request rate, per minute")
|
||||
flag.IntVar(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
@@ -159,12 +140,6 @@ func main() {
|
||||
|
||||
requests = make(chan request, requestQueueLen)
|
||||
|
||||
getLimit = 10 * time.Second / time.Duration(getLimitAvg)
|
||||
postLimit = time.Minute / time.Duration(postLimitAvg)
|
||||
|
||||
getLRUCache = lru.New(getLRUSize)
|
||||
postLRUCache = lru.New(postLRUSize)
|
||||
|
||||
var listener net.Listener
|
||||
var err error
|
||||
|
||||
@@ -240,7 +215,7 @@ func main() {
|
||||
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.HandleFunc("/endpoint", handleRequest)
|
||||
handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second))
|
||||
handler.HandleFunc("/metrics", handleMetrics)
|
||||
|
||||
srv := http.Server{
|
||||
@@ -291,21 +266,17 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
}()
|
||||
|
||||
if ipHeader != "" {
|
||||
r.RemoteAddr = r.Header.Get(ipHeader)
|
||||
hdr := r.Header.Get(ipHeader)
|
||||
fields := strings.Split(hdr, ",")
|
||||
if len(fields) > 0 {
|
||||
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
|
||||
}
|
||||
}
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
|
||||
w.WriteHeader(httpStatusEnhanceYourCalm)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
|
||||
w.WriteHeader(httpStatusEnhanceYourCalm)
|
||||
return
|
||||
}
|
||||
handlePostRequest(w, r)
|
||||
default:
|
||||
if debug {
|
||||
@@ -327,20 +298,28 @@ func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
|
||||
// Shuffle
|
||||
rand.Shuffle(relays)
|
||||
|
||||
w := io.Writer(rw)
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
rw.Header().Set("Content-Encoding", "gzip")
|
||||
gw := gzip.NewWriter(rw)
|
||||
defer gw.Close()
|
||||
w = gw
|
||||
}
|
||||
|
||||
_ = json.NewEncoder(w).Encode(map[string][]*relay{
|
||||
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
|
||||
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
// Get the IP address of the client
|
||||
rhost := r.RemoteAddr
|
||||
if host, _, err := net.SplitHostPort(rhost); err == nil {
|
||||
rhost = host
|
||||
}
|
||||
|
||||
// Check the black list. A client is blacklisted if their last 10
|
||||
// attempts to join have all failed. The "Unauthorized" status return
|
||||
// causes strelaysrv to cease attempting to join.
|
||||
if globalBlocklist.IsBlocked(rhost) {
|
||||
log.Println("Rejected blocked client", rhost)
|
||||
http.Error(w, "Too many errors", http.StatusUnauthorized)
|
||||
globalBlocklist.ClearErrors(rhost)
|
||||
return
|
||||
}
|
||||
|
||||
var relayCert *x509.Certificate
|
||||
if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
|
||||
relayCert = r.TLS.PeerCertificates[0]
|
||||
@@ -368,6 +347,11 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Canonicalize the URL. In particular, parse and re-encode the query
|
||||
// string so that it's guaranteed to be valid.
|
||||
uri.RawQuery = uri.Query().Encode()
|
||||
newRelay.URL = uri.String()
|
||||
|
||||
if relayCert != nil {
|
||||
advertisedId := uri.Query().Get("id")
|
||||
idFromCert := protocol.NewDeviceID(relayCert.Raw).String()
|
||||
@@ -387,12 +371,6 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Get the IP address of the client
|
||||
rhost := r.RemoteAddr
|
||||
if host, _, err := net.SplitHostPort(rhost); err == nil {
|
||||
rhost = host
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
// The client did not provide an IP address, use the IP address of the client.
|
||||
if ip == nil || ip.IsUnspecified() {
|
||||
@@ -424,10 +402,14 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
|
||||
result := <-reschan
|
||||
if result.err != nil {
|
||||
log.Println("Join from", r.RemoteAddr, "failed:", result.err)
|
||||
globalBlocklist.AddError(rhost)
|
||||
relayTestsTotal.WithLabelValues("failed").Inc()
|
||||
http.Error(w, result.err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Println("Join from", r.RemoteAddr, "succeeded")
|
||||
globalBlocklist.ClearErrors(rhost)
|
||||
relayTestsTotal.WithLabelValues("success").Inc()
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string]time.Duration{
|
||||
@@ -475,7 +457,7 @@ func handleRelayTest(request request) {
|
||||
updateMetrics(request.relay.uri.Host, *stats, location)
|
||||
}
|
||||
request.relay.Stats = stats
|
||||
request.relay.StatsRetrieved = time.Now()
|
||||
request.relay.StatsRetrieved = time.Now().Truncate(time.Second)
|
||||
request.relay.Location = location
|
||||
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
@@ -541,25 +523,8 @@ func evict(relay *relay) func() {
|
||||
}
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.Mutex, intv time.Duration, burst int) bool {
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
v, _ := cache.Get(addr)
|
||||
bkt, ok := v.(*rate.Limiter)
|
||||
if !ok {
|
||||
bkt = rate.NewLimiter(rate.Every(intv), burst)
|
||||
cache.Add(addr, bkt)
|
||||
}
|
||||
lock.Unlock()
|
||||
|
||||
return !bkt.Allow()
|
||||
}
|
||||
|
||||
func loadRelays(file string) []*relay {
|
||||
content, err := ioutil.ReadFile(file)
|
||||
content, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Println("Failed to load relays: " + err.Error())
|
||||
return nil
|
||||
@@ -567,7 +532,7 @@ func loadRelays(file string) []*relay {
|
||||
|
||||
var relays []*relay
|
||||
for _, line := range strings.Split(string(content), "\n") {
|
||||
if len(line) == 0 {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -597,11 +562,11 @@ func saveRelays(file string, relays []*relay) error {
|
||||
for _, relay := range relays {
|
||||
content += relay.uri.String() + "\n"
|
||||
}
|
||||
return ioutil.WriteFile(file, []byte(content), 0777)
|
||||
return os.WriteFile(file, []byte(content), 0o777)
|
||||
}
|
||||
|
||||
func createTestCertificate() tls.Certificate {
|
||||
tmpDir, err := ioutil.TempDir("", "relaypoolsrv")
|
||||
tmpDir, err := os.MkdirTemp("", "relaypoolsrv")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -656,3 +621,42 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
type errorTracker struct {
|
||||
errors *lru.TwoQueueCache[string, *errorCounter]
|
||||
}
|
||||
|
||||
type errorCounter struct {
|
||||
count atomic.Int32
|
||||
}
|
||||
|
||||
func newErrorTracker(size int) *errorTracker {
|
||||
cache, err := lru.New2Q[string, *errorCounter](size)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &errorTracker{
|
||||
errors: cache,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *errorTracker) AddError(host string) {
|
||||
entry, ok := b.errors.Get(host)
|
||||
if !ok {
|
||||
entry = &errorCounter{}
|
||||
b.errors.Add(host, entry)
|
||||
}
|
||||
c := entry.count.Add(1)
|
||||
log.Printf("Error count for %s is now %d", host, c)
|
||||
}
|
||||
|
||||
func (b *errorTracker) ClearErrors(host string) {
|
||||
b.errors.Remove(host)
|
||||
}
|
||||
|
||||
func (b *errorTracker) IsBlocked(host string) bool {
|
||||
if be, ok := b.errors.Get(host); ok {
|
||||
return be.count.Load() > 10
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
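The errorTracker above rides on the generics-based hashicorp/golang-lru/v2 cache. A small self-contained usage sketch of the same cache type; the threshold and host key are arbitrary.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// A 2Q cache keyed by host, holding simple error counts.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}

	host := "198.51.100.7"
	for i := 0; i < 12; i++ {
		n, _ := cache.Get(host)
		cache.Add(host, n+1)
	}

	if n, ok := cache.Get(host); ok && n > 10 {
		fmt.Println(host, "is blocked after", n, "errors")
	}

	cache.Remove(host) // clearing errors on a successful join
}

The real tracker stores a pointer to an atomic counter rather than re-adding the value, which avoids racing concurrent request handlers on the same host entry.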
@@ -11,6 +11,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -65,3 +66,29 @@ func TestHandleGetRequest(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanonicalizeQueryValues(t *testing.T) {
|
||||
// This just demonstrates and validates the uri.Parse/String stuff in
|
||||
// regards to query strings.
|
||||
|
||||
in := "http://example.com/?some weird= query^value"
|
||||
exp := "http://example.com/?some+weird=+query%5Evalue"
|
||||
|
||||
uri, err := url.Parse(in)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
str := uri.String()
|
||||
if str != in {
|
||||
// Just re-encoding the URL doesn't sanitize the query string.
|
||||
t.Errorf("expected %q, got %q", in, str)
|
||||
}
|
||||
|
||||
uri.RawQuery = uri.Query().Encode()
|
||||
str = uri.String()
|
||||
if str != exp {
|
||||
// The query string is now in correct format.
|
||||
t.Errorf("expected %q, got %q", exp, str)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,11 +10,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
processCollectorOpts := prometheus.ProcessCollectorOpts{
|
||||
processCollectorOpts := collectors.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_relaypoolsrv",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
@@ -22,7 +23,7 @@ func init() {
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
prometheus.NewProcessCollector(processCollectorOpts),
|
||||
collectors.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -20,10 +20,10 @@ import (
|
||||
var (
|
||||
outboxesMut = sync.RWMutex{}
|
||||
outboxes = make(map[syncthingprotocol.DeviceID]chan interface{})
|
||||
numConnections int64
|
||||
numConnections atomic.Int64
|
||||
)
|
||||
|
||||
func listener(proto, addr string, config *tls.Config) {
|
||||
func listener(_, addr string, config *tls.Config, token string) {
|
||||
tcpListener, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
@@ -49,7 +49,7 @@ func listener(proto, addr string, config *tls.Config) {
|
||||
}
|
||||
|
||||
if isTLS {
|
||||
go protocolConnectionHandler(conn, config)
|
||||
go protocolConnectionHandler(conn, config, token)
|
||||
} else {
|
||||
go sessionConnectionHandler(conn)
|
||||
}
|
||||
@@ -57,7 +57,7 @@ func listener(proto, addr string, config *tls.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token string) {
|
||||
conn := tls.Server(tcpConn, config)
|
||||
if err := conn.SetDeadline(time.Now().Add(messageTimeout)); err != nil {
|
||||
if debug {
|
||||
@@ -76,7 +76,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
}
|
||||
|
||||
state := conn.ConnectionState()
|
||||
if (!state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol != protocol.ProtocolName) && debug {
|
||||
if debug && state.NegotiatedProtocol != protocol.ProtocolName {
|
||||
log.Println("Protocol negotiation error")
|
||||
}
|
||||
|
||||
@@ -119,7 +119,16 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
|
||||
switch msg := message.(type) {
|
||||
case protocol.JoinRelayRequest:
|
||||
if atomic.LoadInt32(&overLimit) > 0 {
|
||||
if token != "" && msg.Token != token {
|
||||
if debug {
|
||||
log.Printf("invalid token %s\n", msg.Token)
|
||||
}
|
||||
protocol.WriteMessage(conn, protocol.ResponseWrongToken)
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
if overLimit.Load() {
|
||||
protocol.WriteMessage(conn, protocol.RelayFull{})
|
||||
if debug {
|
||||
log.Println("Refusing join request from", id, "due to being over limits")
|
||||
@@ -258,7 +267,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&overLimit) > 0 && !hasSessions(id) {
|
||||
if overLimit.Load() && !hasSessions(id) {
|
||||
if debug {
|
||||
log.Println("Dropping", id, "as it has no sessions and we are over our limits")
|
||||
}
|
||||
@@ -351,8 +360,8 @@ func sessionConnectionHandler(conn net.Conn) {
|
||||
}
|
||||
|
||||
func messageReader(conn net.Conn, messages chan<- interface{}, errors chan<- error) {
|
||||
atomic.AddInt64(&numConnections, 1)
|
||||
defer atomic.AddInt64(&numConnections, -1)
|
||||
numConnections.Add(1)
|
||||
defer numConnections.Add(-1)
|
||||
|
||||
for {
|
||||
msg, err := protocol.ReadMessage(conn)
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
@@ -50,13 +49,14 @@ var (
|
||||
|
||||
sessionLimitBps int
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
overLimit atomic.Bool
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
networkBufferSize int
|
||||
|
||||
statusAddr string
|
||||
token string
|
||||
poolAddrs string
|
||||
pools []string
|
||||
providedBy string
|
||||
@@ -90,6 +90,7 @@ func main() {
|
||||
flag.IntVar(&globalLimitBps, "global-rate", globalLimitBps, "Global rate limit, in bytes/s")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.StringVar(&statusAddr, "status-srv", ":22070", "Listen address for status service (blank to disable)")
|
||||
flag.StringVar(&token, "token", "", "Token to restrict access to the relay (optional). Disables joining any pools.")
|
||||
flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
|
||||
flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
|
||||
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
|
||||
@@ -99,7 +100,7 @@ func main() {
|
||||
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
|
||||
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
|
||||
flag.BoolVar(&pprofEnabled, "pprof", false, "Enable the built in profiling on the status server")
|
||||
flag.IntVar(&networkBufferSize, "network-buffer", 2048, "Network buffer size (two of these per proxied connection)")
|
||||
flag.IntVar(&networkBufferSize, "network-buffer", 65536, "Network buffer size (two of these per proxied connection)")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
@@ -146,7 +147,7 @@ func main() {
|
||||
log.Println("Connection limit", descriptorLimit)
|
||||
|
||||
go monitorLimits()
|
||||
} else if err != nil && runtime.GOOS != "windows" {
|
||||
} else if err != nil && !build.IsWindows {
|
||||
log.Println("Assuming no connection limit, due to error retrieving rlimits:", err)
|
||||
}
|
||||
|
||||
@@ -186,6 +187,7 @@ func main() {
|
||||
}
|
||||
|
||||
wrapper := config.Wrap("config", config.New(id), id, events.NoopLogger)
|
||||
go wrapper.Serve(context.TODO())
|
||||
wrapper.Modify(func(cfg *config.Configuration) {
|
||||
cfg.Options.NATLeaseM = natLease
|
||||
cfg.Options.NATRenewalM = natRenewal
|
||||
@@ -199,7 +201,7 @@ func main() {
|
||||
go natSvc.Serve(ctx)
|
||||
defer cancel()
|
||||
found := make(chan struct{})
|
||||
mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
|
||||
mapping.OnChanged(func() {
|
||||
select {
|
||||
case found <- struct{}{}:
|
||||
default:
|
||||
@@ -229,13 +231,37 @@ func main() {
|
||||
go statusService(statusAddr)
|
||||
}
|
||||
|
||||
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
|
||||
uri, err := url.Parse(fmt.Sprintf("relay://%s/", mapping.Address()))
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to construct URI", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Add properly encoded query string parameters to URL.
|
||||
query := make(url.Values)
|
||||
query.Set("id", id.String())
|
||||
query.Set("pingInterval", pingInterval.String())
|
||||
query.Set("networkTimeout", networkTimeout.String())
|
||||
if sessionLimitBps > 0 {
|
||||
query.Set("sessionLimitBps", fmt.Sprint(sessionLimitBps))
|
||||
}
|
||||
if globalLimitBps > 0 {
|
||||
query.Set("globalLimitBps", fmt.Sprint(globalLimitBps))
|
||||
}
|
||||
if statusAddr != "" {
|
||||
query.Set("statusAddr", statusAddr)
|
||||
}
|
||||
if providedBy != "" {
|
||||
query.Set("providedBy", providedBy)
|
||||
}
|
||||
uri.RawQuery = query.Encode()
|
||||
|
||||
log.Println("URI:", uri.String())
|
||||
|
||||
if token != "" {
|
||||
poolAddrs = ""
|
||||
}
|
||||
|
||||
if poolAddrs == defaultPoolAddrs {
|
||||
log.Println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
|
||||
log.Println("!! Joining default relay pools, this relay will be available for public use. !!")
|
||||
@@ -251,7 +277,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
go listener(proto, listen, tlsCfg)
|
||||
go listener(proto, listen, tlsCfg, token)
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
@@ -282,10 +308,10 @@ func main() {
|
||||
func monitorLimits() {
|
||||
limitCheckTimer = time.NewTimer(time.Minute)
|
||||
for range limitCheckTimer.C {
|
||||
if atomic.LoadInt64(&numConnections)+atomic.LoadInt64(&numProxies) > descriptorLimit {
|
||||
atomic.StoreInt32(&overLimit, 1)
|
||||
if numConnections.Load()+numProxies.Load() > descriptorLimit {
|
||||
overLimit.Store(true)
|
||||
log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.")
|
||||
} else if atomic.CompareAndSwapInt32(&overLimit, 1, 0) {
|
||||
} else if overLimit.CompareAndSwap(true, false) {
|
||||
log.Println("Dropped below our connection limits. Accepting new connections.")
|
||||
}
|
||||
limitCheckTimer.Reset(time.Minute)
|
||||
|
||||
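The hunks above and below convert package-level int32/int64 counters to the typed atomics introduced in Go 1.19, dropping the manual 64-bit alignment concerns. The before/after in miniature; variable names are illustrative.

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: var overLimit int32 driven by atomic.StoreInt32 and
// atomic.CompareAndSwapInt32. After: the typed atomics below.
var (
	overLimit      atomic.Bool
	numConnections atomic.Int64
)

func main() {
	numConnections.Add(1)
	defer numConnections.Add(-1)

	if numConnections.Load() > 0 {
		overLimit.Store(true)
	}
	if overLimit.CompareAndSwap(true, false) {
		fmt.Println("dropped below the limit again")
	}
}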
@@ -6,7 +6,7 @@ import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -56,7 +56,7 @@ func poolHandler(pool string, uri *url.URL, mapping mapping, ownCert tls.Certifi
|
||||
continue
|
||||
}
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
bs, err := io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
log.Printf("Error joining pool %v: reading response: %v", pool, err)
|
||||
@@ -74,9 +74,8 @@ func poolHandler(pool string, uri *url.URL, mapping mapping, ownCert tls.Certifi
|
||||
log.Printf("Joined pool %s, rejoining in %v", pool, rejoin)
|
||||
time.Sleep(rejoin)
|
||||
continue
|
||||
} else {
|
||||
log.Printf("Joined pool %s, failed to deserialize response: %v", pool, err)
|
||||
}
|
||||
log.Printf("Joined pool %s, failed to deserialize response: %v", pool, err)
|
||||
|
||||
case http.StatusInternalServerError:
|
||||
log.Printf("Failed to join %v: server error", pool)
|
||||
|
||||
@@ -23,8 +23,8 @@ var (
|
||||
sessionMut = sync.RWMutex{}
|
||||
activeSessions = make([]*session, 0)
|
||||
pendingSessions = make(map[string]*session)
|
||||
numProxies int64
|
||||
bytesProxied int64
|
||||
numProxies atomic.Int64
|
||||
bytesProxied atomic.Int64
|
||||
)
|
||||
|
||||
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
|
||||
@@ -251,8 +251,8 @@ func (s *session) proxy(c1, c2 net.Conn) error {
|
||||
log.Println("Proxy", c1.RemoteAddr(), "->", c2.RemoteAddr())
|
||||
}
|
||||
|
||||
atomic.AddInt64(&numProxies, 1)
|
||||
defer atomic.AddInt64(&numProxies, -1)
|
||||
numProxies.Add(1)
|
||||
defer numProxies.Add(-1)
|
||||
|
||||
buf := make([]byte, networkBufferSize)
|
||||
for {
|
||||
@@ -262,7 +262,7 @@ func (s *session) proxy(c1, c2 net.Conn) error {
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.AddInt64(&bytesProxied, int64(n))
|
||||
bytesProxied.Add(int64(n))
|
||||
|
||||
if debug {
|
||||
log.Printf("%d bytes from %s to %s", n, c1.RemoteAddr(), c2.RemoteAddr())
|
||||
|
||||
@@ -36,7 +36,7 @@ func statusService(addr string) {
|
||||
}
|
||||
}
|
||||
|
||||
func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
func getStatus(w http.ResponseWriter, _ *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
status := make(map[string]interface{})
|
||||
|
||||
@@ -51,9 +51,9 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
status["numPendingSessionKeys"] = len(pendingSessions)
|
||||
status["numActiveSessions"] = len(activeSessions)
|
||||
sessionMut.Unlock()
|
||||
status["numConnections"] = atomic.LoadInt64(&numConnections)
|
||||
status["numProxies"] = atomic.LoadInt64(&numProxies)
|
||||
status["bytesProxied"] = atomic.LoadInt64(&bytesProxied)
|
||||
status["numConnections"] = numConnections.Load()
|
||||
status["numProxies"] = numProxies.Load()
|
||||
status["bytesProxied"] = bytesProxied.Load()
|
||||
status["goVersion"] = runtime.Version()
|
||||
status["goOS"] = runtime.GOOS
|
||||
status["goArch"] = runtime.GOARCH
|
||||
@@ -88,13 +88,13 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
type rateCalculator struct {
|
||||
counter *int64 // atomic, must remain 64-bit aligned
|
||||
counter *atomic.Int64
|
||||
rates []int64
|
||||
prev int64
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {
|
||||
func newRateCalculator(keepIntervals int, interval time.Duration, counter *atomic.Int64) *rateCalculator {
|
||||
r := &rateCalculator{
|
||||
rates: make([]int64, keepIntervals),
|
||||
counter: counter,
|
||||
@@ -112,7 +112,7 @@ func (r *rateCalculator) updateRates(interval time.Duration) {
|
||||
next := now.Truncate(interval).Add(interval)
|
||||
time.Sleep(next.Sub(now))
|
||||
|
||||
cur := atomic.LoadInt64(r.counter)
|
||||
cur := r.counter.Load()
|
||||
rate := int64(float64(cur-r.prev) / interval.Seconds())
|
||||
copy(r.rates[1:], r.rates)
|
||||
r.rates[0] = rate
|
||||
|
||||
@@ -57,7 +57,7 @@ func main() {
|
||||
|
||||
if join {
|
||||
log.Println("Creating client")
|
||||
relay, err := client.NewClient(uri, []tls.Certificate{cert}, nil, 10*time.Second)
|
||||
relay, err := client.NewClient(uri, []tls.Certificate{cert}, 10*time.Second)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -127,10 +127,6 @@ func stdinReader(c chan<- string) {
|
||||
}
|
||||
|
||||
func connectToStdio(stdin <-chan string, conn net.Conn) {
|
||||
go func() {
|
||||
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
conn.SetReadDeadline(time.Now().Add(time.Millisecond))
|
||||
|
||||
@@ -9,7 +9,6 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
@@ -69,7 +68,7 @@ func gen() {
|
||||
}
|
||||
|
||||
func sign(keyname, dataname string) {
|
||||
privkey, err := ioutil.ReadFile(keyname)
|
||||
privkey, err := os.ReadFile(keyname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -95,7 +94,7 @@ func sign(keyname, dataname string) {
|
||||
}
|
||||
|
||||
func verifyWithFile(signame, dataname, keyname string) {
|
||||
pubkey, err := ioutil.ReadFile(keyname)
|
||||
pubkey, err := os.ReadFile(keyname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -103,7 +102,7 @@ func verifyWithFile(signame, dataname, keyname string) {
|
||||
}
|
||||
|
||||
func verifyWithKey(signame, dataname string, pubkey []byte) {
|
||||
sig, err := ioutil.ReadFile(signame)
|
||||
sig, err := os.ReadFile(signame)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -7,31 +7,112 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/syncthing/syncthing/lib/httpcache"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
const defaultURL = "https://api.github.com/repos/syncthing/syncthing/releases?per_page=25"
|
||||
type cli struct {
|
||||
Listen string `default:":8080" help:"Listen address"`
|
||||
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
|
||||
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
|
||||
CacheTime time.Duration `default:"15m" help:"Cache time"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
url := flag.String("u", defaultURL, "GitHub releases url")
|
||||
flag.Parse()
|
||||
|
||||
rels := upgrade.FetchLatestReleases(*url, "")
|
||||
if rels == nil {
|
||||
// An error was already logged
|
||||
var params cli
|
||||
kong.Parse(¶ms)
|
||||
if err := server(¶ms); err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func server(params *cli) error {
|
||||
http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
|
||||
|
||||
for _, fwd := range params.Forward {
|
||||
path, url, ok := strings.Cut(fwd, "->")
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid forward: %q", fwd)
|
||||
}
|
||||
http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
|
||||
}
|
||||
|
||||
return http.ListenAndServe(params.Listen, nil)
|
||||
}
|
||||
|
||||
type githubReleases struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *githubReleases) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
rels := upgrade.FetchLatestReleases(p.url, "")
|
||||
if rels == nil {
|
||||
http.Error(w, "no releases", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
if err := json.NewEncoder(os.Stdout).Encode(rels); err != nil {
|
||||
os.Exit(1)
|
||||
buf := new(bytes.Buffer)
|
||||
_ = json.NewEncoder(buf).Encode(rels)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
type proxy struct {
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
log.Println("Fetching", p.url)
|
||||
req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
w.Header().Set("Content-Type", ct)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
w.Header().Set("Cache-Control", "public, max-age=900")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET")
|
||||
}
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
if strings.HasPrefix(ct, "application/json") {
|
||||
// Special JSON handling; clean it up a bit.
|
||||
var v interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
} else {
|
||||
_, _ = io.Copy(w, resp.Body)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
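The rewritten command is now a small HTTP server rather than a one-shot JSON dump. A hypothetical invocation, with an example forward target, might look like:

	stupgrades --listen :8080 -f "/nightly.json->https://example.com/nightly.json" --cache-time 15m

kong derives the flag names from the struct fields above, so --listen, -u/--url, -f/--forward and --cache-time map onto Listen, URL, Forward and CacheTime respectively, with the defaults taken from the struct tags.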
@@ -37,14 +37,14 @@ type result struct {
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
prefix := strings.ToUpper(strings.Replace(flag.Arg(0), "-", "", -1))
|
||||
prefix := strings.ToUpper(strings.ReplaceAll(flag.Arg(0), "-", ""))
|
||||
if len(prefix) > 7 {
|
||||
prefix = prefix[:7] + "-" + prefix[7:]
|
||||
}
|
||||
|
||||
found := make(chan result)
|
||||
stop := make(chan struct{})
|
||||
var count int64
|
||||
var count atomic.Int64
|
||||
|
||||
// Print periodic progress reports.
|
||||
go printProgress(prefix, &count)
|
||||
@@ -72,7 +72,7 @@ func main() {
|
||||
// Try certificates until one is found that has the prefix at the start of
|
||||
// the resulting device ID. Increments count atomically, sends the result to
|
||||
// found, returns when stop is closed.
|
||||
func generatePrefixed(prefix string, count *int64, found chan<- result, stop <-chan struct{}) {
|
||||
func generatePrefixed(prefix string, count *atomic.Int64, found chan<- result, stop <-chan struct{}) {
|
||||
notBefore := time.Now()
|
||||
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
|
||||
|
||||
@@ -109,7 +109,7 @@ func generatePrefixed(prefix string, count *int64, found chan<- result, stop <-c
|
||||
}
|
||||
|
||||
id := protocol.NewDeviceID(derBytes)
|
||||
atomic.AddInt64(count, 1)
|
||||
count.Add(1)
|
||||
|
||||
if strings.HasPrefix(id.String(), prefix) {
|
||||
select {
|
||||
@@ -121,7 +121,7 @@ func generatePrefixed(prefix string, count *int64, found chan<- result, stop <-c
|
||||
}
|
||||
}
|
||||
|
||||
func printProgress(prefix string, count *int64) {
|
||||
func printProgress(prefix string, count *atomic.Int64) {
|
||||
started := time.Now()
|
||||
wantBits := 5 * len(prefix)
|
||||
if wantBits > 63 {
|
||||
@@ -132,7 +132,7 @@ func printProgress(prefix string, count *int64) {
|
||||
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
|
||||
|
||||
for range time.NewTicker(15 * time.Second).C {
|
||||
tried := atomic.LoadInt64(count)
|
||||
tried := count.Load()
|
||||
elapsed := time.Since(started)
|
||||
rate := float64(tried) / elapsed.Seconds()
|
||||
expected := timeStr(expectedIterations / rate)
|
||||
|
||||
@@ -7,31 +7,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
func getmd5(filePath string) ([]byte, error) {
|
||||
var result []byte
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
hash := md5.New()
|
||||
if _, err := io.Copy(hash, file); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
return hash.Sum(result), nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
period := flag.Duration("period", 200*time.Millisecond, "Sleep period between checks")
|
||||
flag.Parse()
|
||||
@@ -46,7 +30,7 @@ func main() {
|
||||
exists := true
|
||||
size := int64(0)
|
||||
mtime := time.Time{}
|
||||
hash := []byte{}
|
||||
var hash [sha256.Size]byte
|
||||
|
||||
for {
|
||||
time.Sleep(*period)
|
||||
@@ -72,7 +56,7 @@ func main() {
|
||||
if !exists {
|
||||
size = 0
|
||||
mtime = time.Time{}
|
||||
hash = []byte{}
|
||||
hash = [sha256.Size]byte{}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -83,12 +67,12 @@ func main() {
|
||||
newSize := fi.Size()
|
||||
newMtime := fi.ModTime()
|
||||
|
||||
newHash, err := getmd5(file)
|
||||
newHash, err := sha256file(file)
|
||||
if err != nil {
|
||||
fmt.Println("getmd5:", err)
|
||||
fmt.Println("sha256file:", err)
|
||||
}
|
||||
|
||||
if newSize != size || newMtime != mtime || !bytes.Equal(newHash, hash) {
|
||||
if newSize != size || newMtime != mtime || newHash != hash {
|
||||
fmt.Println(file, "Size:", newSize, "Mtime:", newMtime, "Hash:", fmt.Sprintf("%x", newHash))
|
||||
hash = newHash
|
||||
size = newSize
|
||||
@@ -96,3 +80,18 @@ func main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sha256file(fname string) (hash [sha256.Size]byte, err error) {
|
||||
f, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
h := sha256.New()
|
||||
io.Copy(h, f)
|
||||
hb := h.Sum(nil)
|
||||
copy(hash[:], hb)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
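One thing the helper above glosses over is the io.Copy error, which would silently yield a hash of a partial read. A sketch of a variant that propagates it; this is an illustration, not the committed code.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/syncthing/syncthing/lib/sha256"
)

func sha256fileChecked(fname string) (hash [sha256.Size]byte, err error) {
	f, err := os.Open(fname)
	if err != nil {
		return hash, err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return hash, err // a short read would otherwise produce a bogus hash
	}
	copy(hash[:], h.Sum(nil))
	return hash, nil
}

func main() {
	h, err := sha256fileChecked(os.Args[0])
	if err != nil {
		fmt.Println("hashing failed:", err)
		return
	}
	fmt.Printf("%x\n", h)
}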
@@ -15,19 +15,17 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STBLOCKPROFILE") != "" {
|
||||
profiler := pprof.Lookup("block")
|
||||
if profiler == nil {
|
||||
panic("Couldn't find block profiler")
|
||||
}
|
||||
l.Debugln("Starting block profiling")
|
||||
go func() {
|
||||
err := saveBlockingProfiles(profiler) // Only returns on error
|
||||
l.Warnln("Block profiler failed:", err)
|
||||
panic("Block profiler failed")
|
||||
}()
|
||||
func startBlockProfiler() {
|
||||
profiler := pprof.Lookup("block")
|
||||
if profiler == nil {
|
||||
panic("Couldn't find block profiler")
|
||||
}
|
||||
l.Debugln("Starting block profiling")
|
||||
go func() {
|
||||
err := saveBlockingProfiles(profiler) // Only returns on error
|
||||
l.Warnln("Block profiler failed:", err)
|
||||
panic("Block profiler failed")
|
||||
}()
|
||||
}
|
||||
|
||||
func saveBlockingProfiles(profiler *pprof.Profile) error {
|
||||
|
||||
cmd/syncthing/cli/client.go (new file, 172 lines)
@@ -0,0 +1,172 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type APIClient interface {
|
||||
Get(url string) (*http.Response, error)
|
||||
Post(url, body string) (*http.Response, error)
|
||||
PutJSON(url string, o interface{}) (*http.Response, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
http.Client
|
||||
cfg config.GUIConfiguration
|
||||
apikey string
|
||||
}
|
||||
|
||||
type apiClientFactory struct {
|
||||
cfg config.GUIConfiguration
|
||||
}
|
||||
|
||||
func (f *apiClientFactory) getClient() (APIClient, error) {
|
||||
// Now if the API key and address are not provided (we are not connecting to a remote instance),
|
||||
// try to rip it out of the config.
|
||||
if f.cfg.RawAddress == "" && f.cfg.APIKey == "" {
|
||||
var err error
|
||||
f.cfg, err = loadGUIConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if f.cfg.Address() == "" || f.cfg.APIKey == "" {
|
||||
return nil, errors.New("Both --gui-address and --gui-apikey should be specified")
|
||||
}
|
||||
|
||||
httpClient := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial(f.cfg.Network(), f.cfg.Address())
|
||||
},
|
||||
},
|
||||
}
|
||||
return &apiClient{
|
||||
Client: httpClient,
|
||||
cfg: f.cfg,
|
||||
apikey: f.cfg.APIKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func loadGUIConfig() (config.GUIConfiguration, error) {
|
||||
// Load the certs and get the ID
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
)
|
||||
if err != nil {
|
||||
return config.GUIConfiguration{}, fmt.Errorf("reading device ID: %w", err)
|
||||
}
|
||||
|
||||
myID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
|
||||
// Load the config
|
||||
cfg, _, err := config.Load(locations.Get(locations.ConfigFile), myID, events.NoopLogger)
|
||||
if err != nil {
|
||||
return config.GUIConfiguration{}, fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
guiCfg := cfg.GUI()
|
||||
|
||||
if guiCfg.Address() == "" {
|
||||
return config.GUIConfiguration{}, errors.New("Could not find GUI Address")
|
||||
}
|
||||
|
||||
if guiCfg.APIKey == "" {
|
||||
return config.GUIConfiguration{}, errors.New("Could not find GUI API key")
|
||||
}
|
||||
|
||||
return guiCfg, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) Endpoint() string {
|
||||
if c.cfg.Network() == "unix" {
|
||||
return "http://unix/"
|
||||
}
|
||||
url := c.cfg.URL()
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
return url
|
||||
}
|
||||
|
||||
func (c *apiClient) Do(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("X-API-Key", c.apikey)
|
||||
resp, err := c.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, checkResponse(resp)
|
||||
}
|
||||
|
||||
func (c *apiClient) Request(url, method string, r io.Reader) (*http.Response, error) {
|
||||
request, err := http.NewRequest(method, c.Endpoint()+"rest/"+url, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Do(request)
|
||||
}
|
||||
|
||||
func (c *apiClient) RequestString(url, method, data string) (*http.Response, error) {
|
||||
return c.Request(url, method, bytes.NewBufferString(data))
|
||||
}
|
||||
|
||||
func (c *apiClient) RequestJSON(url, method string, o interface{}) (*http.Response, error) {
|
||||
data, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.Request(url, method, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
func (c *apiClient) Get(url string) (*http.Response, error) {
|
||||
return c.RequestString(url, "GET", "")
|
||||
}
|
||||
|
||||
func (c *apiClient) Post(url, body string) (*http.Response, error) {
|
||||
return c.RequestString(url, "POST", body)
|
||||
}
|
||||
|
||||
func (c *apiClient) PutJSON(url string, o interface{}) (*http.Response, error) {
|
||||
return c.RequestJSON(url, "PUT", o)
|
||||
}
|
||||
|
||||
var errNotFound = errors.New("invalid endpoint or API call")
|
||||
|
||||
func checkResponse(response *http.Response) error {
|
||||
if response.StatusCode == http.StatusNotFound {
|
||||
return errNotFound
|
||||
} else if response.StatusCode == http.StatusUnauthorized {
|
||||
return errors.New("invalid API key")
|
||||
} else if response.StatusCode != http.StatusOK {
|
||||
data, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := strings.TrimSpace(string(data))
|
||||
return fmt.Errorf("unexpected HTTP status returned: %s\n%s", response.Status, body)
|
||||
}
|
||||
return nil
|
||||
}
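As a rough usage sketch (not part of the diff), a caller inside the cli package could fetch an endpoint through the factory like this; the address and API key values are invented for illustration:

func exampleSystemStatus() error {
	factory := &apiClientFactory{
		cfg: config.GUIConfiguration{
			RawAddress: "127.0.0.1:8384", // assumed GUI address
			APIKey:     "abc123",         // assumed API key
		},
	}
	client, err := factory.getClient()
	if err != nil {
		return err
	}
	resp, err := client.Get("system/status")
	if err != nil {
		return err
	}
	body, err := responseToBArray(resp) // helper from cli/utils.go below
	if err != nil {
		return err
	}
	fmt.Println(string(body))
	return nil
}

The 404, 401 and other non-200 cases are already mapped to errors by checkResponse inside Do, so callers only have to handle a single error value.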
|
||||
cmd/syncthing/cli/config.go (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/AudriusButkevicius/recli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type configHandler struct {
|
||||
original, cfg config.Configuration
|
||||
client APIClient
|
||||
err error
|
||||
}
|
||||
|
||||
func getConfigCommand(f *apiClientFactory) (cli.Command, error) {
|
||||
h := new(configHandler)
|
||||
h.client, h.err = f.getClient()
|
||||
if h.err == nil {
|
||||
h.cfg, h.err = getConfig(h.client)
|
||||
}
|
||||
h.original = h.cfg.Copy()
|
||||
|
||||
// Copy the config and set the default flags
|
||||
recliCfg := recli.DefaultConfig
|
||||
recliCfg.IDTag.Name = "xml"
|
||||
recliCfg.SkipTag.Name = "json"
|
||||
|
||||
commands, err := recli.New(recliCfg).Construct(&h.cfg)
|
||||
if err != nil {
|
||||
return cli.Command{}, fmt.Errorf("config reflect: %w", err)
|
||||
}
|
||||
|
||||
return cli.Command{
|
||||
Name: "config",
|
||||
HideHelp: true,
|
||||
Usage: "Configuration modification command group",
|
||||
Subcommands: commands,
|
||||
Before: h.configBefore,
|
||||
After: h.configAfter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *configHandler) configBefore(c *cli.Context) error {
|
||||
for _, arg := range c.Args() {
|
||||
if arg == "--help" || arg == "-h" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return h.err
|
||||
}
|
||||
|
||||
func (h *configHandler) configAfter(_ *cli.Context) error {
|
||||
if h.err != nil {
|
||||
// Error was already returned in configBefore
|
||||
return nil
|
||||
}
|
||||
if reflect.DeepEqual(h.cfg, h.original) {
|
||||
return nil
|
||||
}
|
||||
body, err := json.MarshalIndent(h.cfg, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := h.client.Post("system/config", string(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
body, err := responseToBArray(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.New(string(body))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
cmd/syncthing/cli/debug.go (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var debugCommand = cli.Command{
|
||||
Name: "debug",
|
||||
HideHelp: true,
|
||||
Usage: "Debug command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "file",
|
||||
Usage: "Show information about a file (or directory/symlink)",
|
||||
ArgsUsage: "FOLDER-ID PATH",
|
||||
Action: expects(2, debugFile()),
|
||||
},
|
||||
indexCommand,
|
||||
{
|
||||
Name: "profile",
|
||||
Usage: "Save a profile to help figuring out what Syncthing does.",
|
||||
ArgsUsage: "cpu | heap",
|
||||
Action: expects(1, profile()),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func debugFile() cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
query := make(url.Values)
|
||||
query.Set("folder", c.Args()[0])
|
||||
query.Set("file", normalizePath(c.Args()[1]))
|
||||
return indexDumpOutput("debug/file?" + query.Encode())(c)
|
||||
}
|
||||
}
|
||||
|
||||
func profile() cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
switch t := c.Args()[0]; t {
|
||||
case "cpu", "heap":
|
||||
return saveToFile(fmt.Sprintf("debug/%vprof", c.Args()[0]))(c)
|
||||
default:
|
||||
return fmt.Errorf("expected cpu or heap as argument, got %v", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -22,12 +22,12 @@ var errorsCommand = cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Action: expects(0, dumpOutput("system/error")),
|
||||
Action: expects(0, indexDumpOutput("system/error")),
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "Push an error to active clients",
|
||||
ArgsUsage: "[error message]",
|
||||
ArgsUsage: "ERROR-MESSAGE",
|
||||
Action: expects(1, errorsPush),
|
||||
},
|
||||
{
|
||||
@@ -39,7 +39,7 @@ var errorsCommand = cli.Command{
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
client := c.App.Metadata["client"].(APIClient)
|
||||
errStr := strings.Join(c.Args(), " ")
|
||||
response, err := client.Post("system/error", strings.TrimSpace(errStr))
|
||||
if err != nil {
|
||||
cmd/syncthing/cli/index.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var indexCommand = cli.Command{
|
||||
Name: "index",
|
||||
Usage: "Show information about the index (database)",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Print the entire db",
|
||||
Action: expects(0, indexDump),
|
||||
},
|
||||
{
|
||||
Name: "dump-size",
|
||||
Usage: "Print the db size of different categories of information",
|
||||
Action: expects(0, indexDumpSize),
|
||||
},
|
||||
{
|
||||
Name: "check",
|
||||
Usage: "Check the database for inconsistencies",
|
||||
Action: expects(0, indexCheck),
|
||||
},
|
||||
{
|
||||
Name: "account",
|
||||
Usage: "Print key and value size statistics per key type",
|
||||
Action: expects(0, indexAccount),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -4,22 +4,26 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// account prints key and data size statistics per class
|
||||
func account(ldb backend.Backend) {
|
||||
// indexAccount prints key and data size statistics per class
|
||||
func indexAccount(*cli.Context) error {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var ksizes [256]int
|
||||
@@ -55,4 +59,6 @@ func account(ldb backend.Backend) {
|
||||
}
|
||||
fmt.Fprintf(tw, "Total\t%d items,\t%d KB keys +\t%d KB data.\t\n", toti, totks/1000, totds/1000)
|
||||
tw.Flush()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,23 +4,27 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func dump(ldb backend.Backend) {
|
||||
func indexDump(*cli.Context) error {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
@@ -34,7 +38,7 @@ func dump(ldb backend.Backend) {
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf(" V:%v\n", f)
|
||||
|
||||
@@ -61,10 +65,10 @@ func dump(ldb backend.Backend) {
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
val := it.Value()
|
||||
var real, virt time.Time
|
||||
real.UnmarshalBinary(val[:len(val)/2])
|
||||
virt.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, real, virt)
|
||||
var realTime, virtualTime time.Time
|
||||
realTime.UnmarshalBinary(val[:len(val)/2])
|
||||
virtualTime.UnmarshalBinary(val[len(val)/2:])
|
||||
fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, realTime, virtualTime)
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(key[1:])
|
||||
@@ -152,4 +156,5 @@ func dump(ldb backend.Backend) {
|
||||
fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -4,51 +4,38 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
)
|
||||
|
||||
type SizedElement struct {
|
||||
key string
|
||||
size int
|
||||
}
|
||||
func indexDumpSize(*cli.Context) error {
|
||||
type sizedElement struct {
|
||||
key string
|
||||
size int
|
||||
}
|
||||
|
||||
type ElementHeap []SizedElement
|
||||
|
||||
func (h ElementHeap) Len() int { return len(h) }
|
||||
func (h ElementHeap) Less(i, j int) bool { return h[i].size > h[j].size }
|
||||
func (h ElementHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
|
||||
func (h *ElementHeap) Push(x interface{}) {
|
||||
*h = append(*h, x.(SizedElement))
|
||||
}
|
||||
|
||||
func (h *ElementHeap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb backend.Backend) {
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
var ele SizedElement
|
||||
|
||||
var elems []sizedElement
|
||||
for it.Next() {
|
||||
var ele sizedElement
|
||||
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
@@ -89,11 +76,15 @@ func dumpsize(ldb backend.Backend) {
|
||||
ele.key = fmt.Sprintf("UNKNOWN:%x", key)
|
||||
}
|
||||
ele.size = len(it.Value())
|
||||
heap.Push(h, ele)
|
||||
elems = append(elems, ele)
|
||||
}
|
||||
|
||||
for h.Len() > 0 {
|
||||
ele = heap.Pop(h).(SizedElement)
|
||||
sort.Slice(elems, func(i, j int) bool {
|
||||
return elems[i].size > elems[j].size
|
||||
})
|
||||
for _, ele := range elems {
|
||||
fmt.Println(ele.key, ele.size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
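The hunk above drops the container/heap plus ElementHeap machinery in favour of appending to a slice and sorting it once. A standalone sketch of that pattern (key names and sizes are made up for illustration):

package main

import (
	"fmt"
	"sort"
)

type sizedElement struct {
	key  string
	size int
}

func main() {
	elems := []sizedElement{{"device", 340}, {"mtime", 80}, {"blockmap", 120}}
	// Sort descending by size, mirroring the ordering the heap used to provide.
	sort.Slice(elems, func(i, j int) bool { return elems[i].size > elems[j].size })
	for _, ele := range elems {
		fmt.Println(ele.key, ele.size)
	}
}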
|
||||
@@ -4,17 +4,18 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
@@ -34,7 +35,12 @@ type sequenceKey struct {
|
||||
sequence uint64
|
||||
}
|
||||
|
||||
func idxck(ldb backend.Backend) (success bool) {
|
||||
func indexCheck(*cli.Context) (err error) {
|
||||
ldb, err := getDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
@@ -47,11 +53,20 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
usedBlocklists := make(map[string]struct{})
|
||||
usedVersions := make(map[string]struct{})
|
||||
var localDeviceKey uint32
|
||||
success = true
|
||||
success := true
|
||||
defer func() {
|
||||
if err == nil {
|
||||
if success {
|
||||
fmt.Println("Index check completed successfully.")
|
||||
} else {
|
||||
err = errors.New("Inconsistencies found in the index")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
it, err := ldb.NewPrefixIterator(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
@@ -329,12 +344,12 @@ func idxck(ldb backend.Backend) (success bool) {
|
||||
fmt.Printf("%d version entries out of %d needs GC\n", d, len(versions))
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
gfv, gok := vl.GetGlobal()
|
||||
if !gok { // That's weird, but we hardly need something non-existant
|
||||
if !gok { // That's weird, but we hardly need something non-existent
|
||||
return false
|
||||
}
|
||||
fv, ok := vl.Get(protocol.LocalDeviceID[:])
|
||||
cmd/syncthing/cli/main.go (new file, 172 lines)
@@ -0,0 +1,172 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/urfave/cli"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
type preCli struct {
|
||||
GUIAddress string `name:"gui-address"`
|
||||
GUIAPIKey string `name:"gui-apikey"`
|
||||
HomeDir string `name:"home"`
|
||||
ConfDir string `name:"config"`
|
||||
DataDir string `name:"data"`
|
||||
}
|
||||
|
||||
func Run() error {
|
||||
// This is somewhat a hack around a chicken and egg problem. We need to set
|
||||
// the home directory and potentially other flags to know where the
|
||||
// syncthing instance is running in order to get its config ... which we
|
||||
// then use to construct the actual CLI ... at which point it's too late to
|
||||
// add flags there...
|
||||
c := preCli{}
|
||||
parseFlags(&c)
|
||||
return runInternal(c, os.Args)
|
||||
}
|
||||
|
||||
func RunWithArgs(cliArgs []string) error {
|
||||
c := preCli{}
|
||||
parseFlagsWithArgs(cliArgs, &c)
|
||||
return runInternal(c, cliArgs)
|
||||
}
|
||||
|
||||
func runInternal(c preCli, cliArgs []string) error {
|
||||
// Not set as default above because the strings can be really long.
|
||||
err := cmdutil.SetConfigDataLocationsFromFlags(c.HomeDir, c.ConfDir, c.DataDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Command line options: %w", err)
|
||||
}
|
||||
clientFactory := &apiClientFactory{
|
||||
cfg: config.GUIConfiguration{
|
||||
RawAddress: c.GUIAddress,
|
||||
APIKey: c.GUIAPIKey,
|
||||
},
|
||||
}
|
||||
|
||||
configCommand, err := getConfigCommand(clientFactory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Implement the same flags at the upper CLI, but do nothing with them.
|
||||
// This is so that the usage text is the same
|
||||
fakeFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "gui-address",
|
||||
Usage: "Override GUI address to `URL` (e.g. \"192.0.2.42:8443\")",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "gui-apikey",
|
||||
Usage: "Override GUI API key to `API-KEY`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "home",
|
||||
Usage: "Set configuration and data directory to `PATH`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Set configuration directory (config and keys) to `PATH`",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "data",
|
||||
Usage: "Set data directory (database and logs) to `PATH`",
|
||||
},
|
||||
}
|
||||
|
||||
// Construct the actual CLI
|
||||
app := cli.NewApp()
|
||||
app.Author = "The Syncthing Authors"
|
||||
app.Metadata = map[string]interface{}{
|
||||
"clientFactory": clientFactory,
|
||||
}
|
||||
app.Commands = []cli.Command{{
|
||||
Name: "cli",
|
||||
Usage: "Syncthing command line interface",
|
||||
Flags: fakeFlags,
|
||||
Subcommands: []cli.Command{
|
||||
configCommand,
|
||||
showCommand,
|
||||
operationCommand,
|
||||
errorsCommand,
|
||||
debugCommand,
|
||||
{
|
||||
Name: "-",
|
||||
HideHelp: true,
|
||||
Usage: "Read commands from stdin",
|
||||
Action: func(ctx *cli.Context) error {
|
||||
if ctx.NArg() > 0 {
|
||||
return errors.New("command does not expect any arguments")
|
||||
}
|
||||
|
||||
// Drop the `-` not to recurse into self.
|
||||
args := make([]string, len(cliArgs)-1)
|
||||
copy(args, cliArgs)
|
||||
|
||||
fmt.Println("Reading commands from stdin...", args)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing input: %w", err)
|
||||
}
|
||||
if len(input) == 0 {
|
||||
continue
|
||||
}
|
||||
err = app.Run(append(args, input...))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return scanner.Err()
|
||||
},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
return app.Run(cliArgs)
|
||||
}
|
||||
|
||||
func parseFlags(c *preCli) error {
|
||||
// kong only needs to parse the global arguments after "cli" and before the
|
||||
// subcommand (if any).
|
||||
if len(os.Args) <= 2 {
|
||||
return nil
|
||||
}
|
||||
return parseFlagsWithArgs(os.Args[2:], c)
|
||||
}
|
||||
|
||||
func parseFlagsWithArgs(args []string, c *preCli) error {
|
||||
for i := 0; i < len(args); i++ {
|
||||
if !strings.HasPrefix(args[i], "--") {
|
||||
args = args[:i]
|
||||
break
|
||||
}
|
||||
if !strings.Contains(args[i], "=") {
|
||||
i++
|
||||
}
|
||||
}
|
||||
// We don't want kong to print anything nor os.Exit (e.g. on -h)
|
||||
parser, err := kong.New(c, kong.Writers(io.Discard, io.Discard), kong.Exit(func(int) {}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = parser.Parse(args)
|
||||
return err
|
||||
}
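To illustrate the two-stage parsing (a hypothetical call, not part of the diff): kong only sees the leading --flags, and everything from the first non-flag argument onwards is left for the urfave/cli app.

func examplePreParse() {
	var c preCli
	// "config dump" is cut off before kong runs; only --home is parsed.
	_ = parseFlagsWithArgs([]string{"--home=/tmp/st-home", "config", "dump"}, &c)
	fmt.Println(c.HomeDir) // "/tmp/st-home", via the name:"home" tag
}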
|
||||
@@ -4,11 +4,16 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
@@ -27,11 +32,6 @@ var operationCommand = cli.Command{
|
||||
Usage: "Shutdown syncthing",
|
||||
Action: expects(0, emptyPost("system/shutdown")),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Action: expects(0, emptyPost("system/reset")),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
@@ -39,15 +39,24 @@ var operationCommand = cli.Command{
|
||||
},
|
||||
{
|
||||
Name: "folder-override",
|
||||
Usage: "Override changes on folder (remote for sendonly, local for receiveonly)",
|
||||
ArgsUsage: "[folder id]",
|
||||
Usage: "Override changes on folder (remote for sendonly, local for receiveonly). WARNING: Destructive - deletes/changes your data.",
|
||||
ArgsUsage: "FOLDER-ID",
|
||||
Action: expects(1, foldersOverride),
|
||||
},
|
||||
{
|
||||
Name: "default-ignores",
|
||||
Usage: "Set the default ignores (config) from a file",
|
||||
ArgsUsage: "PATH",
|
||||
Action: expects(1, setDefaultIgnores),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg, err := getConfig(client)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -69,10 +78,36 @@ func foldersOverride(c *cli.Context) error {
|
||||
if body != "" {
|
||||
errStr += "\nBody: " + body
|
||||
}
|
||||
return fmt.Errorf(errStr)
|
||||
return errors.New(errStr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Folder " + rid + " not found")
|
||||
return fmt.Errorf("Folder %q not found", rid)
|
||||
}
|
||||
|
||||
func setDefaultIgnores(c *cli.Context) error {
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dir, file := filepath.Split(c.Args()[0])
|
||||
filesystem := fs.NewFilesystem(fs.FilesystemTypeBasic, dir)
|
||||
|
||||
fd, err := filesystem.Open(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
scanner := bufio.NewScanner(fd)
|
||||
var lines []string
|
||||
for scanner.Scan() {
|
||||
lines = append(lines, scanner.Text())
|
||||
}
|
||||
fd.Close()
|
||||
if err := scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = client.PutJSON("config/defaults/ignores", config.Ignores{Lines: lines})
|
||||
return err
|
||||
}
|
||||
cmd/syncthing/cli/pending.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var pendingCommand = cli.Command{
|
||||
Name: "pending",
|
||||
HideHelp: true,
|
||||
Usage: "Pending subcommand group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "devices",
|
||||
Usage: "Show pending devices",
|
||||
Action: expects(0, indexDumpOutput("cluster/pending/devices")),
|
||||
},
|
||||
{
|
||||
Name: "folders",
|
||||
Usage: "Show pending folders",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{Name: "device", Usage: "Show pending folders offered by given device"},
|
||||
},
|
||||
Action: expects(0, folders()),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func folders() cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
if c.String("device") != "" {
|
||||
query := make(url.Values)
|
||||
query.Set("device", c.String("device"))
|
||||
return indexDumpOutput("cluster/pending/folders?" + query.Encode())(c)
|
||||
}
|
||||
return indexDumpOutput("cluster/pending/folders")(c)
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli"
|
||||
@@ -18,27 +18,33 @@ var showCommand = cli.Command{
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Show syncthing client version",
|
||||
Action: expects(0, dumpOutput("system/version")),
|
||||
Action: expects(0, indexDumpOutput("system/version")),
|
||||
},
|
||||
{
|
||||
Name: "config-status",
|
||||
Usage: "Show configuration status, whether or not a restart is required for changes to take effect",
|
||||
Action: expects(0, dumpOutput("system/config/insync")),
|
||||
Action: expects(0, indexDumpOutput("config/restart-required")),
|
||||
},
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Show system status",
|
||||
Action: expects(0, dumpOutput("system/status")),
|
||||
Action: expects(0, indexDumpOutput("system/status")),
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Action: expects(0, dumpOutput("system/connections")),
|
||||
Action: expects(0, indexDumpOutput("system/connections")),
|
||||
},
|
||||
{
|
||||
Name: "discovery",
|
||||
Usage: "Show the discovered addresses of remote devices (from cache of the running syncthing instance)",
|
||||
Action: expects(0, indexDumpOutput("system/discovery")),
|
||||
},
|
||||
pendingCommand,
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Show usage report",
|
||||
Action: expects(0, dumpOutput("svc/report")),
|
||||
Action: expects(0, indexDumpOutput("svc/report")),
|
||||
},
|
||||
},
|
||||
}
|
||||
cmd/syncthing/cli/utils.go (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func responseToBArray(response *http.Response) ([]byte, error) {
|
||||
bytes, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, response.Body.Close()
|
||||
}
|
||||
|
||||
func emptyPost(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = client.Post(url, "")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func indexDumpOutput(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := client.Get(url)
|
||||
if errors.Is(err, errNotFound) {
|
||||
return errors.New("not found (folder/file not in database)")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return prettyPrintResponse(response)
|
||||
}
|
||||
}
|
||||
|
||||
func saveToFile(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client, err := getClientFactory(c).getClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, params, err := mime.ParseMediaType(response.Header.Get("Content-Disposition"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := params["filename"]
|
||||
if filename == "" {
|
||||
return errors.New("Missing filename in response")
|
||||
}
|
||||
bs, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("Wrote results to", filename)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func getConfig(c APIClient) (config.Configuration, error) {
|
||||
cfg := config.Configuration{}
|
||||
response, err := c.Get("system/config")
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
err = json.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return config.Configuration{}, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func expects(n int, actionFunc cli.ActionFunc) cli.ActionFunc {
|
||||
return func(ctx *cli.Context) error {
|
||||
if ctx.NArg() != n {
|
||||
plural := ""
|
||||
if n != 1 {
|
||||
plural = "s"
|
||||
}
|
||||
return fmt.Errorf("expected %d argument%s, got %d", n, plural, ctx.NArg())
|
||||
}
|
||||
return actionFunc(ctx)
|
||||
}
|
||||
}
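A hypothetical subcommand (not in the diff) showing how expects guards an action so it only runs with exactly one positional argument:

var exampleCommand = cli.Command{
	Name:      "example",
	Usage:     "Print the folder ID given on the command line",
	ArgsUsage: "FOLDER-ID",
	Action: expects(1, func(c *cli.Context) error {
		fmt.Println("folder:", c.Args()[0])
		return nil
	}),
}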
|
||||
|
||||
func prettyPrintJSON(data interface{}) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(data)
|
||||
}
|
||||
|
||||
func prettyPrintResponse(response *http.Response) error {
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data interface{}
|
||||
if err := json.Unmarshal(bytes, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: Check flag for pretty print format
|
||||
return prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
func getDB() (backend.Backend, error) {
|
||||
return backend.OpenLevelDBRO(locations.Get(locations.Database))
|
||||
}
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
for i := range bs {
|
||||
if bs[i] == 0 {
|
||||
return string(bs[:i])
|
||||
}
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
func normalizePath(path string) string {
|
||||
return filepath.ToSlash(filepath.Clean(path))
|
||||
}
|
||||
|
||||
func getClientFactory(c *cli.Context) *apiClientFactory {
|
||||
return c.App.Metadata["clientFactory"].(*apiClientFactory)
|
||||
}
|
||||
cmd/syncthing/cmdutil/options_common.go (new file, 16 lines)
@@ -0,0 +1,16 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

// CommonOptions are reused among several subcommands
type CommonOptions struct {
	buildCommonOptions
	ConfDir         string `name:"config" placeholder:"PATH" help:"Set configuration directory (config and keys)"`
	HomeDir         string `name:"home" placeholder:"PATH" help:"Set configuration and data directory"`
	NoDefaultFolder bool   `env:"STNODEFAULTFOLDER" help:"Don't create the \"default\" folder on first startup"`
	SkipPortProbing bool   `help:"Don't try to find free ports for GUI and listen addresses on first startup"`
}
cmd/syncthing/cmdutil/options_others.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:build !windows
// +build !windows

package cmdutil

type buildCommonOptions struct {
	HideConsole bool `hidden:""`
}
cmd/syncthing/cmdutil/options_windows.go (new file, 11 lines)
@@ -0,0 +1,11 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

type buildCommonOptions struct {
	HideConsole bool `name:"no-console" help:"Hide console window"`
}
cmd/syncthing/cmdutil/util.go (new file, 35 lines)
@@ -0,0 +1,35 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

import (
	"errors"

	"github.com/syncthing/syncthing/lib/locations"
)

func SetConfigDataLocationsFromFlags(homeDir, confDir, dataDir string) error {
	homeSet := homeDir != ""
	confSet := confDir != ""
	dataSet := dataDir != ""
	switch {
	case dataSet != confSet:
		return errors.New("either both or none of --config and --data must be given, use --home to set both at once")
	case homeSet && dataSet:
		return errors.New("--home must not be used together with --config and --data")
	case homeSet:
		confDir = homeDir
		dataDir = homeDir
		fallthrough
	case dataSet:
		if err := locations.SetBaseDir(locations.ConfigBaseDir, confDir); err != nil {
			return err
		}
		return locations.SetBaseDir(locations.DataBaseDir, dataDir)
	}
	return nil
}
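An illustration of the accepted flag combinations (hypothetical calls, not part of the diff); each call either sets the base directories or returns one of the errors above:

func exampleLocationFlags() {
	// --config without --data (or vice versa): rejected.
	fmt.Println(cmdutil.SetConfigDataLocationsFromFlags("", "/etc/syncthing", ""))
	// --home combined with --config/--data: rejected.
	fmt.Println(cmdutil.SetConfigDataLocationsFromFlags("/home/user/.config/syncthing", "/etc/syncthing", "/var/lib/syncthing"))
	// --home alone: sets both the config and the data base directory.
	fmt.Println(cmdutil.SetConfigDataLocationsFromFlags("/home/user/.config/syncthing", "", ""))
}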
@@ -10,7 +10,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -62,7 +61,7 @@ func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
// the log contents. A HEAD request is made to see if the log has already
|
||||
// been reported. If not, a PUT is made with the log contents.
|
||||
func uploadPanicLog(ctx context.Context, urlBase, file string) error {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd/syncthing/decrypt/decrypt.go (new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package decrypt implements the `syncthing decrypt` subcommand.
|
||||
package decrypt
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/scanner"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
Path string `arg:"" required:"1" help:"Path to encrypted folder"`
|
||||
To string `xor:"mode" placeholder:"PATH" help:"Destination directory, when decrypting"`
|
||||
VerifyOnly bool `xor:"mode" help:"Don't write decrypted files to disk (but verify plaintext hashes)"`
|
||||
Password string `help:"Folder password for decryption / verification" env:"FOLDER_PASSWORD"`
|
||||
FolderID string `help:"Folder ID of the encrypted folder, if it cannot be determined automatically"`
|
||||
Continue bool `help:"Continue processing next file in case of error, instead of aborting"`
|
||||
Verbose bool `help:"Show verbose progress information"`
|
||||
TokenPath string `placeholder:"PATH" help:"Path to the token file within the folder (used to determine folder ID)"`
|
||||
|
||||
folderKey *[32]byte
|
||||
}
|
||||
|
||||
type storedEncryptionToken struct {
|
||||
FolderID string
|
||||
Token []byte
|
||||
}
|
||||
|
||||
func (c *CLI) Run() error {
|
||||
log.SetFlags(0)
|
||||
|
||||
if c.To == "" && !c.VerifyOnly {
|
||||
return errors.New("must set --to or --verify-only")
|
||||
}
|
||||
|
||||
if c.TokenPath == "" {
|
||||
// This is a bit long to show as default in --help
|
||||
c.TokenPath = filepath.Join(config.DefaultMarkerName, config.EncryptionTokenName)
|
||||
}
|
||||
|
||||
if c.FolderID == "" {
|
||||
// We should try to figure out the folder ID
|
||||
folderID, err := c.getFolderID()
|
||||
if err != nil {
|
||||
log.Println("No --folder-id given and couldn't read folder token")
|
||||
return fmt.Errorf("getting folder ID: %w", err)
|
||||
}
|
||||
|
||||
c.FolderID = folderID
|
||||
if c.Verbose {
|
||||
log.Println("Found folder ID:", c.FolderID)
|
||||
}
|
||||
}
|
||||
|
||||
c.folderKey = protocol.KeyFromPassword(c.FolderID, c.Password)
|
||||
|
||||
return c.walk()
|
||||
}
|
||||
|
||||
// walk finds and processes every file in the encrypted folder
|
||||
func (c *CLI) walk() error {
|
||||
srcFs := fs.NewFilesystem(fs.FilesystemTypeBasic, c.Path)
|
||||
var dstFs fs.Filesystem
|
||||
if c.To != "" {
|
||||
dstFs = fs.NewFilesystem(fs.FilesystemTypeBasic, c.To)
|
||||
}
|
||||
|
||||
return srcFs.Walk(".", func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsRegular() {
|
||||
return nil
|
||||
}
|
||||
if fs.IsInternal(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.withContinue(c.process(srcFs, dstFs, path))
|
||||
})
|
||||
}
|
||||
|
||||
// If --continue was set we just mention the error and return nil to
|
||||
// continue processing.
|
||||
func (c *CLI) withContinue(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if c.Continue {
|
||||
log.Println("Warning:", err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// getFolderID returns the folder ID found in the encrypted token, or an
|
||||
// error.
|
||||
func (c *CLI) getFolderID() (string, error) {
|
||||
tokenPath := filepath.Join(c.Path, c.TokenPath)
|
||||
bs, err := os.ReadFile(tokenPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("reading folder token: %w", err)
|
||||
}
|
||||
|
||||
var tok storedEncryptionToken
|
||||
if err := json.Unmarshal(bs, &tok); err != nil {
|
||||
return "", fmt.Errorf("parsing folder token: %w", err)
|
||||
}
|
||||
|
||||
return tok.FolderID, nil
|
||||
}
|
||||
|
||||
// process handles the file named path in srcFs, decrypting it into dstFs
|
||||
// unless dstFs is nil.
|
||||
func (c *CLI) process(srcFs fs.Filesystem, dstFs fs.Filesystem, path string) error {
|
||||
// Which filemode bits to preserve
|
||||
const retainBits = fs.ModePerm | fs.ModeSetgid | fs.ModeSetuid | fs.ModeSticky
|
||||
|
||||
if c.Verbose {
|
||||
log.Printf("Processing %q", path)
|
||||
}
|
||||
|
||||
encFd, err := srcFs.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer encFd.Close()
|
||||
|
||||
encFi, err := loadEncryptedFileInfo(encFd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: loading metadata trailer: %w", path, err)
|
||||
}
|
||||
|
||||
// Workaround for a bug in <= v1.15.0-rc.5 where we stored names
|
||||
// in native format, while protocol expects wire format (slashes).
|
||||
encFi.Name = osutil.NormalizedFilename(encFi.Name)
|
||||
|
||||
plainFi, err := protocol.DecryptFileInfo(*encFi, c.folderKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: decrypting metadata: %w", path, err)
|
||||
}
|
||||
|
||||
if c.Verbose {
|
||||
log.Printf("Plaintext filename is %q", plainFi.Name)
|
||||
}
|
||||
|
||||
var plainFd fs.File
|
||||
if dstFs != nil {
|
||||
if err := dstFs.MkdirAll(filepath.Dir(plainFi.Name), 0700); err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
|
||||
plainFd, err = dstFs.Create(plainFi.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
defer plainFd.Close() // also closed explicitly in the return
|
||||
if err := dstFs.Chmod(plainFi.Name, fs.FileMode(plainFi.Permissions&uint32(retainBits))); err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.decryptFile(encFi, &plainFi, encFd, plainFd); err != nil {
|
||||
// Decrypting the file failed, leaving it in an inconsistent state.
|
||||
// Delete it. Even --continue currently doesn't mean "leave broken
|
||||
// stuff in place", it just means "try the next file instead of
|
||||
// aborting".
|
||||
if plainFd != nil {
|
||||
_ = dstFs.Remove(plainFd.Name())
|
||||
}
|
||||
return fmt.Errorf("%s: %s: %w", path, plainFi.Name, err)
|
||||
} else if c.Verbose {
|
||||
log.Printf("Data verified for %q", plainFi.Name)
|
||||
}
|
||||
|
||||
if plainFd != nil {
|
||||
if err := plainFd.Close(); err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
if err := dstFs.Chtimes(plainFi.Name, plainFi.ModTime(), plainFi.ModTime()); err != nil {
|
||||
return fmt.Errorf("%s: %w", plainFi.Name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decryptFile reads, decrypts and verifies all the blocks in src, writing
|
||||
// it to dst if dst is non-nil. (If dst is nil it just becomes a
|
||||
// read-and-verify operation.)
|
||||
func (c *CLI) decryptFile(encFi *protocol.FileInfo, plainFi *protocol.FileInfo, src io.ReaderAt, dst io.WriterAt) error {
|
||||
// The encrypted and plaintext files must consist of an equal number of blocks
|
||||
if len(encFi.Blocks) != len(plainFi.Blocks) {
|
||||
return fmt.Errorf("block count mismatch: encrypted %d != plaintext %d", len(encFi.Blocks), len(plainFi.Blocks))
|
||||
}
|
||||
|
||||
fileKey := protocol.FileKey(plainFi.Name, c.folderKey)
|
||||
for i, encBlock := range encFi.Blocks {
|
||||
// Read the encrypted block
|
||||
buf := make([]byte, encBlock.Size)
|
||||
if _, err := src.ReadAt(buf, encBlock.Offset); err != nil {
|
||||
return fmt.Errorf("encrypted block %d (%d bytes): %w", i, encBlock.Size, err)
|
||||
}
|
||||
|
||||
// Decrypt it
|
||||
dec, err := protocol.DecryptBytes(buf, fileKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encrypted block %d (%d bytes): %w", i, encBlock.Size, err)
|
||||
}
|
||||
|
||||
// Verify the block size against the expected plaintext
|
||||
plainBlock := plainFi.Blocks[i]
|
||||
if i == len(plainFi.Blocks)-1 && len(dec) > plainBlock.Size {
|
||||
// The last block might be padded, which is fine (we skip the padding)
|
||||
dec = dec[:plainBlock.Size]
|
||||
} else if len(dec) != plainBlock.Size {
|
||||
return fmt.Errorf("plaintext block %d size mismatch, actual %d != expected %d", i, len(dec), plainBlock.Size)
|
||||
}
|
||||
|
||||
// Verify the hash against the plaintext block info
|
||||
if !scanner.Validate(dec, plainBlock.Hash, 0) {
|
||||
// The block decrypted correctly but fails the hash check. This
|
||||
// is odd and unexpected, but it's still a valid block from
|
||||
// the source. The file might have changed while we pulled it?
|
||||
err := fmt.Errorf("plaintext block %d (%d bytes) failed validation after decryption", i, plainBlock.Size)
|
||||
if c.Continue {
|
||||
log.Printf("Warning: %s: %s: %v", encFi.Name, plainFi.Name, err)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Write it to the destination, unless we're just verifying.
|
||||
if dst != nil {
|
||||
if _, err := dst.WriteAt(dec, plainBlock.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadEncryptedFileInfo loads the encrypted FileInfo trailer from a file on
|
||||
// disk.
|
||||
func loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
// Seek to the size of the trailer block
|
||||
if _, err := fd.Seek(-4, io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var bs [4]byte
|
||||
if _, err := io.ReadFull(fd, bs[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
size := int64(binary.BigEndian.Uint32(bs[:]))
|
||||
|
||||
// Seek to the start of the trailer
|
||||
if _, err := fd.Seek(-(4 + size), io.SeekEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trailer := make([]byte, size)
|
||||
if _, err := io.ReadFull(fd, trailer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &encFi, nil
|
||||
}
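For orientation, the trailer that loadEncryptedFileInfo reads back sits at the very end of the encrypted file: the marshalled FileInfo followed by its length as a 4-byte big-endian integer. A writer-side sketch of that layout (an assumption for illustration only, not the actual encryption-side code):

func appendTrailer(encData []byte, encFi *protocol.FileInfo) ([]byte, error) {
	trailer, err := encFi.Marshal()
	if err != nil {
		return nil, err
	}
	var size [4]byte
	binary.BigEndian.PutUint32(size[:], uint32(len(trailer)))
	encData = append(encData, trailer...) // trailer bytes first
	encData = append(encData, size[:]...) // then the 4-byte length
	return encData, nil
}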
|
||||
cmd/syncthing/generate/generate.go (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package generate implements the `syncthing generate` subcommand.
|
||||
package generate
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/syncthing"
|
||||
)
|
||||
|
||||
type CLI struct {
|
||||
cmdutil.CommonOptions
|
||||
GUIUser string `placeholder:"STRING" help:"Specify new GUI authentication user name"`
|
||||
GUIPassword string `placeholder:"STRING" help:"Specify new GUI authentication password (use - to read from standard input)"`
|
||||
}
|
||||
|
||||
func (c *CLI) Run(l logger.Logger) error {
|
||||
if c.HideConsole {
|
||||
osutil.HideConsole()
|
||||
}
|
||||
|
||||
if c.HomeDir != "" {
|
||||
if c.ConfDir != "" {
|
||||
return errors.New("--home must not be used together with --config")
|
||||
}
|
||||
c.ConfDir = c.HomeDir
|
||||
}
|
||||
if c.ConfDir == "" {
|
||||
c.ConfDir = locations.GetBaseDir(locations.ConfigBaseDir)
|
||||
}
|
||||
|
||||
// Support reading the password from a pipe or similar
|
||||
if c.GUIPassword == "-" {
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
password, _, err := reader.ReadLine()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading GUI password: %w", err)
|
||||
}
|
||||
c.GUIPassword = string(password)
|
||||
}
|
||||
|
||||
if err := Generate(l, c.ConfDir, c.GUIUser, c.GUIPassword, c.NoDefaultFolder, c.SkipPortProbing); err != nil {
|
||||
return fmt.Errorf("failed to generate config and keys: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFolder, skipPortProbing bool) error {
|
||||
dir, err := fs.ExpandTilde(confDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := syncthing.EnsureDir(dir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
locations.SetBaseDir(locations.ConfigBaseDir, dir)
|
||||
|
||||
var myID protocol.DeviceID
|
||||
certFile, keyFile := locations.Get(locations.CertFile), locations.Get(locations.KeyFile)
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err == nil {
|
||||
l.Warnln("Key exists; will not overwrite.")
|
||||
} else {
|
||||
cert, err = syncthing.GenerateCertificate(certFile, keyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create certificate: %w", err)
|
||||
}
|
||||
}
|
||||
myID = protocol.NewDeviceID(cert.Certificate[0])
|
||||
l.Infoln("Device ID:", myID)
|
||||
|
||||
cfgFile := locations.Get(locations.ConfigFile)
|
||||
cfg, _, err := config.Load(cfgFile, myID, events.NoopLogger)
|
||||
if fs.IsNotExist(err) {
|
||||
if cfg, err = syncthing.DefaultConfig(cfgFile, myID, events.NoopLogger, noDefaultFolder, skipPortProbing); err != nil {
|
||||
return fmt.Errorf("create config: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("load config: %w", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go cfg.Serve(ctx)
|
||||
defer cancel()
|
||||
|
||||
var updateErr error
|
||||
waiter, err := cfg.Modify(func(cfg *config.Configuration) {
|
||||
updateErr = updateGUIAuthentication(l, &cfg.GUI, guiUser, guiPassword)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("modify config: %w", err)
|
||||
}
|
||||
|
||||
waiter.Wait()
|
||||
if updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
if err := cfg.Save(); err != nil {
|
||||
return fmt.Errorf("save config: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateGUIAuthentication(l logger.Logger, guiCfg *config.GUIConfiguration, guiUser, guiPassword string) error {
|
||||
if guiUser != "" && guiCfg.User != guiUser {
|
||||
guiCfg.User = guiUser
|
||||
l.Infoln("Updated GUI authentication user name:", guiUser)
|
||||
}
|
||||
|
||||
if guiPassword != "" && guiCfg.Password != guiPassword {
|
||||
if err := guiCfg.HashAndSetPassword(guiPassword); err != nil {
|
||||
return fmt.Errorf("failed to set GUI authentication password: %w", err)
|
||||
}
|
||||
l.Infoln("Updated GUI authentication password.")
|
||||
}
|
||||
return nil
|
||||
}
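A short usage sketch (hypothetical, not part of the diff) of calling Generate from other code, for example a setup tool; the directory and credentials are made up:

func exampleGenerate(l logger.Logger) error {
	// Creates the certificate, device ID and a default config under the
	// given directory, and presets the GUI user and (hashed) password.
	return generate.Generate(l, "/tmp/syncthing-config", "admin", "s3cr3t", false, false)
}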
|
||||
@@ -11,24 +11,17 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STHEAPPROFILE") != "" {
|
||||
rate := 1
|
||||
if i, err := strconv.Atoi(os.Getenv("STHEAPPROFILE")); err == nil {
|
||||
rate = i
|
||||
}
|
||||
l.Debugln("Starting heap profiling")
|
||||
go func() {
|
||||
err := saveHeapProfiles(rate) // Only returns on error
|
||||
l.Warnln("Heap profiler failed:", err)
|
||||
panic("Heap profiler failed")
|
||||
}()
|
||||
}
|
||||
func startHeapProfiler() {
|
||||
l.Debugln("Starting heap profiling")
|
||||
go func() {
|
||||
err := saveHeapProfiles(1) // Only returns on error
|
||||
l.Warnln("Heap profiler failed:", err)
|
||||
panic("Heap profiler failed")
|
||||
}()
|
||||
}
|
||||
|
||||
func saveHeapProfiles(rate int) error {
|
||||
|
||||
@@ -10,10 +10,9 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // Need to import this to support STPROFILER.
|
||||
@@ -22,16 +21,26 @@ import (
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/alecthomas/kong"
|
||||
"github.com/thejerf/suture/v4"
|
||||
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cli"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/decrypt"
|
||||
"github.com/syncthing/syncthing/cmd/syncthing/generate"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/db/backend"
|
||||
"github.com/syncthing/syncthing/lib/dialer"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
@@ -41,23 +50,16 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/svcutil"
|
||||
"github.com/syncthing/syncthing/lib/syncthing"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
const (
|
||||
tlsDefaultCommonName = "syncthing"
|
||||
deviceCertLifetimeDays = 20 * 365
|
||||
sigTerm = syscall.Signal(15)
|
||||
sigTerm = syscall.Signal(15)
|
||||
)
|
||||
|
||||
const (
|
||||
usage = "syncthing [options]"
|
||||
extraUsage = `
|
||||
The -logflags value is a sum of the following:
|
||||
The --logflags value is a sum of the following:
|
||||
|
||||
1 Date
|
||||
2 Time
|
||||
@@ -65,14 +67,14 @@ The -logflags value is a sum of the following:
|
||||
8 Long filename
|
||||
16 Short filename
|
||||
|
||||
I.e. to prefix each log line with date and time, set -logflags=3 (1 + 2 from
|
||||
above). The value 0 is used to disable all of the above. The default is to
|
||||
show time only (2).
|
||||
I.e. to prefix each log line with time and filename, set --logflags=18 (2 + 16
|
||||
from above). The value 0 is used to disable all of the above. The default is
|
||||
to show date and time (3).
|
||||
|
||||
Logging always happens to the command line (stdout) and optionally to the
|
||||
file at the path specified by -logfile=path. In addition to a path, the special
|
||||
file at the path specified by --logfile=path. In addition to a path, the special
|
||||
values "default" and "-" may be used. The former logs to DATADIR/syncthing.log
|
||||
(see -data-dir), which is the default on Windows, and the latter only to stdout,
|
||||
(see --data), which is the default on Windows, and the latter only to stdout,
|
||||
no file, which is the default anywhere else.
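A minimal standalone sketch of how these bits combine, assuming they map to the Go standard library log constants (the defaults elsewhere in this change use log.Ltime and logger.DefaultFlags):

package main

import "log"

func main() {
	// 2 (Time) + 16 (Short filename) = 18, i.e. --logflags=18
	log.SetFlags(log.Ltime | log.Lshortfile)
	log.Println("each line now carries time and short filename")
}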
|
||||
|
||||
|
||||
@@ -80,31 +82,11 @@ Development Settings
|
||||
--------------------
|
||||
|
||||
The following environment variables modify Syncthing's behavior in ways that
|
||||
are mostly useful for developers. Use with care.
|
||||
|
||||
STNODEFAULTFOLDER Don't create a default folder when starting for the first
|
||||
time. This variable will be ignored anytime after the first
|
||||
run.
|
||||
|
||||
STGUIASSETS Directory to load GUI assets from. Overrides compiled in
|
||||
assets.
|
||||
are mostly useful for developers. Use with care. See also the --debug-* options
|
||||
above.
|
||||
|
||||
STTRACE A comma separated string of facilities to trace. The valid
|
||||
facility strings listed below.
|
||||
|
||||
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start
|
||||
the profiler with HTTP access.
|
||||
|
||||
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
|
||||
|
||||
STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
|
||||
heap usage increases.
|
||||
|
||||
STBLOCKPROFILE Write block profiles to block-$pid-$timestamp.pprof every 20
|
||||
seconds.
|
||||
|
||||
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
|
||||
supported on Windows.
|
||||
facility strings are listed below.
|
||||
|
||||
STDEADLOCKTIMEOUT Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
@@ -112,24 +94,11 @@ are mostly useful for developers. Use with care.
|
||||
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
|
||||
sensitivity. Use only under direction of a developer.
|
||||
|
||||
STNORESTART Equivalent to the -no-restart argument.
|
||||
|
||||
STNOUPGRADE Disable automatic upgrades.
|
||||
|
||||
STHASHING Select the SHA256 hashing package to use. Possible values
|
||||
are "standard" for the Go standard library implementation,
|
||||
"minio" for the github.com/minio/sha256-simd implementation,
|
||||
and blank (the default) for auto detection.
|
||||
|
||||
STRECHECKDBEVERY Set to a time interval to override the default database
|
||||
check interval of 30 days (720h). The interval understands
|
||||
"h", "m" and "s" abbreviations for hours minutes and seconds.
|
||||
Valid values are like "720h", "30s", etc.
|
||||
|
||||
STGCINDIRECTEVERY Set to a time interval to override the default database
|
||||
indirection GC interval of 13 hours. Same format as the
|
||||
STRECHECKDBEVERY variable.
|
||||
|
||||
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
|
||||
available CPU cores.
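As an illustrative aside, assuming standard Go duration parsing (the new serve options below declare these values as time.Duration), the interval format works like this:

package main

import (
	"fmt"
	"time"
)

func main() {
	// "h", "m" and "s" mean hours, minutes and seconds, so "720h" is 30 days.
	d, err := time.ParseDuration("720h")
	if err != nil {
		panic(err)
	}
	fmt.Println(d, d.Hours()/24) // 720h0m0s 30
}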
|
||||
|
||||
@@ -143,218 +112,218 @@ Debugging Facilities
|
||||
|
||||
The following are valid values for the STTRACE variable:
|
||||
|
||||
%s`
|
||||
%s
|
||||
`
|
||||
)
|
||||
|
||||
var (
|
||||
// Environment options
|
||||
innerProcess = os.Getenv("STMONITORED") != ""
|
||||
noDefaultFolder = os.Getenv("STNODEFAULTFOLDER") != ""
|
||||
|
||||
upgradeCheckInterval = 5 * time.Minute
|
||||
upgradeRetryInterval = time.Hour
|
||||
upgradeCheckKey = "lastUpgradeCheck"
|
||||
upgradeTimeKey = "lastUpgradeTime"
|
||||
upgradeVersionKey = "lastUpgradeVersion"
|
||||
|
||||
errConcurrentUpgrade = errors.New("upgrade prevented by other running Syncthing instance")
|
||||
errTooEarlyUpgradeCheck = fmt.Errorf("last upgrade check happened less than %v ago, skipping", upgradeCheckInterval)
|
||||
errTooEarlyUpgrade = fmt.Errorf("last upgrade happened less than %v ago, skipping", upgradeRetryInterval)
|
||||
)
|
||||
|
||||
type RuntimeOptions struct {
|
||||
syncthing.Options
|
||||
homeDir string
|
||||
confDir string
|
||||
dataDir string
|
||||
resetDatabase bool
|
||||
showVersion bool
|
||||
showPaths bool
|
||||
showDeviceId bool
|
||||
doUpgrade bool
|
||||
doUpgradeCheck bool
|
||||
upgradeTo string
|
||||
noBrowser bool
|
||||
browserOnly bool
|
||||
hideConsole bool
|
||||
logFile string
|
||||
logMaxSize int
|
||||
logMaxFiles int
|
||||
auditEnabled bool
|
||||
auditFile string
|
||||
paused bool
|
||||
unpaused bool
|
||||
guiAddress string
|
||||
guiAPIKey string
|
||||
generateDir string
|
||||
noRestart bool
|
||||
cpuProfile bool
|
||||
stRestarting bool
|
||||
logFlags int
|
||||
showHelp bool
|
||||
allowNewerConfig bool
|
||||
// The entrypoint struct is the main entry point for the command line parser. The
|
||||
// commands and options here are top level commands to syncthing.
|
||||
// Cli is just a placeholder for the help text (see main).
|
||||
var entrypoint struct {
|
||||
Serve serveOptions `cmd:"" help:"Run Syncthing"`
|
||||
Generate generate.CLI `cmd:"" help:"Generate key and config, then exit"`
|
||||
Decrypt decrypt.CLI `cmd:"" help:"Decrypt or verify an encrypted folder"`
|
||||
Cli struct{} `cmd:"" help:"Command line interface for Syncthing"`
|
||||
}
|
||||
|
||||
func defaultRuntimeOptions() RuntimeOptions {
|
||||
options := RuntimeOptions{
|
||||
Options: syncthing.Options{
|
||||
AssetDir: os.Getenv("STGUIASSETS"),
|
||||
NoUpgrade: os.Getenv("STNOUPGRADE") != "",
|
||||
ProfilerURL: os.Getenv("STPROFILER"),
|
||||
},
|
||||
noRestart: os.Getenv("STNORESTART") != "",
|
||||
cpuProfile: os.Getenv("STCPUPROFILE") != "",
|
||||
stRestarting: os.Getenv("STRESTART") != "",
|
||||
logFlags: log.Ltime,
|
||||
logMaxSize: 10 << 20, // 10 MiB
|
||||
logMaxFiles: 3, // plus the current one
|
||||
}
|
||||
// serveOptions are the options for the `syncthing serve` command.
|
||||
type serveOptions struct {
|
||||
cmdutil.CommonOptions
|
||||
AllowNewerConfig bool `help:"Allow loading newer than current config version"`
|
||||
Audit bool `help:"Write events to audit file"`
|
||||
AuditFile string `name:"auditfile" placeholder:"PATH" help:"Specify audit file (use \"-\" for stdout, \"--\" for stderr)"`
|
||||
BrowserOnly bool `help:"Open GUI in browser"`
|
||||
DataDir string `name:"data" placeholder:"PATH" help:"Set data directory (database and logs)"`
|
||||
DeviceID bool `help:"Show the device ID"`
|
||||
GenerateDir string `name:"generate" placeholder:"PATH" help:"Generate key and config in specified dir, then exit"` //DEPRECATED: replaced by subcommand!
|
||||
GUIAddress string `name:"gui-address" placeholder:"URL" help:"Override GUI address (e.g. \"http://192.0.2.42:8443\")"`
|
||||
GUIAPIKey string `name:"gui-apikey" placeholder:"API-KEY" help:"Override GUI API key"`
|
||||
LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name (see below)"`
|
||||
LogFlags int `name:"logflags" default:"${logFlags}" placeholder:"BITS" help:"Select information in log line prefix (see below)"`
|
||||
LogMaxFiles int `placeholder:"N" default:"${logMaxFiles}" name:"log-max-old-files" help:"Number of old files to keep (zero to keep only current)"`
|
||||
LogMaxSize int `placeholder:"BYTES" default:"${logMaxSize}" help:"Maximum size of any file (zero to disable log rotation)"`
|
||||
NoBrowser bool `help:"Do not start browser"`
|
||||
NoRestart bool `env:"STNORESTART" help:"Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash"`
|
||||
NoUpgrade bool `env:"STNOUPGRADE" help:"Disable automatic upgrades"`
|
||||
Paths bool `help:"Show configuration paths"`
|
||||
Paused bool `help:"Start with all devices and folders paused"`
|
||||
Unpaused bool `help:"Start with all devices and folders unpaused"`
|
||||
Upgrade bool `help:"Perform upgrade"`
|
||||
UpgradeCheck bool `help:"Check for available upgrade"`
|
||||
UpgradeTo string `placeholder:"URL" help:"Force upgrade directly from specified URL"`
|
||||
Verbose bool `help:"Print verbose log output"`
|
||||
Version bool `help:"Show version"`
|
||||
|
||||
// Debug options below
|
||||
DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"`
|
||||
DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"`
|
||||
DebugDeadlockTimeout int `placeholder:"SECONDS" env:"STDEADLOCKTIMEOUT" help:"Used for debugging internal deadlocks"`
|
||||
DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"`
|
||||
DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"`
|
||||
DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"`
|
||||
DebugProfileCPU bool `help:"Write a CPU profile to cpu-$pid.pprof on exit" env:"STCPUPROFILE"`
|
||||
DebugProfileHeap bool `env:"STHEAPPROFILE" help:"Write heap profiles to heap-$pid-$timestamp.pprof each time heap usage increases"`
|
||||
DebugProfilerListen string `placeholder:"ADDR" env:"STPROFILER" help:"Network profiler listen address"`
|
||||
DebugResetDatabase bool `name:"reset-database" help:"Reset the database, forcing a full rescan and resync"`
|
||||
DebugResetDeltaIdxs bool `name:"reset-deltas" help:"Reset delta index IDs, forcing a full index exchange"`
|
||||
|
||||
// Internal options, not shown to users
|
||||
InternalRestarting bool `env:"STRESTART" hidden:"1"`
|
||||
InternalInnerProcess bool `env:"STMONITORED" hidden:"1"`
|
||||
}
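A minimal, self-contained sketch of how kong consumes struct tags of this shape (hypothetical options, not Syncthing's; kong.Vars supplies the ${...} defaults the same way defaultVars does below):

package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// demoOptions uses the same tag style as serveOptions above, with made-up flags.
type demoOptions struct {
	LogFile string `name:"logfile" default:"${logFile}" placeholder:"PATH" help:"Log file name"`
	Verbose bool   `help:"Print verbose log output"`
}

func main() {
	var opts demoOptions
	// kong.Vars fills in ${logFile}, mirroring what defaultVars() provides.
	parser, err := kong.New(&opts, kong.Vars{"logFile": "-"})
	if err != nil {
		panic(err)
	}
	if _, err := parser.Parse([]string{"--verbose"}); err != nil {
		panic(err)
	}
	fmt.Println(opts.LogFile, opts.Verbose) // "-" true
}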
|
||||
|
||||
func defaultVars() kong.Vars {
|
||||
vars := kong.Vars{}
|
||||
|
||||
vars["logFlags"] = strconv.Itoa(logger.DefaultFlags)
|
||||
vars["logMaxSize"] = strconv.Itoa(10 << 20) // 10 MiB
|
||||
vars["logMaxFiles"] = "3" // plus the current one
|
||||
|
||||
if os.Getenv("STTRACE") != "" {
|
||||
options.logFlags = logger.DebugFlags
|
||||
vars["logFlags"] = strconv.Itoa(logger.DebugFlags)
|
||||
}
|
||||
|
||||
// On non-Windows, we explicitly default to "-" which means stdout. On
|
||||
// Windows, the "default" options.logFile will later be replaced with the
|
||||
// default path, unless the user has manually specified "-" or
|
||||
// something else.
|
||||
if runtime.GOOS == "windows" {
|
||||
options.logFile = "default"
|
||||
if build.IsWindows {
|
||||
vars["logFile"] = "default"
|
||||
} else {
|
||||
options.logFile = "-"
|
||||
vars["logFile"] = "-"
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func parseCommandLineOptions() RuntimeOptions {
|
||||
options := defaultRuntimeOptions()
|
||||
|
||||
flag.StringVar(&options.generateDir, "generate", "", "Generate key and config in specified dir, then exit")
|
||||
flag.StringVar(&options.guiAddress, "gui-address", options.guiAddress, "Override GUI address (e.g. \"http://192.0.2.42:8443\")")
|
||||
flag.StringVar(&options.guiAPIKey, "gui-apikey", options.guiAPIKey, "Override GUI API key")
|
||||
flag.StringVar(&options.homeDir, "home", "", "Set configuration and data directory")
|
||||
flag.StringVar(&options.confDir, "config", "", "Set configuration directory (config and keys)")
|
||||
flag.StringVar(&options.dataDir, "data", "", "Set data directory (database and logs)")
|
||||
flag.IntVar(&options.logFlags, "logflags", options.logFlags, "Select information in log line prefix (see below)")
|
||||
flag.BoolVar(&options.noBrowser, "no-browser", false, "Do not start browser")
|
||||
flag.BoolVar(&options.browserOnly, "browser-only", false, "Open GUI in browser")
|
||||
flag.BoolVar(&options.noRestart, "no-restart", options.noRestart, "Do not restart Syncthing when exiting due to API/GUI command, upgrade, or crash")
|
||||
flag.BoolVar(&options.resetDatabase, "reset-database", false, "Reset the database, forcing a full rescan and resync")
|
||||
flag.BoolVar(&options.ResetDeltaIdxs, "reset-deltas", false, "Reset delta index IDs, forcing a full index exchange")
|
||||
flag.BoolVar(&options.doUpgrade, "upgrade", false, "Perform upgrade")
|
||||
flag.BoolVar(&options.doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
|
||||
flag.BoolVar(&options.showVersion, "version", false, "Show version")
|
||||
flag.BoolVar(&options.showHelp, "help", false, "Show this help")
|
||||
flag.BoolVar(&options.showPaths, "paths", false, "Show configuration paths")
|
||||
flag.BoolVar(&options.showDeviceId, "device-id", false, "Show the device ID")
|
||||
flag.StringVar(&options.upgradeTo, "upgrade-to", options.upgradeTo, "Force upgrade directly from specified URL")
|
||||
flag.BoolVar(&options.auditEnabled, "audit", false, "Write events to audit file")
|
||||
flag.BoolVar(&options.Verbose, "verbose", false, "Print verbose log output")
|
||||
flag.BoolVar(&options.paused, "paused", false, "Start with all devices and folders paused")
|
||||
flag.BoolVar(&options.unpaused, "unpaused", false, "Start with all devices and folders unpaused")
|
||||
flag.StringVar(&options.logFile, "logfile", options.logFile, "Log file name (see below).")
|
||||
flag.IntVar(&options.logMaxSize, "log-max-size", options.logMaxSize, "Maximum size of any file (zero to disable log rotation).")
|
||||
flag.IntVar(&options.logMaxFiles, "log-max-old-files", options.logMaxFiles, "Number of old files to keep (zero to keep only current).")
|
||||
flag.StringVar(&options.auditFile, "auditfile", options.auditFile, "Specify audit file (use \"-\" for stdout, \"--\" for stderr)")
|
||||
flag.BoolVar(&options.allowNewerConfig, "allow-newer-config", false, "Allow loading newer than current config version")
|
||||
if runtime.GOOS == "windows" {
|
||||
// Allow user to hide the console window
|
||||
flag.BoolVar(&options.hideConsole, "no-console", false, "Hide console window")
|
||||
}
|
||||
|
||||
longUsage := fmt.Sprintf(extraUsage, debugFacilities())
|
||||
flag.Usage = usageFor(flag.CommandLine, usage, longUsage)
|
||||
flag.Parse()
|
||||
|
||||
if len(flag.Args()) > 0 {
|
||||
flag.Usage()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func setLocation(enum locations.BaseDirEnum, loc string) error {
|
||||
if !filepath.IsAbs(loc) {
|
||||
var err error
|
||||
loc, err = filepath.Abs(loc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return locations.SetBaseDir(enum, loc)
|
||||
return vars
|
||||
}
|
||||
|
||||
func main() {
|
||||
options := parseCommandLineOptions()
|
||||
l.SetFlags(options.logFlags)
|
||||
|
||||
if options.guiAddress != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIADDRESS", options.guiAddress)
|
||||
}
|
||||
if options.guiAPIKey != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIAPIKEY", options.guiAPIKey)
|
||||
// The "cli" subcommand uses a different command line parser, and e.g. help
|
||||
// gets mangled when integrating it as a subcommand -> detect it here at the
|
||||
// beginning.
|
||||
if len(os.Args) > 1 && os.Args[1] == "cli" {
|
||||
if err := cli.Run(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if options.hideConsole {
|
||||
// First some massaging of the raw command line to fit the new model.
|
||||
// Basically this means adding the default command at the front, and
|
||||
// converting -options to --options.
|
||||
|
||||
args := os.Args[1:]
|
||||
switch {
|
||||
case len(args) == 0:
|
||||
// Empty command line is equivalent to just calling serve
|
||||
args = []string{"serve"}
|
||||
case args[0] == "-help":
|
||||
// For consistency, we consider this equivalent with --help even
|
||||
// though kong would otherwise consider it a bad flag.
|
||||
args[0] = "--help"
|
||||
case args[0] == "-h", args[0] == "--help":
|
||||
// Top level request for help, let it pass as-is to be handled by
|
||||
// kong to list commands.
|
||||
case strings.HasPrefix(args[0], "-"):
|
||||
// There are flags not preceded by a command, so we tack on the
|
||||
// "serve" command and convert the old style arguments (single dash)
|
||||
// to new style (double dash).
|
||||
args = append([]string{"serve"}, convertLegacyArgs(args)...)
|
||||
}
|
||||
|
||||
// Create a parser with an overridden help function to print our extra
|
||||
// help info.
|
||||
parser, err := kong.New(&entrypoint, kong.Help(helpHandler), defaultVars())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, err := parser.Parse(args)
|
||||
parser.FatalIfErrorf(err)
|
||||
ctx.BindTo(l, (*logger.Logger)(nil)) // main logger available to subcommands
|
||||
err = ctx.Run()
|
||||
parser.FatalIfErrorf(err)
|
||||
}
|
||||
|
||||
func helpHandler(options kong.HelpOptions, ctx *kong.Context) error {
|
||||
if err := kong.DefaultHelpPrinter(options, ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Command() == "serve" {
|
||||
// Help was requested for `syncthing serve`, so we add our extra
|
||||
// usage info after the normal options output.
|
||||
fmt.Printf(extraUsage, debugFacilities())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// serveOptions.Run() is the entrypoint for `syncthing serve`
|
||||
func (options serveOptions) Run() error {
|
||||
l.SetFlags(options.LogFlags)
|
||||
|
||||
if options.GUIAddress != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIADDRESS", options.GUIAddress)
|
||||
}
|
||||
if options.GUIAPIKey != "" {
|
||||
// The config picks this up from the environment.
|
||||
os.Setenv("STGUIAPIKEY", options.GUIAPIKey)
|
||||
}
|
||||
|
||||
if options.HideConsole {
|
||||
osutil.HideConsole()
|
||||
}
|
||||
|
||||
// Not set as default above because the strings can be really long.
|
||||
var err error
|
||||
homeSet := options.homeDir != ""
|
||||
confSet := options.confDir != ""
|
||||
dataSet := options.dataDir != ""
|
||||
switch {
|
||||
case dataSet != confSet:
|
||||
err = errors.New("either both or none of -conf and -data must be given, use -home to set both at once")
|
||||
case homeSet && dataSet:
|
||||
err = errors.New("-home must not be used together with -conf and -data")
|
||||
case homeSet:
|
||||
if err = setLocation(locations.ConfigBaseDir, options.homeDir); err == nil {
|
||||
err = setLocation(locations.DataBaseDir, options.homeDir)
|
||||
}
|
||||
case dataSet:
|
||||
if err = setLocation(locations.ConfigBaseDir, options.confDir); err == nil {
|
||||
err = setLocation(locations.DataBaseDir, options.dataDir)
|
||||
}
|
||||
}
|
||||
err := cmdutil.SetConfigDataLocationsFromFlags(options.HomeDir, options.ConfDir, options.DataDir)
|
||||
if err != nil {
|
||||
l.Warnln("Command line options:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
if options.logFile == "default" || options.logFile == "" {
|
||||
// Treat an explicitly empty log file name as no log file
|
||||
if options.LogFile == "" {
|
||||
options.LogFile = "-"
|
||||
}
|
||||
if options.LogFile != "default" {
|
||||
// We must set this *after* expandLocations above.
|
||||
// Handling an empty value is for backwards compatibility (<1.4.1).
|
||||
options.logFile = locations.Get(locations.LogFile)
|
||||
if err := locations.Set(locations.LogFile, options.LogFile); err != nil {
|
||||
l.Warnln("Setting log file path:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
}
|
||||
|
||||
if options.AssetDir == "" {
|
||||
if options.DebugGUIAssetsDir != "" {
|
||||
// The asset dir is blank if STGUIASSETS wasn't set, in which case we
|
||||
// should look for extra assets in the default place.
|
||||
options.AssetDir = locations.Get(locations.GUIAssets)
|
||||
if err := locations.Set(locations.GUIAssets, options.DebugGUIAssetsDir); err != nil {
|
||||
l.Warnln("Setting GUI assets path:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
}
|
||||
|
||||
if options.showVersion {
|
||||
if options.Version {
|
||||
fmt.Println(build.LongVersion)
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.showHelp {
|
||||
flag.Usage()
|
||||
return
|
||||
if options.Paths {
|
||||
fmt.Print(locations.PrettyPaths())
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.showPaths {
|
||||
showPaths(options)
|
||||
return
|
||||
}
|
||||
|
||||
if options.showDeviceId {
|
||||
if options.DeviceID {
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
@@ -365,54 +334,55 @@ func main() {
|
||||
}
|
||||
|
||||
fmt.Println(protocol.NewDeviceID(cert.Certificate[0]))
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.browserOnly {
|
||||
if err := openGUI(protocol.EmptyDeviceID); err != nil {
|
||||
if options.BrowserOnly {
|
||||
if err := openGUI(); err != nil {
|
||||
l.Warnln("Failed to open web UI:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.generateDir != "" {
|
||||
if err := generate(options.generateDir); err != nil {
|
||||
if options.GenerateDir != "" {
|
||||
if err := generate.Generate(l, options.GenerateDir, "", "", options.NoDefaultFolder, options.SkipPortProbing); err != nil {
|
||||
l.Warnln("Failed to generate config and keys:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure that our home directory exists.
|
||||
if err := ensureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0700); err != nil {
|
||||
if err := syncthing.EnsureDir(locations.GetBaseDir(locations.ConfigBaseDir), 0700); err != nil {
|
||||
l.Warnln("Failure on home directory:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
if options.upgradeTo != "" {
|
||||
err := upgrade.ToURL(options.upgradeTo)
|
||||
if options.UpgradeTo != "" {
|
||||
err := upgrade.ToURL(options.UpgradeTo)
|
||||
if err != nil {
|
||||
l.Warnln("Error while Upgrading:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
l.Infoln("Upgraded from", options.upgradeTo)
|
||||
return
|
||||
l.Infoln("Upgraded from", options.UpgradeTo)
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.doUpgradeCheck {
|
||||
if options.UpgradeCheck {
|
||||
if _, err := checkUpgrade(); err != nil {
|
||||
l.Warnln("Checking for upgrade:", err)
|
||||
os.Exit(exitCodeForUpgrade(err))
|
||||
}
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.doUpgrade {
|
||||
if options.Upgrade {
|
||||
release, err := checkUpgrade()
|
||||
if err == nil {
|
||||
// Use leveldb database locks to protect against concurrent upgrades
|
||||
ldb, err := syncthing.OpenDBBackend(locations.Get(locations.Database), config.TuningAuto)
|
||||
var ldb backend.Backend
|
||||
ldb, err = syncthing.OpenDBBackend(locations.Get(locations.Database), config.TuningAuto)
|
||||
if err != nil {
|
||||
err = upgradeViaRest()
|
||||
} else {
|
||||
@@ -428,29 +398,30 @@ func main() {
|
||||
os.Exit(svcutil.ExitUpgrade.AsInt())
|
||||
}
|
||||
|
||||
if options.resetDatabase {
|
||||
if options.DebugResetDatabase {
|
||||
if err := resetDB(); err != nil {
|
||||
l.Warnln("Resetting database:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
l.Infoln("Successfully reset database - it will be rebuilt after next start.")
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if innerProcess {
|
||||
if options.InternalInnerProcess {
|
||||
syncthingMain(options)
|
||||
} else {
|
||||
monitorMain(options)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func openGUI(myID protocol.DeviceID) error {
|
||||
cfg, err := loadOrDefaultConfig(myID, events.NoopLogger)
|
||||
func openGUI() error {
|
||||
cfg, err := loadOrDefaultConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cfg.GUI().Enabled {
|
||||
if err := openURL(cfg.GUI().URL()); err != nil {
|
||||
if guiCfg := cfg.GUI(); guiCfg.Enabled {
|
||||
if err := openURL(guiCfg.URL()); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@@ -459,46 +430,6 @@ func openGUI(myID protocol.DeviceID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func generate(generateDir string) error {
|
||||
dir, err := fs.ExpandTilde(generateDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ensureDir(dir, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var myID protocol.DeviceID
|
||||
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err == nil {
|
||||
l.Warnln("Key exists; will not overwrite.")
|
||||
} else {
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, tlsDefaultCommonName, deviceCertLifetimeDays)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create certificate")
|
||||
}
|
||||
}
|
||||
myID = protocol.NewDeviceID(cert.Certificate[0])
|
||||
l.Infoln("Device ID:", myID)
|
||||
|
||||
cfgFile := filepath.Join(dir, "config.xml")
|
||||
if _, err := os.Stat(cfgFile); err == nil {
|
||||
l.Warnln("Config exists; will not overwrite.")
|
||||
return nil
|
||||
}
|
||||
cfg, err := syncthing.DefaultConfig(cfgFile, myID, events.NoopLogger, noDefaultFolder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cfg.Save()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "save config")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func debugFacilities() string {
|
||||
facilities := l.Facilities()
|
||||
|
||||
@@ -530,7 +461,7 @@ func (e *errNoUpgrade) Error() string {
|
||||
}
|
||||
|
||||
func checkUpgrade() (upgrade.Release, error) {
|
||||
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
cfg, err := loadOrDefaultConfig()
|
||||
if err != nil {
|
||||
return upgrade.Release{}, err
|
||||
}
|
||||
@@ -549,7 +480,11 @@ func checkUpgrade() (upgrade.Release, error) {
|
||||
}
|
||||
|
||||
func upgradeViaRest() error {
|
||||
cfg, _ := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
cfg, err := loadOrDefaultConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u, err := url.Parse(cfg.GUI().URL())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -573,7 +508,7 @@ func upgradeViaRest() error {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
bs, err := io.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -584,7 +519,17 @@ func upgradeViaRest() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
func syncthingMain(options serveOptions) {
|
||||
if options.DebugProfileBlock {
|
||||
startBlockProfiler()
|
||||
}
|
||||
if options.DebugProfileHeap {
|
||||
startHeapProfiler()
|
||||
}
|
||||
if options.DebugPerfStats {
|
||||
startPerfStats()
|
||||
}
|
||||
|
||||
// Set a log prefix similar to the ID we will have later on, or early log
|
||||
// lines look ugly.
|
||||
l.SetPrefix("[start] ")
|
||||
@@ -615,20 +560,18 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
evLogger := events.NewLogger()
|
||||
earlyService.Add(evLogger)
|
||||
|
||||
cfgWrapper, err := syncthing.LoadConfigAtStartup(locations.Get(locations.ConfigFile), cert, evLogger, runtimeOptions.allowNewerConfig, noDefaultFolder)
|
||||
cfgWrapper, err := syncthing.LoadConfigAtStartup(locations.Get(locations.ConfigFile), cert, evLogger, options.AllowNewerConfig, options.NoDefaultFolder, options.SkipPortProbing)
|
||||
if err != nil {
|
||||
l.Warnln("Failed to initialize config:", err)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
if cfgService, ok := cfgWrapper.(suture.Service); ok {
|
||||
earlyService.Add(cfgService)
|
||||
}
|
||||
earlyService.Add(cfgWrapper)
|
||||
|
||||
// Candidate builds should auto upgrade. Make sure the option is set,
|
||||
// unless we are in a build where it's disabled or the STNOUPGRADE
|
||||
// environment variable is set.
|
||||
|
||||
if build.IsCandidate && !upgrade.DisabledByCompilation && !runtimeOptions.NoUpgrade {
|
||||
if build.IsCandidate && !upgrade.DisabledByCompilation && !options.NoUpgrade {
|
||||
cfgWrapper.Modify(func(cfg *config.Configuration) {
|
||||
l.Infoln("Automatic upgrade is always enabled for candidate releases.")
|
||||
if cfg.Options.AutoUpgradeIntervalH == 0 || cfg.Options.AutoUpgradeIntervalH > 24 {
|
||||
@@ -649,10 +592,10 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
}
|
||||
|
||||
// Check if auto-upgrades is possible, and if yes, and it's enabled do an initial
|
||||
// upgrade immedately. The auto-upgrade routine can only be started
|
||||
// upgrade immediately. The auto-upgrade routine can only be started
|
||||
// later after App is initialised.
|
||||
|
||||
autoUpgradePossible := autoUpgradePossible(runtimeOptions)
|
||||
autoUpgradePossible := autoUpgradePossible(options)
|
||||
if autoUpgradePossible && cfgWrapper.Options().AutoUpgradeEnabled() {
|
||||
// try to do upgrade directly and log the error if relevant.
|
||||
release, err := initialAutoUpgradeCheck(db.NewMiscDataNamespace(ldb))
|
||||
@@ -671,15 +614,23 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
}
|
||||
}
|
||||
|
||||
if runtimeOptions.unpaused {
|
||||
if options.Unpaused {
|
||||
setPauseState(cfgWrapper, false)
|
||||
} else if runtimeOptions.paused {
|
||||
} else if options.Paused {
|
||||
setPauseState(cfgWrapper, true)
|
||||
}
|
||||
|
||||
appOpts := runtimeOptions.Options
|
||||
if runtimeOptions.auditEnabled {
|
||||
appOpts.AuditWriter = auditWriter(runtimeOptions.auditFile)
|
||||
appOpts := syncthing.Options{
|
||||
DeadlockTimeoutS: options.DebugDeadlockTimeout,
|
||||
NoUpgrade: options.NoUpgrade,
|
||||
ProfilerAddr: options.DebugProfilerListen,
|
||||
ResetDeltaIdxs: options.DebugResetDeltaIdxs,
|
||||
Verbose: options.Verbose,
|
||||
DBRecheckInterval: options.DebugDBRecheckInterval,
|
||||
DBIndirectGCInterval: options.DebugDBIndirectGCInterval,
|
||||
}
|
||||
if options.Audit {
|
||||
appOpts.AuditWriter = auditWriter(options.AuditFile)
|
||||
}
|
||||
if t := os.Getenv("STDEADLOCKTIMEOUT"); t != "" {
|
||||
secs, _ := strconv.Atoi(t)
|
||||
@@ -704,11 +655,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
|
||||
setupSignalHandling(app)
|
||||
|
||||
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
||||
if os.Getenv("GOMAXPROCS") == "" {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
}
|
||||
|
||||
if runtimeOptions.cpuProfile {
|
||||
if options.DebugProfileCPU {
|
||||
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
|
||||
if err != nil {
|
||||
l.Warnln("Creating profile:", err)
|
||||
@@ -720,15 +671,13 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
}
|
||||
}
|
||||
|
||||
go standbyMonitor(app, cfgWrapper)
|
||||
|
||||
if err := app.Start(); err != nil {
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
cleanConfigDirectory()
|
||||
|
||||
if cfgWrapper.Options().StartBrowser && !runtimeOptions.noBrowser && !runtimeOptions.stRestarting {
|
||||
if cfgWrapper.Options().StartBrowser && !options.NoBrowser && !options.InternalRestarting {
|
||||
// Can potentially block if the utility we are invoking doesn't
|
||||
// fork, and just execs, hence keep it in its own routine.
|
||||
go func() { _ = openURL(cfgWrapper.GUI().URL()) }()
|
||||
@@ -740,7 +689,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
|
||||
l.Warnln("Syncthing stopped with error:", app.Error())
|
||||
}
|
||||
|
||||
if runtimeOptions.cpuProfile {
|
||||
if options.DebugProfileCPU {
|
||||
pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
@@ -768,12 +717,15 @@ func setupSignalHandling(app *syncthing.App) {
|
||||
}()
|
||||
}
|
||||
|
||||
func loadOrDefaultConfig(myID protocol.DeviceID, evLogger events.Logger) (config.Wrapper, error) {
|
||||
// loadOrDefaultConfig creates a temporary, minimal configuration wrapper if no file
|
||||
// exists. As it disregards some command-line options, that should never be persisted.
|
||||
func loadOrDefaultConfig() (config.Wrapper, error) {
|
||||
cfgFile := locations.Get(locations.ConfigFile)
|
||||
cfg, _, err := config.Load(cfgFile, myID, evLogger)
|
||||
cfg, _, err := config.Load(cfgFile, protocol.EmptyDeviceID, events.NoopLogger)
|
||||
|
||||
if err != nil {
|
||||
cfg, err = syncthing.DefaultConfig(cfgFile, myID, evLogger, noDefaultFolder)
|
||||
newCfg := config.New(protocol.EmptyDeviceID)
|
||||
return config.Wrap(cfgFile, newCfg, protocol.EmptyDeviceID, events.NoopLogger), nil
|
||||
}
|
||||
|
||||
return cfg, err
|
||||
@@ -815,54 +767,11 @@ func resetDB() error {
|
||||
return os.RemoveAll(locations.Get(locations.Database))
|
||||
}
|
||||
|
||||
func ensureDir(dir string, mode fs.FileMode) error {
|
||||
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, dir)
|
||||
err := fs.MkdirAll(".", mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fi, err := fs.Stat("."); err == nil {
|
||||
// Apparently the stat may fail even though the MkdirAll passed. If it
|
||||
// does, we'll just assume things are in order and let other things
|
||||
// fail (like loading or creating the config...).
|
||||
currentMode := fi.Mode() & 0777
|
||||
if currentMode != mode {
|
||||
err := fs.Chmod(".", mode)
|
||||
// This can fail on crappy filesystems, nothing we can do about it.
|
||||
if err != nil {
|
||||
l.Warnln(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func standbyMonitor(app *syncthing.App, cfg config.Wrapper) {
|
||||
restartDelay := 60 * time.Second
|
||||
now := time.Now()
|
||||
for {
|
||||
time.Sleep(10 * time.Second)
|
||||
if time.Since(now) > 2*time.Minute && cfg.Options().RestartOnWakeup {
|
||||
l.Infof("Paused state detected, possibly woke up from standby. Restarting in %v.", restartDelay)
|
||||
|
||||
// We most likely just woke from standby. If we restart
|
||||
// immediately chances are we won't have networking ready. Give
|
||||
// things a moment to stabilize.
|
||||
time.Sleep(restartDelay)
|
||||
|
||||
app.Stop(svcutil.ExitRestart)
|
||||
return
|
||||
}
|
||||
now = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func autoUpgradePossible(runtimeOptions RuntimeOptions) bool {
|
||||
func autoUpgradePossible(options serveOptions) bool {
|
||||
if upgrade.DisabledByCompilation {
|
||||
return false
|
||||
}
|
||||
if runtimeOptions.NoUpgrade {
|
||||
if options.NoUpgrade {
|
||||
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
|
||||
return false
|
||||
}
|
||||
@@ -989,16 +898,6 @@ func cleanConfigDirectory() {
|
||||
}
|
||||
}
|
||||
|
||||
func showPaths(options RuntimeOptions) {
|
||||
fmt.Printf("Configuration file:\n\t%s\n\n", locations.Get(locations.ConfigFile))
|
||||
fmt.Printf("Database directory:\n\t%s\n\n", locations.Get(locations.Database))
|
||||
fmt.Printf("Device private key & certificate files:\n\t%s\n\t%s\n\n", locations.Get(locations.KeyFile), locations.Get(locations.CertFile))
|
||||
fmt.Printf("HTTPS private key & certificate files:\n\t%s\n\t%s\n\n", locations.Get(locations.HTTPSKeyFile), locations.Get(locations.HTTPSCertFile))
|
||||
fmt.Printf("Log file:\n\t%s\n\n", options.logFile)
|
||||
fmt.Printf("GUI override directory:\n\t%s\n\n", options.AssetDir)
|
||||
fmt.Printf("Default sync folder directory:\n\t%s\n\n", locations.Get(locations.DefFolder))
|
||||
}
|
||||
|
||||
func setPauseState(cfgWrapper config.Wrapper, paused bool) {
|
||||
_, err := cfgWrapper.Modify(func(cfg *config.Configuration) {
|
||||
for i := range cfg.Devices {
|
||||
@@ -1020,3 +919,21 @@ func exitCodeForUpgrade(err error) int {
|
||||
}
|
||||
return svcutil.ExitError.AsInt()
|
||||
}
|
||||
|
||||
// convertLegacyArgs returns the slice of arguments with single dash long
|
||||
// flags converted to double dash long flags.
|
||||
func convertLegacyArgs(args []string) []string {
|
||||
// Legacy args begin with a single dash, followed by two or more characters.
|
||||
legacyExp := regexp.MustCompile(`^-\w{2,}`)
|
||||
|
||||
res := make([]string, len(args))
|
||||
for i, arg := range args {
|
||||
if legacyExp.MatchString(arg) {
|
||||
res[i] = "-" + arg
|
||||
} else {
|
||||
res[i] = arg
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
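A hypothetical test sketch (not part of this change) showing the effect on a legacy command line; it assumes the standard testing import:

func TestConvertLegacyArgs(t *testing.T) {
	// "-h" stays as-is: the regexp requires at least two word characters.
	in := []string{"-home", "/var/lib/syncthing", "-verbose", "-h"}
	want := []string{"--home", "/var/lib/syncthing", "--verbose", "-h"}
	got := convertLegacyArgs(in)
	for i, w := range want {
		if got[i] != w {
			t.Errorf("arg %d: got %q, want %q", i, got[i], w)
		}
	}
}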
|
||||
|
||||
@@ -15,16 +15,14 @@ import (
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/svcutil"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
@@ -36,20 +34,21 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
countRestarts = 4
|
||||
loopThreshold = 60 * time.Second
|
||||
restartCounts = 4
|
||||
restartPause = 1 * time.Second
|
||||
restartLoopThreshold = 60 * time.Second
|
||||
logFileAutoCloseDelay = 5 * time.Second
|
||||
logFileMaxOpenTime = time.Minute
|
||||
panicUploadMaxWait = 30 * time.Second
|
||||
panicUploadNoticeWait = 10 * time.Second
|
||||
)
|
||||
|
||||
func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
func monitorMain(options serveOptions) {
|
||||
l.SetPrefix("[monitor] ")
|
||||
|
||||
var dst io.Writer = os.Stdout
|
||||
|
||||
logFile := runtimeOptions.logFile
|
||||
logFile := locations.Get(locations.LogFile)
|
||||
if logFile != "-" {
|
||||
if expanded, err := fs.ExpandTilde(logFile); err == nil {
|
||||
logFile = expanded
|
||||
@@ -59,15 +58,15 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
open := func(name string) (io.WriteCloser, error) {
|
||||
return newAutoclosedFile(name, logFileAutoCloseDelay, logFileMaxOpenTime)
|
||||
}
|
||||
if runtimeOptions.logMaxSize > 0 {
|
||||
fileDst, err = newRotatedFile(logFile, open, int64(runtimeOptions.logMaxSize), runtimeOptions.logMaxFiles)
|
||||
if options.LogMaxSize > 0 {
|
||||
fileDst, err = newRotatedFile(logFile, open, int64(options.LogMaxSize), options.LogMaxFiles)
|
||||
} else {
|
||||
fileDst, err = open(logFile)
|
||||
}
|
||||
if err != nil {
|
||||
l.Warnln("Failed to setup logging to file, proceeding with logging to stdout only:", err)
|
||||
} else {
|
||||
if runtime.GOOS == "windows" {
|
||||
if build.IsWindows {
|
||||
// Translate line breaks to Windows standard
|
||||
fileDst = osutil.ReplacingWriter{
|
||||
Writer: fileDst,
|
||||
@@ -84,7 +83,12 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
}
|
||||
|
||||
args := os.Args
|
||||
var restarts [countRestarts]time.Time
|
||||
binary, err := getBinary(args[0])
|
||||
if err != nil {
|
||||
l.Warnln("Error starting the main Syncthing process:", err)
|
||||
panic("Error starting the main Syncthing process")
|
||||
}
|
||||
var restarts [restartCounts]time.Time
|
||||
|
||||
stopSign := make(chan os.Signal, 1)
|
||||
signal.Notify(stopSign, os.Interrupt, sigTerm)
|
||||
@@ -97,15 +101,15 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
for {
|
||||
maybeReportPanics()
|
||||
|
||||
if t := time.Since(restarts[0]); t < loopThreshold {
|
||||
l.Warnf("%d restarts in %v; not retrying further", countRestarts, t)
|
||||
if t := time.Since(restarts[0]); t < restartLoopThreshold {
|
||||
l.Warnf("%d restarts in %v; not retrying further", restartCounts, t)
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
copy(restarts[0:], restarts[1:])
|
||||
restarts[len(restarts)-1] = time.Now()
|
||||
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd := exec.Command(binary, args[1:]...)
|
||||
cmd.Env = childEnv
|
||||
|
||||
stderr, err := cmd.StderrPipe()
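A standalone sketch of the restart-window bookkeeping above: the fixed-size array holds the timestamps of the most recent starts, and when the oldest entry is still inside the threshold the monitor stops retrying (illustrative constants mirroring restartCounts and restartLoopThreshold):

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxStarts = 4             // mirrors restartCounts
	const window = 60 * time.Second // mirrors restartLoopThreshold
	var restarts [maxStarts]time.Time

	start := func() bool {
		if t := time.Since(restarts[0]); t < window {
			fmt.Printf("%d restarts in %v; not retrying further\n", maxStarts, t)
			return false
		}
		copy(restarts[0:], restarts[1:])       // drop the oldest timestamp
		restarts[len(restarts)-1] = time.Now() // record this start
		return true
	}

	for i := 0; i < 5; i++ {
		fmt.Println("start allowed:", start()) // true four times, then false
	}
}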
|
||||
@@ -174,26 +178,26 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
|
||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
exitCode := exiterr.ExitCode()
|
||||
if stopped || runtimeOptions.noRestart {
|
||||
if stopped || options.NoRestart {
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
if exitCode == svcutil.ExitUpgrade.AsInt() {
|
||||
// Restart the monitor process to release the .old
|
||||
// binary as part of the upgrade process.
|
||||
l.Infoln("Restarting monitor...")
|
||||
if err = restartMonitor(args); err != nil {
|
||||
if err = restartMonitor(binary, args); err != nil {
|
||||
l.Warnln("Restart:", err)
|
||||
}
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
}
|
||||
|
||||
if runtimeOptions.noRestart {
|
||||
if options.NoRestart {
|
||||
os.Exit(svcutil.ExitError.AsInt())
|
||||
}
|
||||
|
||||
l.Infoln("Syncthing exited:", err)
|
||||
time.Sleep(1 * time.Second)
|
||||
time.Sleep(restartPause)
|
||||
|
||||
if first {
|
||||
// Let the next child process know that this is not the first time
|
||||
@@ -204,6 +208,19 @@ func monitorMain(runtimeOptions RuntimeOptions) {
|
||||
}
|
||||
}
|
||||
|
||||
func getBinary(args0 string) (string, error) {
|
||||
e, err := os.Executable()
|
||||
if err == nil {
|
||||
return e, nil
|
||||
}
|
||||
// Check if args0 cuts it
|
||||
e, lerr := exec.LookPath(args0)
|
||||
if lerr == nil {
|
||||
return e, nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
br := bufio.NewReader(stderr)
|
||||
|
||||
@@ -256,7 +273,7 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
|
||||
* This crash usually occurs due to one of the following reasons: *
|
||||
* - Syncthing being stopped abruptly (killed/loss of power) *
|
||||
* - Bad hardware (memory/disk issues) *
|
||||
* - Software that affects disk writes (SSD caching software and simillar) *
|
||||
* - Software that affects disk writes (SSD caching software and similar) *
|
||||
* *
|
||||
* Please see the following URL for instructions on how to recover: *
|
||||
* https://docs.syncthing.net/users/faq.html#my-syncthing-database-is-corrupt *
|
||||
@@ -310,40 +327,30 @@ func copyStdout(stdout io.Reader, dst io.Writer) {
|
||||
}
|
||||
}
|
||||
|
||||
func restartMonitor(args []string) error {
|
||||
func restartMonitor(binary string, args []string) error {
|
||||
// Set the STRESTART environment variable to indicate to the next
|
||||
// process that this is a restart and not initial start. This prevents
|
||||
// opening the browser on startup.
|
||||
os.Setenv("STRESTART", "yes")
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
if !build.IsWindows {
|
||||
// syscall.Exec is the cleanest way to restart on Unixes as it
|
||||
// replaces the current process with the new one, keeping the pid and
|
||||
// controlling terminal and so on
|
||||
return restartMonitorUnix(args)
|
||||
return restartMonitorUnix(binary, args)
|
||||
}
|
||||
|
||||
// but it isn't supported on Windows, so there we start a normal
|
||||
// exec.Command and return.
|
||||
return restartMonitorWindows(args)
|
||||
return restartMonitorWindows(binary, args)
|
||||
}
|
||||
|
||||
func restartMonitorUnix(args []string) error {
|
||||
if !strings.ContainsRune(args[0], os.PathSeparator) {
|
||||
// The path to the binary doesn't contain a slash, so it should be
|
||||
// found in $PATH.
|
||||
binary, err := exec.LookPath(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
args[0] = binary
|
||||
}
|
||||
|
||||
func restartMonitorUnix(binary string, args []string) error {
|
||||
return syscall.Exec(args[0], args, os.Environ())
|
||||
}
|
||||
|
||||
func restartMonitorWindows(args []string) error {
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
func restartMonitorWindows(binary string, args []string) error {
|
||||
cmd := exec.Command(binary, args[1:]...)
|
||||
// Retain the standard streams
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdout = os.Stdout
|
||||
@@ -563,7 +570,7 @@ func childEnv() []string {
|
||||
// panicUploadMaxWait uploading panics...
|
||||
func maybeReportPanics() {
|
||||
// Try to get a config to see if/where panics should be reported.
|
||||
cfg, err := loadOrDefaultConfig(protocol.EmptyDeviceID, events.NoopLogger)
|
||||
cfg, err := loadOrDefaultConfig()
|
||||
if err != nil {
|
||||
l.Warnln("Couldn't load config; not reporting crash")
|
||||
return
|
||||
|
||||
@@ -8,7 +8,6 @@ package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -18,14 +17,17 @@ import (
|
||||
func TestRotatedFile(t *testing.T) {
|
||||
// Verify that log rotation happens.
|
||||
|
||||
dir, err := ioutil.TempDir("", "syncthing")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
dir := t.TempDir()
|
||||
|
||||
open := func(name string) (io.WriteCloser, error) {
|
||||
return os.Create(name)
|
||||
f, err := os.Create(name)
|
||||
t.Cleanup(func() {
|
||||
if f != nil {
|
||||
_ = f.Close()
|
||||
}
|
||||
})
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
logName := filepath.Join(dir, "log.txt")
|
||||
@@ -179,7 +181,7 @@ func TestAutoClosedFile(t *testing.T) {
|
||||
}
|
||||
|
||||
// The file should have both writes in it.
|
||||
bs, err := ioutil.ReadFile(file)
|
||||
bs, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -199,7 +201,7 @@ func TestAutoClosedFile(t *testing.T) {
|
||||
}
|
||||
|
||||
// It should now contain three writes, as the file is always opened for appending
|
||||
bs, err = ioutil.ReadFile(file)
|
||||
bs, err = os.ReadFile(file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -4,26 +4,25 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
)
|
||||
|
||||
func openURL(url string) error {
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
if build.IsDarwin {
|
||||
return exec.Command("open", url).Run()
|
||||
|
||||
default:
|
||||
cmd := exec.Command("xdg-open", url)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
return cmd.Run()
|
||||
}
|
||||
cmd := exec.Command("xdg-open", url)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build !solaris && !windows
|
||||
// +build !solaris,!windows
|
||||
|
||||
package main
|
||||
@@ -18,10 +19,8 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if innerProcess && os.Getenv("STPERFSTATS") != "" {
|
||||
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
|
||||
}
|
||||
func startPerfStats() {
|
||||
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
|
||||
}
|
||||
|
||||
func savePerfStats(file string) {
|
||||
|
||||
13
cmd/syncthing/perfstats_unsupported.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright (C) 2021 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:build solaris || windows
|
||||
// +build solaris windows
|
||||
|
||||
package main
|
||||
|
||||
func startPerfStats() {
|
||||
}
|
||||
@@ -4,7 +4,8 @@
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//+build go1.7
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package main
|
||||
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
func optionTable(w io.Writer, rows [][]string) {
|
||||
tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0)
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i > 0 {
|
||||
tw.Write([]byte("\t"))
|
||||
}
|
||||
tw.Write([]byte(cell))
|
||||
}
|
||||
tw.Write([]byte("\n"))
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
func usageFor(fs *flag.FlagSet, usage string, extra string) func() {
|
||||
return func() {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("Usage:\n " + usage + "\n")
|
||||
|
||||
var options [][]string
|
||||
fs.VisitAll(func(f *flag.Flag) {
|
||||
var opt = " -" + f.Name
|
||||
|
||||
if f.DefValue != "false" {
|
||||
opt += "=" + fmt.Sprintf(`"%s"`, f.DefValue)
|
||||
}
|
||||
options = append(options, []string{opt, f.Usage})
|
||||
})
|
||||
|
||||
if len(options) > 0 {
|
||||
b.WriteString("\nOptions:\n")
|
||||
optionTable(&b, options)
|
||||
}
|
||||
|
||||
fmt.Println(b.String())
|
||||
|
||||
if len(extra) > 0 {
|
||||
fmt.Println(extra)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -47,32 +47,25 @@ func main() {
|
||||
func runAggregation(db *sql.DB) {
|
||||
since := maxIndexedDay(db, "VersionSummary")
|
||||
log.Println("Aggregating VersionSummary data since", since)
|
||||
rows, err := aggregateVersionSummary(db, since)
|
||||
rows, err := aggregateVersionSummary(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Fatalln("aggregate:", err)
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
log.Println("Aggregating UserMovement data")
|
||||
rows, err = aggregateUserMovement(db)
|
||||
if err != nil {
|
||||
log.Fatalln("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
log.Println("Aggregating Performance data")
|
||||
since = maxIndexedDay(db, "Performance")
|
||||
rows, err = aggregatePerformance(db, since)
|
||||
log.Println("Aggregating Performance data since", since)
|
||||
rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Fatalln("aggregate:", err)
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
|
||||
log.Println("Aggregating BlockStats data")
|
||||
since = maxIndexedDay(db, "BlockStats")
|
||||
rows, err = aggregateBlockStats(db, since)
|
||||
log.Println("Aggregating BlockStats data since", since)
|
||||
rows, err = aggregateBlockStats(db, since.Add(24*time.Hour))
|
||||
if err != nil {
|
||||
log.Fatalln("aggregate:", err)
|
||||
log.Println("aggregate:", err)
|
||||
}
|
||||
log.Println("Inserted", rows, "rows")
|
||||
}
|
||||
@@ -119,13 +112,13 @@ func setupDB(db *sql.DB) error {
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
|
||||
Day TIMESTAMP NOT NULL,
Reports INTEGER NOT NULL,
Total INTEGER NOT NULL,
Renamed INTEGER NOT NULL,
Reused INTEGER NOT NULL,
Pulled INTEGER NOT NULL,
CopyOrigin INTEGER NOT NULL,
CopyOriginShifted INTEGER NOT NULL,
CopyElsewhere INTEGER NOT NULL
Total BIGINT NOT NULL,
Renamed BIGINT NOT NULL,
Reused BIGINT NOT NULL,
Pulled BIGINT NOT NULL,
CopyOrigin BIGINT NOT NULL,
CopyOriginShifted BIGINT NOT NULL,
CopyElsewhere BIGINT NOT NULL
)`)
if err != nil {
return err
@@ -135,35 +128,35 @@ func setupDB(db *sql.DB) error {

row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, err = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
}

row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, err = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
}

row = db.QueryRow(`SELECT 'MovementDayIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, err = db.Exec(`CREATE INDEX MovementDayIndex ON UserMovement (Day)`)
_, _ = db.Exec(`CREATE INDEX MovementDayIndex ON UserMovement (Day)`)
}

row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, err = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
}

row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
if err := row.Scan(&t); err != nil {
_, err = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
}

return err
return nil
}

func maxIndexedDay(db *sql.DB, table string) time.Time {
var t time.Time
row := db.QueryRow("SELECT MAX(Day) FROM " + table)
row := db.QueryRow("SELECT MAX(DATE_TRUNC('day', Day)) FROM " + table)
err := row.Scan(&t)
if err != nil {
return time.Time{}
@@ -179,8 +172,8 @@ func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
COUNT(*) AS Count
FROM ReportsJson
WHERE
DATE_TRUNC('day', Received) > $1
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
Received > $1
AND Received < DATE_TRUNC('day', NOW())
AND Report->>'version' like 'v_.%'
GROUP BY Day, Ver
);
@@ -198,7 +191,8 @@ func aggregateUserMovement(db *sql.DB) (int64, error) {
Report->>'uniqueID'
FROM ReportsJson
WHERE
DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
Report->>'uniqueID' IS NOT NULL
AND Received < DATE_TRUNC('day', NOW())
AND Report->>'version' like 'v_.%'
ORDER BY Day
`)
@@ -283,9 +277,11 @@ func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
FROM ReportsJson
WHERE
DATE_TRUNC('day', Received) > $1
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
Received > $1
AND Received < DATE_TRUNC('day', NOW())
AND Report->>'version' like 'v_.%'
/* Some custom implementation reported bytes when we expect megabytes, cap at petabyte */
AND (Report->>'memorySize')::numeric < 1073741824
GROUP BY Day
);
`, since)
@@ -303,17 +299,17 @@ func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
SELECT
DATE_TRUNC('day', Received) AS Day,
COUNT(1) As Reports,
SUM((Report->'blockStats'->>'total')::numeric) AS Total,
SUM((Report->'blockStats'->>'renamed')::numeric) AS Renamed,
SUM((Report->'blockStats'->>'reused')::numeric) AS Reused,
SUM((Report->'blockStats'->>'pulled')::numeric) AS Pulled,
SUM((Report->'blockStats'->>'copyOrigin')::numeric) AS CopyOrigin,
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric) AS CopyOriginShifted,
SUM((Report->'blockStats'->>'copyElsewhere')::numeric) AS CopyElsewhere
SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total,
SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed,
SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused,
SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled,
SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin,
SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted,
SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere
FROM ReportsJson
WHERE
DATE_TRUNC('day', Received) > $1
AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
Received > $1
AND Received < DATE_TRUNC('day', NOW())
AND (Report->>'urVersion')::numeric >= 3
AND Report->>'version' like 'v_.%'
AND Report->>'version' NOT LIKE 'v0.14.40%'
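The index setup above relies on a PostgreSQL idiom: casting a name to `::regclass` fails when no such relation exists, so a failed `Scan` is treated as "index missing" and the `CREATE INDEX` is then issued, with its error deliberately ignored in the new code. A minimal, hypothetical sketch of that pattern, assuming a reachable PostgreSQL database and the lib/pq driver (the DSN and names below are placeholders, not taken from the actual deployment):

```
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // PostgreSQL driver
)

// ensureIndex creates the named index only if it does not already exist.
func ensureIndex(db *sql.DB, name, ddl string) {
	var t string
	// 'name'::regclass resolves the identifier; Scan fails if it is unknown.
	if err := db.QueryRow(`SELECT '` + name + `'::regclass`).Scan(&t); err != nil {
		// Creation errors are ignored, mirroring the `_, _ = db.Exec(...)` form above.
		_, _ = db.Exec(ddl)
	}
}

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/ur?sslmode=disable")
	if err != nil {
		log.Fatalln("open:", err)
	}
	ensureIndex(db, "VersionDayIndex", `CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
}
```

The rewritten aggregation queries also stop wrapping `Received` in `DATE_TRUNC` inside the `WHERE` comparisons (`Received > $1` instead of `DATE_TRUNC('day', Received) > $1`); comparing the raw column keeps an ordinary b-tree index on `Received` usable, which a function-wrapped comparison would not be without an expression index.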
@@ -13,7 +13,6 @@ import (
"encoding/json"
"html/template"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@@ -26,7 +25,10 @@ import (
"time"
"unicode"

_ "github.com/lib/pq" // PostgreSQL driver
"github.com/oschwald/geoip2-golang"
"golang.org/x/text/cases"
"golang.org/x/text/language"

"github.com/syncthing/syncthing/lib/upgrade"
"github.com/syncthing/syncthing/lib/ur/contract"
@@ -48,19 +50,19 @@ var (
knownDistributions = []distributionMatch{
// Maps well known builders to the official distribution method that
// they represent
{regexp.MustCompile("android-.*teamcity@build.syncthing.net"), "Google Play"},
{regexp.MustCompile("teamcity@build.syncthing.net"), "GitHub"},
{regexp.MustCompile("deb@build.syncthing.net"), "APT"},
{regexp.MustCompile("docker@syncthing.net"), "Docker Hub"},
{regexp.MustCompile("jenkins@build.syncthing.net"), "GitHub"},
{regexp.MustCompile("snap@build.syncthing.net"), "Snapcraft"},
{regexp.MustCompile("android-.*vagrant@basebox-stretch64"), "F-Droid"},
{regexp.MustCompile("builduser@(archlinux|svetlemodry)"), "Arch (3rd party)"},
{regexp.MustCompile("synology@kastelo.net"), "Synology (Kastelo)"},
{regexp.MustCompile("@debian"), "Debian (3rd party)"},
{regexp.MustCompile("@fedora"), "Fedora (3rd party)"},
{regexp.MustCompile(`android-.*teamcity@build\.syncthing\.net`), "Google Play"},
{regexp.MustCompile(`teamcity@build\.syncthing\.net`), "GitHub"},
{regexp.MustCompile(`deb@build\.syncthing\.net`), "APT"},
{regexp.MustCompile(`docker@syncthing\.net`), "Docker Hub"},
{regexp.MustCompile(`jenkins@build\.syncthing\.net`), "GitHub"},
{regexp.MustCompile(`snap@build\.syncthing\.net`), "Snapcraft"},
{regexp.MustCompile(`android-.*vagrant@basebox-stretch64`), "F-Droid"},
{regexp.MustCompile(`builduser@(archlinux|svetlemodry)`), "Arch (3rd party)"},
{regexp.MustCompile(`synology@kastelo\.net`), "Synology (Kastelo)"},
{regexp.MustCompile(`@debian`), "Debian (3rd party)"},
{regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
{regexp.MustCompile(`\bbrew@`), "Homebrew (3rd party)"},
{regexp.MustCompile("."), "Others"},
{regexp.MustCompile(`.`), "Others"},
}
)

@@ -89,7 +91,7 @@ var funcs = map[string]interface{}{
parts = append(parts, part)
}
if len(input) > 0 {
parts = append(parts, input[:])
parts = append(parts, input)
}
return parts[whichPart-1]
},
@@ -130,11 +132,6 @@ func setupDB(db *sql.DB) error {
}
}

// Migrate from old schema to new schema if the table exists.
if err := migrate(db); err != nil {
return err
}

return nil
}

@@ -162,7 +159,7 @@ func main() {
if err != nil {
log.Fatalln("template:", err)
}
bs, err := ioutil.ReadAll(fd)
bs, err := io.ReadAll(fd)
if err != nil {
log.Fatalln("template:", err)
}
@@ -285,7 +282,7 @@ func rootHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
}
}

func locationsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
func locationsHandler(db *sql.DB, w http.ResponseWriter, _ *http.Request) {
cacheMut.Lock()
defer cacheMut.Unlock()

@@ -324,7 +321,7 @@ func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
rep.Address = addr

lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
bs, _ := ioutil.ReadAll(lr)
bs, _ := io.ReadAll(lr)
if err := json.Unmarshal(bs, &rep); err != nil {
log.Println("decode:", err)
if debug {
@@ -378,7 +375,7 @@ func summaryHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
w.Write(bs)
}

func movementHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
func movementHandler(db *sql.DB, w http.ResponseWriter, _ *http.Request) {
s, err := getMovement(db)
if err != nil {
log.Println("movementHandler:", err)
@@ -397,7 +394,7 @@ func movementHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
w.Write(bs)
}

func performanceHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
func performanceHandler(db *sql.DB, w http.ResponseWriter, _ *http.Request) {
s, err := getPerformance(db)
if err != nil {
log.Println("performanceHandler:", err)
@@ -416,7 +413,7 @@ func performanceHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
w.Write(bs)
}

func blockStatsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
func blockStatsHandler(db *sql.DB, w http.ResponseWriter, _ *http.Request) {
s, err := getBlockStats(db)
if err != nil {
log.Println("blockStatsHandler:", err)
@@ -581,7 +578,6 @@ func getReport(db *sql.DB) map[string]interface{} {

for rows.Next() {
err := rows.Scan(&rep.Received, &rep)

if err != nil {
log.Println("sql:", err)
return nil
@@ -725,8 +721,8 @@ func getReport(db *sql.DB) map[string]interface{} {

if rep.NATType != "" {
natType := rep.NATType
natType = strings.Replace(natType, "unknown", "Unknown", -1)
natType = strings.Replace(natType, "Symetric", "Symmetric", -1)
natType = strings.ReplaceAll(natType, "unknown", "Unknown")
natType = strings.ReplaceAll(natType, "Symetric", "Symmetric")
add(featureGroups["Various"]["v3"], "NAT Type", natType, 1)
}
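Several of these hunks are straight replacements of APIs deprecated in recent Go releases: `ioutil.ReadAll` becomes `io.ReadAll`, `strings.Replace(s, old, new, -1)` becomes `strings.ReplaceAll`, and, further down, `strings.Title` is replaced by the `golang.org/x/text/cases` package. A small stand-alone sketch of those replacements, not taken from the actual code:

```
package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	// io.ReadAll (Go 1.16+) replaces the deprecated ioutil.ReadAll.
	bs, _ := io.ReadAll(strings.NewReader("symetric nat"))

	// strings.ReplaceAll is equivalent to strings.Replace(..., -1).
	s := strings.ReplaceAll(string(bs), "symetric", "symmetric")

	// cases.Title replaces the deprecated strings.Title.
	fmt.Println(cases.Title(language.English).String(s))
}
```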
@@ -745,6 +741,7 @@ func getReport(db *sql.DB) map[string]interface{} {
inc(features["Folder"]["v3"], "Weak hash, custom threshold", rep.FolderUsesV3.CustomWeakHashThreshold)
inc(features["Folder"]["v3"], "Filesystem watcher", rep.FolderUsesV3.FsWatcherEnabled)
inc(features["Folder"]["v3"], "Case sensitive FS", rep.FolderUsesV3.CaseSensitiveFS)
inc(features["Folder"]["v3"], "Mode, receive encrypted", rep.FolderUsesV3.ReceiveEncrypted)

add(featureGroups["Folder"]["v3"], "Conflicts", "Disabled", rep.FolderUsesV3.ConflictsDisabled)
add(featureGroups["Folder"]["v3"], "Conflicts", "Unlimited", rep.FolderUsesV3.ConflictsUnlimited)
@@ -773,7 +770,7 @@ func getReport(db *sql.DB) map[string]interface{} {
}

for transport, count := range rep.TransportStats {
add(featureGroups["Connection"]["v3"], "Transport", strings.Title(transport), count)
add(featureGroups["Connection"]["v3"], "Transport", cases.Title(language.English).String(transport), count)
if strings.HasSuffix(transport, "4") {
add(featureGroups["Connection"]["v3"], "IP version", "IPv4", count)
} else if strings.HasSuffix(transport, "6") {
@@ -785,72 +782,53 @@ func getReport(db *sql.DB) map[string]interface{} {
}
}

var categories []category
categories = append(categories, category{
Values: statsForInts(totFiles),
Descr: "Files Managed per Device",
})

categories = append(categories, category{
Values: statsForInts(maxFiles),
Descr: "Files in Largest Folder",
})

categories = append(categories, category{
Values: statsForInt64s(totMiB),
Descr: "Data Managed per Device",
Unit: "B",
Type: NumberBinary,
})

categories = append(categories, category{
Values: statsForInt64s(maxMiB),
Descr: "Data in Largest Folder",
Unit: "B",
Type: NumberBinary,
})

categories = append(categories, category{
Values: statsForInts(numDevices),
Descr: "Number of Devices in Cluster",
})

categories = append(categories, category{
Values: statsForInts(numFolders),
Descr: "Number of Folders Configured",
})

categories = append(categories, category{
Values: statsForInt64s(memoryUsage),
Descr: "Memory Usage",
Unit: "B",
Type: NumberBinary,
})

categories = append(categories, category{
Values: statsForInt64s(memorySize),
Descr: "System Memory",
Unit: "B",
Type: NumberBinary,
})

categories = append(categories, category{
Values: statsForFloats(sha256Perf),
Descr: "SHA-256 Hashing Performance",
Unit: "B/s",
Type: NumberBinary,
})

categories = append(categories, category{
Values: statsForInts(numCPU),
Descr: "Number of CPU cores",
})

categories = append(categories, category{
Values: statsForInts(uptime),
Descr: "Uptime (v3)",
Type: NumberDuration,
})
categories := []category{
{
Values: statsForInts(totFiles),
Descr: "Files Managed per Device",
}, {
Values: statsForInts(maxFiles),
Descr: "Files in Largest Folder",
}, {
Values: statsForInt64s(totMiB),
Descr: "Data Managed per Device",
Unit: "B",
Type: NumberBinary,
}, {
Values: statsForInt64s(maxMiB),
Descr: "Data in Largest Folder",
Unit: "B",
Type: NumberBinary,
}, {
Values: statsForInts(numDevices),
Descr: "Number of Devices in Cluster",
}, {
Values: statsForInts(numFolders),
Descr: "Number of Folders Configured",
}, {
Values: statsForInt64s(memoryUsage),
Descr: "Memory Usage",
Unit: "B",
Type: NumberBinary,
}, {
Values: statsForInt64s(memorySize),
Descr: "System Memory",
Unit: "B",
Type: NumberBinary,
}, {
Values: statsForFloats(sha256Perf),
Descr: "SHA-256 Hashing Performance",
Unit: "B/s",
Type: NumberBinary,
}, {
Values: statsForInts(numCPU),
Descr: "Number of CPU cores",
}, {
Values: statsForInts(uptime),
Descr: "Uptime (v3)",
Type: NumberDuration,
},
}
reportFeatures := make(map[string][]feature)
for featureType, versions := range features {
@@ -1040,7 +1018,7 @@ func (s *summary) filter(min int) {
func getSummary(db *sql.DB, min int) (summary, error) {
s := newSummary()

rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '2 year'::INTERVAL;`)
rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '3 year'::INTERVAL;`)
if err != nil {
return summary{}, err
}
@@ -1073,7 +1051,7 @@ func getSummary(db *sql.DB, min int) (summary, error) {
}

func getMovement(db *sql.DB) ([][]interface{}, error) {
rows, err := db.Query(`SELECT Day, Added, Removed, Bounced FROM UserMovement WHERE Day > now() - '2 year'::INTERVAL ORDER BY Day`)
rows, err := db.Query(`SELECT Day, Added, Removed, Bounced FROM UserMovement WHERE Day > now() - '3 year'::INTERVAL ORDER BY Day`)
if err != nil {
return nil, err
}
@@ -1106,7 +1084,7 @@ func getMovement(db *sql.DB) ([][]interface{}, error) {
}

func getPerformance(db *sql.DB) ([][]interface{}, error) {
rows, err := db.Query(`SELECT Day, TotFiles, TotMiB, SHA256Perf, MemorySize, MemoryUsageMiB FROM Performance WHERE Day > '2014-06-20'::TIMESTAMP ORDER BY Day`)
rows, err := db.Query(`SELECT Day, TotFiles, TotMiB, SHA256Perf, MemorySize, MemoryUsageMiB FROM Performance WHERE Day > now() - '5 year'::INTERVAL ORDER BY Day`)
if err != nil {
return nil, err
}
@@ -1133,7 +1111,7 @@ func getPerformance(db *sql.DB) ([][]interface{}, error) {
}

func getBlockStats(db *sql.DB) ([][]interface{}, error) {
rows, err := db.Query(`SELECT Day, Reports, Pulled, Renamed, Reused, CopyOrigin, CopyOriginShifted, CopyElsewhere FROM BlockStats WHERE Day > '2017-10-23'::TIMESTAMP ORDER BY Day`)
rows, err := db.Query(`SELECT Day, Reports, Pulled, Renamed, Reused, CopyOrigin, CopyOriginShifted, CopyElsewhere FROM BlockStats WHERE Day > now() - '3 year'::INTERVAL ORDER BY Day`)
if err != nil {
return nil, err
}
@@ -1150,6 +1128,10 @@ func getBlockStats(db *sql.DB) ([][]interface{}, error) {
if err != nil {
return nil, err
}
// Legacy bad data on certain days
if reports <= 0 || pulled < 0 || renamed < 0 || reused < 0 || copyOrigin < 0 || copyOriginShifted < 0 || copyElsewhere < 0 {
continue
}
row := []interface{}{
day.Format("2006-01-02"),
reports,
@@ -1171,9 +1153,11 @@ type sortableFeatureList []feature
func (l sortableFeatureList) Len() int {
return len(l)
}

func (l sortableFeatureList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}

func (l sortableFeatureList) Less(a, b int) bool {
if l[a].Pct != l[b].Pct {
return l[a].Pct < l[b].Pct
@@ -1,143 +0,0 @@
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
"database/sql"
"database/sql/driver"
"encoding/json"
"errors"
"log"
"strings"

"github.com/lib/pq"

"github.com/syncthing/syncthing/lib/ur/contract"
)

func migrate(db *sql.DB) error {
var count uint64
log.Println("Checking old table row count, this might take a while...")
if err := db.QueryRow(`SELECT COUNT(1) FROM Reports`).Scan(&count); err != nil || count == 0 {
// err != nil most likely means table does not exist.
return nil
}
log.Printf("Found %d records, will perform migration.", count)

tx, err := db.Begin()
if err != nil {
log.Println("sql:", err)
return err
}
defer tx.Rollback()

// These must be lower case, because we don't quote them when creating, so postgres creates them lower case.
// Yet pg.CopyIn quotes them, which makes them case sensitive.
stmt, err := tx.Prepare(pq.CopyIn("reportsjson", "received", "report"))
if err != nil {
log.Println("sql:", err)
return err
}

// Custom types used in the old struct.
var rep contract.Report
var rescanIntvs pq.Int64Array
var fsWatcherDelay pq.Int64Array
pullOrder := make(IntMap)
fileSystemType := make(IntMap)
themes := make(IntMap)
transportStats := make(IntMap)

rows, err := db.Query(`SELECT ` + strings.Join(rep.FieldNames(), ", ") + `, FolderFsWatcherDelays, RescanIntvs, FolderPullOrder, FolderFilesystemType, GUITheme, Transport FROM Reports`)
if err != nil {
log.Println("sql:", err)
return err
}
defer rows.Close()

var done uint64
pct := count / 100

for rows.Next() {
err := rows.Scan(append(rep.FieldPointers(), &fsWatcherDelay, &rescanIntvs, &pullOrder, &fileSystemType, &themes, &transportStats)...)
if err != nil {
log.Println("sql scan:", err)
return err
}
// Patch up parts that used to use custom types
rep.RescanIntvs = make([]int, len(rescanIntvs))
for i := range rescanIntvs {
rep.RescanIntvs[i] = int(rescanIntvs[i])
}
rep.FolderUsesV3.FsWatcherDelays = make([]int, len(fsWatcherDelay))
for i := range fsWatcherDelay {
rep.FolderUsesV3.FsWatcherDelays[i] = int(fsWatcherDelay[i])
}
rep.FolderUsesV3.PullOrder = pullOrder
rep.FolderUsesV3.FilesystemType = fileSystemType
rep.GUIStats.Theme = themes
rep.TransportStats = transportStats

_, err = stmt.Exec(rep.Received, rep)
if err != nil {
log.Println("sql insert:", err)
return err
}
done++
if done%pct == 0 {
log.Printf("Migration progress %d/%d (%d%%)", done, count, (100*done)/count)
}
}

// Tell the driver bulk copy is finished
_, err = stmt.Exec()
if err != nil {
log.Println("sql stmt exec:", err)
return err
}

err = stmt.Close()
if err != nil {
log.Println("sql stmt close:", err)
return err
}

_, err = tx.Exec("DROP TABLE Reports")
if err != nil {
log.Println("sql drop:", err)
return err
}

err = tx.Commit()
if err != nil {
log.Println("sql commit:", err)
return err
}
return nil
}

type IntMap map[string]int

func (p IntMap) Value() (driver.Value, error) {
return json.Marshal(p)
}

func (p *IntMap) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("Type assertion .([]byte) failed.")
}

var i map[string]int
err := json.Unmarshal(source, &i)
if err != nil {
return err
}

*p = i
return nil
}
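The deleted migration above fed the old rows into the new `ReportsJson` table through PostgreSQL's COPY protocol via `pq.CopyIn`. A condensed, hypothetical sketch of that flow, with table and column names assumed to match the lower-cased schema as the comment in the removed code explains:

```
package migration

import (
	"database/sql"
	"time"

	"github.com/lib/pq"
)

// bulkInsert copies rows into reportsjson inside one transaction.
func bulkInsert(db *sql.DB, reports map[time.Time]string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Lower-case identifiers: pq.CopyIn quotes them, so they must match the
	// names PostgreSQL stored for the original, unquoted CREATE TABLE.
	stmt, err := tx.Prepare(pq.CopyIn("reportsjson", "received", "report"))
	if err != nil {
		return err
	}
	for received, report := range reports {
		if _, err := stmt.Exec(received, report); err != nil {
			return err
		}
	}
	if _, err := stmt.Exec(); err != nil { // final Exec with no arguments flushes the COPY
		return err
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	return tx.Commit()
}
```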
@@ -50,7 +50,6 @@ found in the LICENSE file.

<script type="text/javascript">
google.setOnLoadCallback(drawVersionChart);
google.setOnLoadCallback(drawMovementChart);
google.setOnLoadCallback(drawBlockStatsChart);
google.setOnLoadCallback(drawPerformanceCharts);

@@ -82,37 +81,6 @@ found in the LICENSE file.
chart.draw(data, options);
}

function drawMovementChart() {
var jsonData = $.ajax({url: "movement.json", dataType:"json", async: false}).responseText;
var rows = JSON.parse(jsonData);

var data = new google.visualization.DataTable();
data.addColumn('date', 'Day');
for (var i = 1; i < rows[0].length; i++){
data.addColumn('number', rows[0][i]);
}

for (var i = 1; i < rows.length; i++){
rows[i][0] = new Date(rows[i][0]);
if (rows[i][1] > 500) {
rows[i][1] = null;
}
if (rows[i][2] < -500) {
rows[i][2] = null;
}
data.addRow(rows[i]);
};

var options = {
legend: { position: 'bottom', alignment: 'center' },
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
};

var chart = new google.visualization.AreaChart(document.getElementById('movementChart'));
chart.draw(data, options);
}

function formatGibibytes(gibibytes, decimals) {
if(gibibytes == 0) return '0 GiB';
var k = 1024,
@@ -273,14 +241,6 @@ found in the LICENSE file.
</p>
<div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

<h4 id="joining-leaving">Users Joining and Leaving per Day</h4>
<p>
This is the total number of unique users joining and leaving per day. A user is counted as "joined" on the first day their unique ID is seen, and as "left" on the last day the unique ID was seen before a two weeks or longer absence. "Bounced" refers to users who joined and left on the same day.
</p>
<div class="img-thumbnail" id="movementChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
<p class="text-muted">
Reappearance of users cause the "left" data to shrink retroactively.
</p>
<div id="block-stats">
<h4>Data Transfers per Day</h4>
<p>
@@ -1,7 +1,7 @@
[syncthing]
title=Syncthing
description=Syncthing file synchronisation
ports=22000/tcp|21027/udp
ports=22000|21027/udp

[syncthing-gui]
title=Syncthing-GUI

@@ -13,4 +13,4 @@ syncthing_log_file=</path/to/syncthing/log/file>
syncthing_user=<syncthing_user>
syncthing_group=<syncthing_group>
```
See the rc.d script for more informations.
See the rc.d script for more information.
@@ -38,13 +38,14 @@ syncthing_group=${syncthing_group:-$syncthing_user}

command=/usr/local/bin/syncthing
pidfile=/var/run/syncthing.pid
syncthing_flags="${syncthing_home:+-home=${syncthing_home}} ${syncthing_log_file:+-logfile=${syncthing_log_file}}"
syncthing_cmd=serve
syncthing_flags="${syncthing_home:+--home=${syncthing_home}} ${syncthing_log_file:+--logfile=${syncthing_log_file}}"

syncthing_start() {
echo "Starting syncthing"
touch ${pidfile} && chown ${syncthing_user} ${pidfile}
touch ${syncthing_log_file} && chown ${syncthing_user} ${syncthing_log_file}
/usr/sbin/daemon -cf -p ${pidfile} -u ${syncthing_user} ${command} ${syncthing_flags}
/usr/sbin/daemon -cf -p ${pidfile} -u ${syncthing_user} ${command} ${syncthing_cmd} ${syncthing_flags}
}

syncthing_cleanup() {

@@ -2,7 +2,7 @@
Name=Start Syncthing
GenericName=File synchronization
Comment=Starts the main syncthing process in the background.
Exec=/usr/bin/syncthing -no-browser -logfile=default
Exec=/usr/bin/syncthing serve --no-browser --logfile=default
Icon=syncthing
Terminal=false
Type=Application

@@ -5,5 +5,4 @@ export HOME="/home/$USERNAME"
export SYNCTHING="$HOME/bin/syncthing"

exec 2>&1
exec chpst -u "$USERNAME" "$SYNCTHING" -logflags 0

exec chpst -u "$USERNAME" "$SYNCTHING" serve --logflags 0
etc/linux-sysctl/30-syncthing.conf (new file, 3 lines)
@@ -0,0 +1,3 @@
# Increase maximum receive socket buffer size to 2MiB for QUIC connections
# see https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size
net.core.rmem_max = 2097152
etc/linux-sysctl/README.md (new file, 21 lines)
@@ -0,0 +1,21 @@
sysctl configuration to raise UDP buffer size
===================
Installation
-----------
**Please note:** If you installed syncthing using the official deb package, you can skip the copying.

Copy the file `30-syncthing.conf` to `/etc/sysctl.d/` (root permissions required).

In a terminal run
```
sudo sysctl -q --system
```
to apply the sysctl changes.

Verification
----------
You can verify that the new limit is active using
```
sysctl net.core.rmem_max
```
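As an aside (not part of the packaged instructions): the limit can also be applied immediately for the running system with `sudo sysctl -w net.core.rmem_max=2097152`; the file under `/etc/sysctl.d/` is what makes the setting persist across reboots.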
@@ -5,4 +5,4 @@ This directory contains configuration files for running Syncthing under the
systemd user service. For further documentation take a look at the [systemd
section][1] on https://docs.syncthing.net.

[1]: https://docs.syncthing.net/users/autostart.html#using-systemd
[1]: https://docs.syncthing.net/users/autostart#using-systemd

@@ -2,12 +2,14 @@
Description=Syncthing - Open Source Continuous File Synchronization for %I
Documentation=man:syncthing(1)
After=network.target
StartLimitIntervalSec=60
StartLimitBurst=4

[Service]
User=%i
ExecStart=/usr/bin/syncthing -no-browser -no-restart -logflags=0
ExecStart=/usr/bin/syncthing serve --no-browser --no-restart --logflags=0
Restart=on-failure
RestartSec=5
RestartSec=1
SuccessExitStatus=3 4
RestartForceExitStatus=3 4

@@ -18,5 +20,9 @@ SystemCallArchitectures=native
MemoryDenyWriteExecute=true
NoNewPrivileges=true

# Elevated permissions to sync ownership (disabled by default),
# see https://docs.syncthing.net/advanced/folder-sync-ownership
#AmbientCapabilities=CAP_CHOWN CAP_FOWNER

[Install]
WantedBy=multi-user.target
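For the template unit shown here (assuming it is installed under the upstream name `syncthing@.service`), a per-user instance is enabled and started with `sudo systemctl enable --now syncthing@<username>.service`.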
Some files were not shown because too many files have changed in this diff.