Mirror of https://github.com/syncthing/syncthing.git (synced 2025-12-24 06:28:10 -05:00)

Compare commits: v1.27.8...infrastructure (560 commits)
560 commits (abbreviated SHA1):

cde867cf74 70292b4902 553c02f244 ce884e5d72 5f702c1406 a6bcd02739 488c33aef5 9241a475e9
01eef47bbc c518d99c35 81c99e07db 5279330c1d 194b59b3ed 8e796ddb94 7c9d06b4d2 df8d8c276e
98cf5872e9 c883f49a24 d84280107c d97fd638bc a1069a0d70 465804161b d08f483811 655b4568c1
c6a887865f b4565c87ee 20d2406a0e d3d3fc2d0e f8c44923c7 6f0acacbd2 932b4ce9bd b3e3ca7294
41b4c5cd5e eb4eb7524d a64c5396e9 5595113074 ea19ec64bf 9de6c5ed69 d037681fd1 1b0eaa093a
3382ccc3f1 9ee208b441 dd90e8ec7a aa6ae0f3b0 e8b256793a 8233279a65 8e5d5802cc 25ae01b0d7
66583927f8 f0328abeaa 4b8d07d91c c33daca3b4 a533f453f8 3c9e87d994 f0180cb014 a99a730c0c
36254473a3 800596139e f48782e4df 922cc7544e 9e262d84de 42db6280e6 8d8adae310 12ba4b6aea
372e3c26b0 01e2426a56 6e9ccf7211 4986fc1676 5ff050e665 fc40dc8af2 541678ad9e fafc3ba45e
da7a75a823 e41d6b9c1e 21ad99c80a 4ad3f07691 4459438245 2306c6d989 0de55ef262 d083682418
c918299eab b59443f136 7189a3ebff 6ed4cca691 958f51ace6 07f1320e00 3da449cfa3 655ef63c74
01257e838b e54f51c9c5 a259a009c8 8151bcddff d776657b52 0416103f26 7bfcdfb577 e6a9b09527
c8f52ba1bc 3058aa6315 60160db23a 66b28e9aed 755daaa7b7 33b5c3c62e ffb30392e8 7a76685d7e
370bbb8f26 9ea6c9c3c3 8f117a4417 bbf48ae334 fcf4916086 5d8033343f c74d2a9872 3da84804b6
5b75c6ddcb ae03854575 ad196173d0 d682220305 29e10e00d2 34f61ce464 adcbd31e62 431da839cf
836045ee87 49462448d0 e3424ad503 5703423c00 356ec26c87 d37cb02e40 953944e54e 6e26fab3a0
532e30eb6b 54bb987fae 74367d2f66 0f6750c8f5 c8c38f735f fa4bd5c057 36fb5425a5 32a913c0ff
e8cfc8acfb 7c07610ab2 ff88430efb 06dd8ee6d7 c0aa7b436c b80fa9dcd2 95187bcc64 f2a5b62733
385ca6772c 88c307b65b 9d425b0588 cf84a260ca c4e024c7e3 c5a29b5b26 4c64843d60 b4ff96d754
21c5ac2161 6fc0b41f97 0b0b2143ed af64140c61 1c68062231 4d92855d76 1c6f542cb7 b28066c85d
71c8a2c36f e4ab7b4ff3 8b978d4712 7b319111d3 cb7cea93a2 20257faf54 c14abebd68 b1a1a90045
8afc9855f2 4215058911 9fb1a18dbf 064213ceb8 0211251b34 1903da569b b6a7beca1f 7b83e7403e
1915a470e9 0b100296e1 e6ed3acf5f 9f95bf3573 b05ece0681 5381178c46 e7f4f8306c 9922a3abd9
40ab668a73 10d20c4800 700bb75016 e25de22705 ef6d561c66 3c92999406 e61b8a1ae8 706409d2f3
e9133ef82b 67ba20d777 21da0d7890 ebbe57d0ab 9cf8fca03f f4abc71dcc 2712f566ab 8aa02da93a
ac3f390afa 0e560486db 57d413099d 8d37e8f307 1fdf07933c d49df1e44c 50480f6ccb 15f693d93c
8e934a8c69 c50678618f 8094b459e4 6765867a2e 4fb8ee6a6f 79bac43800 674834ccf4 3bd2bff23b
40660c5fb7 d940d094a1 9d67727989 6f51700a7f 43d33dbeb5 bb91f53641 598915193a 905e5ec07f
5945a8c5bd 4075b886d0 e39dcc5c58 46d2c59b24 54f6b5c2ee cade790198 98555a9a80 48b757cac1
58c85fc9db ddd98a818a 99b707c141 39d6692109 64b5a1b738 1a131a56f2 beda37f28b 1b8a8032f0
3423de24ea 6532715641 2532ac35cf 832fa094a3 bcd30ceaec 78bfe643a8 0a58747eb2 96b03fac04
085455d72e 72849690c9 f09cca52f2 9a3493c2f4 fa404d5a0d 73ad18fbfb a4db309b39 4bc17bc588
964c8d7d65 bacf506e90 1dd264894a 8c3d2f3bc5 702ed8ecc1 70bb4459be b038650810 821d6f43ac
a16bf555c0 fa7b81e1cf 516f1aa0c7 6b94599467 f183d1cbec cd6ea60fa1 58bf2b5515 d28be1b711
47e3147d0b 2159dfd27d 0bf21d9db2 f61843ef2e 23e8366f8d ed252ed6d7 93e72cc83f abe34fc1f6
be002362b3 190dff142c c667ada63a 50480b89fc 93ae30d889 486eebc4ac ff33d976d1 69890b4282
533c9a6ab0 9521bb3931 25e03ef9ab e46a0f99c3 ed6575411f ed97e365b2 b4776ea4e0 b5ffd0a796
c74299b59a 8b6d837483 3e74b3dee2 2902da996c f6f144bf17 ab5c42f4a0 780b8fd3bc 7db3f7eaac
f0b666269b 190a59842c 40888c1a66 ddea2e449c 7cfa871d58 95b39a791d fa0d933e49 e0c1abc5fe
8372c0288f 5f5d672a7d d23cd197e1 d7ca483df1 e48be98cd5 cbded11c43 d5aa991b73 05210d0325
55da878452 e9a2ff3aa6 c9650fc7d5 cf1cf85ce6 2301f72c5b 7d51b1b620 f7c8efd93c 3e7ccf7c48
fa3b9acca3 bae976905c 6bc2784e9a 1dbdd6b720 f15d50c2e8 8a2d8ebf81 b88aea34b6 82a0dd8eaa
4096a35b86 86cbc2486f 0bcc31d058 2c3a890d2f f9007ed106 2953630bc3 a99e670ebb 05cc6b0f43
1efcfeb3ad 93195911bd 6085e3a5eb e5b72da607 0d6117d585 f1e136a17b 025905fcdf b1c8f88a44
1a25ae32ca 629971687d 3c955a9706 7f3c8dbff1 7762e39fb3 4235b2c406 3fd090bfa7 6dfa54efa6
67575e1736 65923fc255 aea763868f 26b134ae7b 893071d2ba 435f2d2178 8461ca539b fb977dc61d
ee7ab4ce25 c3ce9713d9 6a147091c5 6208c36417 453fd20eeb 28f0cffdb6 87c16c6cf5 5495c98e63
da7d5ce608 b300c297c6 124673f7a8 0395cf2bc0 36cd70040a 1fbd396ffa 2834bad85e 516f3e29e8
ab20c16982 0c1df81ee9 d324b2ac86 0231089b99 74ffb85467 dcd280e6e2 4e56dbd883 13d7881b80
1d2c53bf3c 9c449c966b ec2d4638e3 0ce92befc8 2167ce9656 0dc85d74aa b6e3f8037b 00e7161a8f
b5a7879eca 4355dc69ea 371ba69447 79ef57d0ea 8bd6bdd397 ce3248cea7 99a6f3a5b6 c2e10dc156
fc914f3237 811d3752d0 00827dd5c1 a981c21d27 163dc122f3 83727e0824 529d3ef764 fefbf4dcc8
b9c6d3ae09 7bea8c758a 479c0d3f16 d9ce7c3166 69979996d9 da58e5c50c 44e259142f 77970d5113
2b8ee4c7a5 be952e5f2d 43ebac4242 f08a0ed01c 612fdff377 8ccb7f1924 65d0ca8aa9 e82ed6e3d3
4b815fc086 7eaf843de2 110e1ae6f9 896f9725ec 1a529e9d5d 36ef17df8f 955ac7775e 8f69e874c4
ac06fd97e9 3726b7d112 377200591e 4afc898c2f ff7e4fef55 9ffddb1923 896b857fc4 acc5d2675b
6ece4c1fd2 cc09f0170d bb234d6c0e e6acc64758 f18cf545b9 6d64daaba3 47f48faed7 cfa834177b
c454fc8baa dbe7fa9155 4d842f7d3b 0e68221c91 19f63c7ea3 fb939ec496 39df3173d4 429672e0b4
605fd6d726 3c476542d2 31874f3ebb 77942747db fe01b396ba 3583949706 23fc22ebc5 cba163a1fd
a8e2c8edb6 3e501d9036 9ca101756d a873d12c65 8ff670c564 b1ed2802fb b70cb580c8 28be3ba788
d4770ddc77 cbe1220680 0b95c5fa76 0343bca257 878016db39 1f4fde9525 5b9d8a838f 8b19cb1e11
ce1e259bb4 2238a288d9 c8ee2a5cf6 1704827d04 a156e88eef 94d0195b63 1616edcee3 6505e123bb
63e4659282 f3f5557c8e b794726e1f 3d59740a0a 66fb65b01f 5c2fcbfd19 f9b72330a8 822b6ac36b
77f7778292 aed2c66e52 68a1fd010f ac8b3342ac 0ea90dd932 718b1ce2b7 29f7510f5a a7f9ed4a80
1baefea410 563cec8923 a3c340ece9 cb24638ec9 2fb24dc2cc 9aa2d2c92f d1c5100c98 42e677c055
27bba2c0c2 feff334547 713cf357ce 5342bec1b7 7df75e681d 8dc826b234 9ef37e1485 7517d18fbb
42d0fee536 2ca9d3b5c5 9cde068f2a 1243083831 356c5055ad 19693734a3 17e60b9e0c ac22b2d00a
de0b4270df e738af7c56 a28441a9bf 2e313716e5 0b5ff1f5f7 0fe6d97d3d 0756e42a85 13ebe1c87f
aea7fa5f22 403ce7e597 4704d3bc48 2794b04243 1ce64971fd eb6d80eac4 a8db3351ae 23a900e096
5a304cf295 136b3742bf 21e0f98fe2 2bb5b2244b 2f281799c1 18a58a2ddc f283215fce 495809ac9e
.github/CODEOWNERS (vendored): 2 changed lines

@@ -1,2 +0,0 @@
/AUTHORS @calmh
/*.md @calmh
.github/ISSUE_TEMPLATE/01-feature.yml (vendored): 1 changed line

@@ -1,6 +1,7 @@
name: Feature request
description: File a new feature request
labels: ["enhancement", "needs-triage"]
type: Feature
body:

  - type: textarea
.github/ISSUE_TEMPLATE/02-bug.yml (vendored): 1 changed line

@@ -1,6 +1,7 @@
name: Bug report
description: If you're actually looking for support instead, see "I need help / I have a question".
labels: ["bug", "needs-triage"]
type: Bug
body:
  - type: markdown
    attributes:
.github/dependabot.yml (vendored): 4 changed lines

@@ -3,11 +3,11 @@ updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: weekly
      interval: monthly
    open-pull-requests-limit: 10

  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: weekly
      interval: monthly
    open-pull-requests-limit: 10
.github/labeler.yml (vendored, new file): 23 lines

@@ -0,0 +1,23 @@
version: 1
labels:

  - label: enhancement
    title: ^feat\b

  - label: bug
    title: ^fix\b

  - label: documentation
    title: ^docs\b

  - label: chore
    title: ^chore\b

  - label: chore
    title: ^refactor\b

  - label: build
    title: ^build\b

  - label: dependencies
    title: ^build\(deps\)\b
.github/regsync.yml (vendored, new file): 52 lines

@@ -0,0 +1,52 @@
version: 1
creds:
  - registry: docker.io
    user: "{{env \"DOCKERHUB_USERNAME\"}}"
    pass: "{{env \"DOCKERHUB_TOKEN\"}}"

defaults:
  ratelimit:
    min: 100
    retry: 1m
  parallel: 4

sync:

  - source: ghcr.io/syncthing/syncthing
    target: docker.io/syncthing/syncthing
    type: repository
    tags:
      allow:
        - latest
        - rc
        - edge
        - \d+
        - \d+\.\d+
        - \d+\.\d+\.\d+
        - \d+\.\d+\.\d+-rc\.\d+

  - source: ghcr.io/syncthing/relaysrv
    target: docker.io/syncthing/relaysrv
    type: repository
    tags:
      allow:
        - latest
        - rc
        - edge
        - \d+
        - \d+\.\d+
        - \d+\.\d+\.\d+
        - \d+\.\d+\.\d+-rc\.\d+

  - source: ghcr.io/syncthing/discosrv
    target: docker.io/syncthing/discosrv
    type: repository
    tags:
      allow:
        - latest
        - rc
        - edge
        - \d+
        - \d+\.\d+
        - \d+\.\d+\.\d+
        - \d+\.\d+\.\d+-rc\.\d+
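The same allow list is repeated for each of the three mirrored images. As a sanity check, here is a minimal Go sketch of how those patterns classify tag names, assuming (as regsync conventionally does) that each pattern must match the entire tag:

```go
package main

import (
	"fmt"
	"regexp"
)

// The patterns from the allow lists above, verbatim.
var allow = []string{
	`latest`, `rc`, `edge`,
	`\d+`, `\d+\.\d+`, `\d+\.\d+\.\d+`, `\d+\.\d+\.\d+-rc\.\d+`,
}

// allowed reports whether a tag matches any allow pattern, anchored to
// the full tag name (the assumption noted in the lead-in).
func allowed(tag string) bool {
	for _, p := range allow {
		if regexp.MustCompile(`^(?:` + p + `)$`).MatchString(tag) {
			return true
		}
	}
	return false
}

func main() {
	for _, tag := range []string{"latest", "1", "1.27", "1.27.8", "1.28.0-rc.1", "nightly"} {
		fmt.Printf("%-12s allowed=%v\n", tag, allowed(tag)) // only "nightly" is rejected
	}
}
```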
.github/release.yml (vendored, new file): 17 lines

@@ -0,0 +1,17 @@
changelog:
  exclude:
    labels:
      - dependencies

  categories:
    - title: Fixes
      labels:
        - bug

    - title: Features
      labels:
        - enhancement

    - title: Other
      labels:
        - '*'
.github/workflows/build-infra-dockers.yaml (vendored): 21 changed lines

@@ -7,11 +7,15 @@ on:
      - infra-*

env:
  GO_VERSION: "~1.22.3"
  GO_VERSION: "~1.25.0"
  CGO_ENABLED: "0"
  BUILD_USER: docker
  BUILD_HOST: github.syncthing.net

permissions:
  contents: read
  packages: write

jobs:
  docker-syncthing:
    name: Build and push Docker images

@@ -26,11 +30,11 @@ jobs:
          - stupgrades
          - ursrv
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v5
      - uses: actions/setup-go@v6
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: true

@@ -41,6 +45,13 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build binaries
        run: |
          for arch in arm64 amd64; do

@@ -53,13 +64,13 @@ jobs:

      - name: Set Docker tags (all branches)
        run: |
          tags=syncthing/${{ matrix.pkg }}:${{ github.sha }}
          tags=docker.io/syncthing/${{ matrix.pkg }}:${{ github.sha }},ghcr.io/syncthing/infra/${{ matrix.pkg }}:${{ github.sha }}
          echo "TAGS=$tags" >> $GITHUB_ENV

      - name: Set Docker tags (latest)
        if: github.ref == 'refs/heads/infrastructure'
        run: |
          tags=syncthing/${{ matrix.pkg }}:latest,${{ env.TAGS }}
          tags=docker.io/syncthing/${{ matrix.pkg }}:latest,ghcr.io/syncthing/infra/${{ matrix.pkg }}:latest,${{ env.TAGS }}
          echo "TAGS=$tags" >> $GITHUB_ENV

      - name: Build and push
.github/workflows/build-nightly.yaml (vendored, new file): 18 lines

@@ -0,0 +1,18 @@
name: Build Syncthing (Nightly)

on:
  schedule:
    # Run nightly build at 05:00 UTC
    - cron: '00 05 * * *'
  workflow_dispatch:

permissions:
  contents: write
  packages: write

jobs:
  build-syncthing:
    uses: ./.github/workflows/build-syncthing.yaml
    # if we only want nightlies to run for specific users:
    # if: contains(fromJSON('["syncthing", "calmh"]'), github.repository_owner)
    secrets: inherit
.github/workflows/build-syncthing.yaml (vendored): 811 changed lines (file diff suppressed because it is too large)
.github/workflows/mirrors.yaml (vendored, new file): 18 lines

@@ -0,0 +1,18 @@
name: Mirrors

on: [push, delete]

jobs:
  codeberg:
    name: Mirror to Codeberg
    if: github.repository_owner == 'syncthing'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: yesolutions/mirror-action@master
        with:
          REMOTE: ssh://git@codeberg.org/${{ github.repository }}.git
          GIT_SSH_PRIVATE_KEY: ${{ secrets.CODEBERG_PUSH_KEY }}
          GIT_SSH_NO_VERIFY_HOST: "true"
.github/workflows/org-members.yaml (vendored, new file): 20 lines

@@ -0,0 +1,20 @@
name: Org membership recommendations

on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 1 * *'

jobs:

  run-recommendation:
    runs-on: ubuntu-latest
    name: Check for a recommendation
    steps:

      - uses: docker://ghcr.io/calmh/github-org-members:latest
        env:
          GITHUB_ORGANISATION: syncthing
          GITHUB_TOKEN: ${{ secrets.GOM_GITHUB_TOKEN }}
          GOM_IGNORE_USERS: ${{ secrets.GOM_IGNORE_USERS }}
          GOM_ALSO_REPOS: ${{ secrets.GOM_ALSO_REPOS }}
.github/workflows/pr-metadata.yaml (vendored, new file): 27 lines

@@ -0,0 +1,27 @@
name: PR metadata

on:
  pull_request_target:
    types:
      - opened
      - reopened
      - edited
      - synchronize

permissions:
  contents: read
  pull-requests: write

jobs:

  #
  # Set labels on PRs, which are then used to categorise release notes
  #

  labels:
    name: Set labels
    runs-on: ubuntu-latest
    steps:
      - uses: srvaroa/labeler@v1
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/release-syncthing.yaml (vendored, new file): 60 lines

@@ -0,0 +1,60 @@
name: Release Syncthing

on:
  push:
    branches:
      - release
      - release-rc*

permissions:
  contents: write

jobs:
  create-release-tag:
    name: Create release tag
    runs-on: ubuntu-latest
    environment: release
    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          ref: ${{ github.ref }} # https://github.com/actions/checkout/issues/882
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

      - uses: actions/setup-go@v6
        with:
          go-version: stable

      - name: Determine version to release
        run: |
          if [[ "$GITHUB_REF_NAME" == "release" ]] ; then
            next=$(go run ./script/next-version.go)
          else
            next=$(go run ./script/next-version.go --pre)
          fi
          echo "NEXT=$next" >> $GITHUB_ENV
          echo "Next version is $next"

          prev=$(git describe --exclude "*-*" --abbrev=0)
          echo "PREV=$prev" >> $GITHUB_ENV
          echo "Previous version is $prev"

      - name: Determine release notes
        run: |
          go run ./script/relnotes.go --new-ver "$NEXT" --branch "$GITHUB_REF_NAME" --prev-ver "$PREV" > notes.md
        env:
          GITHUB_TOKEN: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

      - name: Create and push tag
        run: |
          git config --global user.name 'Syncthing Release Automation'
          git config --global user.email 'release@syncthing.net'
          git tag -a -F notes.md --cleanup=whitespace "$NEXT"
          git push origin "$NEXT"

      - name: Trigger the build
        uses: benc-uk/workflow-dispatch@v1
        with:
          workflow: build-syncthing.yaml
          ref: refs/tags/${{ env.NEXT }}
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
.github/workflows/trigger-nightly.yaml (vendored): 3 changed lines

@@ -8,11 +8,12 @@ on:

jobs:

  trigger-nightly:
    if: github.repository_owner == 'syncthing'
    runs-on: ubuntu-latest
    name: Push to release-nightly to trigger build
    steps:

      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
        with:
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
          fetch-depth: 0

@@ -10,11 +10,11 @@ jobs:
    runs-on: ubuntu-latest
    name: Update translations and documentation
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
      - uses: actions/setup-go@v5
      - uses: actions/setup-go@v6
        with:
          go-version: stable
      - run: |
.gitignore (vendored): 2 changed lines

@@ -17,4 +17,4 @@ deb
*.bz2
/repos
/proto/scripts/protoc-gen-gosyncthing
/gui/next-gen-gui
/compat.json
.golangci.yml: 110 changed lines

@@ -1,26 +1,96 @@
linters-settings:
  maligned:
    suggest-new: true

version: "2"
linters:
  enable-all: true
  default: all
  disable:
    - goimports
    - cyclop
    - depguard
    - lll
    - gochecknoinits
    - gochecknoglobals
    - gofmt
    - scopelint
    - gocyclo
    - err113
    - exhaustive
    - exhaustruct
    - forbidigo
    - funcorder
    - funlen
    - wsl
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - goconst
    - gocyclo
    - godot
    - godox

service:
  golangci-lint-version: 1.21.x
  prepare:
    - rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
    - GO111MODULE=on go mod vendor
    - go run build.go assets
    - gomoddirectives
    - inamedparam
    - interfacebloat
    - ireturn
    - lll
    - maintidx
    - mnd
    - musttag
    - nestif
    - nlreturn
    - noinlineerr
    - nonamedreturns
    - paralleltest
    - prealloc
    - predeclared
    - protogetter
    - recvcheck
    - revive
    - tagalign
    - tagliatelle
    - testpackage
    - usetesting # go 1.24
    - varnamelen
    - whitespace
    - wrapcheck
    - wsl
    - wsl_v5
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - internal/gen
      - internal/db/olddb
      - cmd/dev
      - repos
      - third_party$
      - builtin$
      - examples$
      - _test\.go$
    rules:
      # relax the slog rules for debug lines, for now
      - linters: [sloglint]
        source: Debug
      # contexts are irrelevant for SQLite
      - linters: [noctx]
        text: database/sql
      # Rollback errors can be ignored
      - linters: [errcheck]
        source: Rollback
      # Embedded fields named in selectors may add clarity
      - linters: [staticcheck]
        text: QF1008
      # Don't necessarily rewrite !(foo || bar) to !foo && !bar
      - linters: [staticcheck]
        text: QF1001
  settings:
    sloglint:
      context: "scope"
      static-msg: true
      msg-style: capitalized
      key-naming-case: camel
formatters:
  enable:
    - gofumpt
  exclusions:
    generated: lax
    paths:
      - internal/gen
      - cmd/dev
      - repos
      - third_party$
      - builtin$
      - examples$
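The sloglint settings above (contextual calls, static capitalized messages, camelCase keys) translate into call sites like the following; a minimal sketch using the standard log/slog package, not Syncthing's actual logging helpers:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	ctx := context.Background()

	// Passes the configured rules: the context is passed through, the
	// message is a static, capitalized string, and keys are camelCase.
	logger.InfoContext(ctx, "Folder scan completed", "folderID", "abcd-1234", "itemCount", 42)

	// The same call would be flagged under these settings if the message
	// were built dynamically, started lowercase, or used snake_case keys.
}
```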
.policy.yml (new file): 111 lines

@@ -0,0 +1,111 @@
# This is the policy-bot configuration for this repository. It controls
# which approvals are required for any given pull request. The format is
# described at https://github.com/palantir/policy-bot. The syntax of the
# policy can be verified by the bot:
# curl https://pb.syncthing.net/api/validate -X PUT -T .policy.yml

# The policy below is what is required for any pull request.
policy:
  approval:
    - subject is conventional commit
    - or:
        - project metadata requires maintainer approval
        - a maintainer claims responsibility
    - or:
        - is approved by a syncthing contributor
        - is a translation or dependency update by a contributor
        - is a trivial change by a contributor
        - a maintainer claims responsibility

  # Additionally, maintainers can disapprove of a PR
  disapproval:
    requires:
      teams:
        - syncthing/maintainers

# The rules for the policy are described below.

approval_rules:

  # All commits (PRs before squashing) should have a valid conventional
  # commit type subject.
  - name: subject is conventional commit
    requires:
    conditions:
      title:
        matches:
          - '^(feat|fix|docs|chore|refactor|build): [a-z].+'
          - '^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+'

  # Changes to important project metadata and documentation, including this
  # policy, require signoff by a maintainer
  - name: project metadata requires maintainer approval
    if:
      changed_files:
        paths:
          - ^[^/]+\.md
          - ^\.policy\.yml
          - ^LICENSE
    requires:
      count: 1
      teams:
        - syncthing/maintainers
    options:
      ignore_update_merges: true
      allow_non_author_contributor: true

  # Regular pull requests require approval by an active contributor
  - name: is approved by a syncthing contributor
    requires:
      count: 1
      teams:
        - syncthing/contributors
    options:
      ignore_update_merges: true
      allow_non_author_contributor: true

  # Changes to some files (translations, dependencies, compatibility) do not
  # require approval if they were proposed by a contributor and have a
  # matching commit subject
  - name: is a translation or dependency update by a contributor
    if:
      only_changed_files:
        paths:
          - ^gui/default/assets/lang/
          - ^go\.mod$
          - ^go\.sum$
          - ^compat\.yaml$
      title:
        matches:
          - '^chore\(gui\):'
          - '^build\(deps\):'
          - '^build\(compat\):'
      has_author_in:
        teams:
          - syncthing/contributors

  # If the change is small and the label "trivial" is added, we accept that
  # on trust. These PRs can be audited after the fact as appropriate.
  # Features are not trivial.
  - name: is a trivial change by a contributor
    if:
      modified_lines:
        total: "< 25"
      title:
        not_matches:
          - '^feat'
      has_labels:
        - trivial
      has_author_in:
        teams:
          - syncthing/contributors

  # A member of the maintainers group can take responsibility by adding the
  # appropriate label.
  - name: a maintainer claims responsibility
    if:
      has_labels:
        - maintainer-responsibility
      has_author_in:
        teams:
          - syncthing/maintainers
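The two title patterns in the "subject is conventional commit" rule can be exercised directly; a quick Go check, reusing the regular expressions verbatim from the policy above:

```go
package main

import (
	"fmt"
	"regexp"
)

// Copied verbatim from the "subject is conventional commit" rule.
var patterns = []*regexp.Regexp{
	regexp.MustCompile(`^(feat|fix|docs|chore|refactor|build): [a-z].+`),
	regexp.MustCompile(`^(feat|fix|docs|chore|refactor|build)\(\w+(, \w+)*\): [a-z].+`),
}

func conventional(title string) bool {
	for _, re := range patterns {
		if re.MatchString(title) {
			return true
		}
	}
	return false
}

func main() {
	for _, t := range []string{
		"feat(dialer): add env var to disable proxy fallback (fixes #3006)", // accepted
		"fix: handle nil pointer in scanner",                                // accepted
		"Update README",            // rejected: no conventional commit type
		"feat(Dialer): Add feature", // rejected: description must start lowercase
	} {
		fmt.Printf("%-70s %v\n", t, conventional(t))
	}
}
```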
AUTHORS: 124 changed lines

@@ -13,117 +13,115 @@
# contents of this file.
#

Aaron Bieber (qbit) <qbit@deftly.net>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net> <jborg@coreweave.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Simon Frei (imsodin) <freisim93@gmail.com>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
Antony Male (canton7) <antony.male@gmail.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
bt90 <btom1990@googlemail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Emil Lundberg <emil@emlun.se>
Eric P <eric@kastelo.net>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
greatroar <61184462+greatroar@users.noreply.github.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Lode Hoste (Zillode) <zillode@zillode.be>
Marcus B Spencer <marcus@marcusspencer.xyz> <marcus@marcusspencer.us>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Tommy van der Vorst <tommy-github@pixelspark.nl> <tommy@pixelspark.nl>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Alan Pope <alan@popey.com>
Alberto Donato <albertodonato@users.noreply.github.com>
Aleksey Vasenev <margtu-fivt@ya.ru>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alex Ionescu <github@ionescu.sh>
Alex Lindeman <139387+aelindeman@users.noreply.github.com>
Alex Xu <alex.hello71@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexander Seiler <seileralex@gmail.com>
Alexandre Alves <alexandrealvesdb.contact@gmail.com>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Aman Gupta <aman@tmm1.net>
Anatoli Babenia <anatoli@rainforce.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andreas Sommer <andreas.sommer87@googlemail.com>
andresvia <andres.via@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrew Meyer <andrewm.bpi@gmail.com>
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
andyleap <andyleap@gmail.com>
Anjan Momi <anjan@momi.ca>
Anthony Goeckner <agoeckner@users.noreply.github.com>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com>
Anur <anurnomeru@163.com>
Aranjedeath <Aranjedeath@users.noreply.github.com>
ardevd <ardevd@users.noreply.github.com>
Arkadiusz Tymiński <gevleeog@gmail.com>
Aroun <login@b-vo.fr>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Artur Zubilewicz <AkaZecik@users.noreply.github.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
Ashish Bhate <bhate.ashish@gmail.com>
Aurélien Rainone <476650+arl@users.noreply.github.com>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Beat Reichenbach <44111292+beatreichenbach@users.noreply.github.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benedikt Heine (bebehei) <bebe@bebehei.de>
Benedikt Morbach <benedikt.morbach@googlemail.com>
Benjamin Nater <17193640+bn4t@users.noreply.github.com>
Benno Fünfstück <benno.fuenfstueck@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
boomsquared <54829195+boomsquared@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Boris Rybalkin <ribalkin@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
bt90 <btom1990@googlemail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
Catfriend1 <16361913+Catfriend1@users.noreply.github.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
chenrui <rui@meetup.com>
Chih-Hsuan Yen <yan12125@gmail.com> <1937689+yan12125@users.noreply.github.com>
Choongkyu <choongkyu.kim+gh@gmail.com> <vapidlyrapid+gh@gmail.com>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Chris Tonkinson <chris@masterbran.ch>
Christian Kujau <ckujau@users.noreply.github.com>
Christian Prescott <me@christianprescott.com>
chucic <chucic@seznam.cz>
cjc7373 <niuchangcun@gmail.com>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Cromefire_ <tim.l@nghorst.net> <26320625+cromefire@users.noreply.github.com>
cui fliter <imcusg@gmail.com>
Cyprien Devillez <cypx@users.noreply.github.com>
d-volution <49024624+d-volution@users.noreply.github.com>
Dale Visser <dale.visser@live.com>
Dan <benda.daniel@gmail.com>
Daniel Barczyk <46358936+DanielBarczyk@users.noreply.github.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Daniel Padrta <64928366+danpadcz@users.noreply.github.com>
Daniil Gentili <daniil@daniil.it>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
dashangcun <907225865@qq.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com>
DeflateAwning <11021263+DeflateAwning@users.noreply.github.com>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com> <27856297+dependabot-preview[bot]@users.noreply.github.com>
dependabot[bot] <dependabot[bot]@users.noreply.github.com> <49699333+dependabot[bot]@users.noreply.github.com>
derekriemer <derek.riemer@colorado.edu>
DerRockWolf <50499906+DerRockWolf@users.noreply.github.com>
desbma <desbma@users.noreply.github.com>
Devon G. Redekopp <devon@redekopp.com>
diemade <spamkill@posteo.ch>
digital <didev@dinid.net>
Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
domain <32405309+szu17dmy@users.noreply.github.com>
Domenic Horner <domenic@tgxn.net>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Elliot Huffman <thelich2@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Emil Lundberg <emil@emlun.se>
Eng Zer Jun <engzerjun@gmail.com>
entity0xfe <109791748+entity0xfe@users.noreply.github.com> <entity0xfe@my.domain>
Eric Lesiuta <elesiuta@gmail.com>
Eric P <eric@kastelo.net>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Evan Spensley <94762716+0evan@users.noreply.github.com>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix <53702818+f-eliks@users.noreply.github.com>
Felix Ableitner (Nutomic) <me@nutomic.com>

@@ -137,61 +135,49 @@ ghjklw <malo@jaffre.info>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Gleb Sinyavskiy <zhulik.gleb@gmail.com>
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
greatroar <61184462+greatroar@users.noreply.github.com>
Greg <gco@jazzhaiku.com>
guangwu <guoguangwu@magic-shield.com>
gudvinr <gudvinr@gmail.com>
Gusted <postmaster@gusted.xyz> <williamzijl7@hotmail.com>
Han Boetes <han@boetes.org>
HansK-p <42314815+HansK-p@users.noreply.github.com>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Hazem Krimi <me@hazemkrimi.tech>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hireworks <129852174+hireworksltd@users.noreply.github.com>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
ignacy123 <ignacy.buczek@onet.pl>
Ikko Ashimine <eltociear@gmail.com>
Ilya Brin <464157+ilyabrin@users.noreply.github.com>
Iskander Sharipov (Alex) <quasilyte@gmail.com>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jack Croft <jccroft1@users.noreply.github.com>
Jacob <jyundt@gmail.com>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net> <jborg@coreweave.com>
James O'Beirne <wild-github@au92.org>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
janost <janost@tuta.io>
Jaroslav Lichtblau <svetlemodry@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
jaseg <githubaccount@jaseg.net>
Jaspitta <ste.scarpitta@gmail.com>
Jauder Ho <jauderho@users.noreply.github.com>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jaya Kumar <jaya.kumar@ict.nl>
Jeffery To <jeffery.to@gmail.com>
jelle van der Waa <jelle@vdwaa.nl>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
Jesse Lucas <jesse@jesselucas.com>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Andersson <j@i19.se>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
Jonas Thelemann <e-mail@jonas-thelemann.de>
Jonathan <artback@protonmail.com> <jonagn@gmail.com>
Jonathan Cross <jcross@gmail.com>
Jonta <359397+Jonta@users.noreply.github.com>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
jtagcat <git-514635f7@jtag.cat> <git-12dbd862@jtag.cat>
Julian Lehrhuber <jul13579@users.noreply.github.com>
Jörg Thalheim <Mic92@users.noreply.github.com>
Jędrzej Kula <kula.jedrek@gmail.com>
K.B.Dharun Krishna <kbdharunkrishna@gmail.com>
Kalle Laine <pahakalle@protonmail.com>
Kapil Sareen <kapilsareen584@gmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kebin Liu <lkebin@gmail.com>
Keith Harrison <keithh@protonmail.com>
Keith Turner <kturner@apache.org>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>

@@ -200,31 +186,26 @@ Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
klemens <ka7@github.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
kylosus <33132401+kylosus@users.noreply.github.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Lars Lehtonen <lars.lehtonen@gmail.com>
Laurent Arnoud <laurent@spkdev.net>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
LSmithx2 <42276854+lsmithx2@users.noreply.github.com>
luchenhan <168071714+luchenhan@users.noreply.github.com>
Lukas Lihotzki <lukas@lihotzki.de>
Luke Hamburg <1992842+luckman212@users.noreply.github.com>
luzpaz <luzpaz@users.noreply.github.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcel Meyer <mm.marcelmeyer@gmail.com>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
marco-m <marco.molteni@laposte.net>
Marcus Legendre <marcus.legendre@gmail.com>
Mario Majila <mariustshipichik@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
Martchus <martchus@gmx.net>
Martin Polehla <p0l0us@users.noreply.github.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Mateusz Ż <thedead4fun@live.com>
mathias4833 <67101597+mathias4833@users.noreply.github.com>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>

@@ -233,13 +214,9 @@ Maurizio Tomasi <ziotom78@gmail.com>
Max <github@germancoding.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io>
Maximilian <maxi.rostock@outlook.de> <public@complexvector.space>
mclang <1721600+mclang@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Rienstra <mrienstra@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
MichaIng <micha@dietpi.com>
Migelo <miha@filetki.si>
Mike Boone <mike@boonedocks.net>

@@ -248,7 +225,6 @@ MikolajTwarog <43782609+MikolajTwarog@users.noreply.github.com>
Mingxuan Lin <gdlmx@users.noreply.github.com>
mv1005 <49659413+mv1005@users.noreply.github.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Naveen <172697+naveensrinivasan@users.noreply.github.com>
nf <nf@wh3rd.net>
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
Nick Busey <NickBusey@users.noreply.github.com>

@@ -263,14 +239,13 @@ NoLooseEnds <jon.koslung@gmail.com>
Oliver Freyermuth <o.freyermuth@googlemail.com>
orangekame3 <miya.org.0309@gmail.com>
otbutz <tbutz@optitool.de>
Otiel <Otiel@users.noreply.github.com>
overkill <22098433+0verk1ll@users.noreply.github.com>
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pablo <pbaeyens31+github@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Paul Brit <paulbrit44@gmail.com>
Paul Donald <newtwen+github@gmail.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
Paweł Rozlach <vespian@users.noreply.github.com>
perewa <cavalcante.ten@gmail.com>
Peter Badida <KeyWeeUsr@users.noreply.github.com>
Peter Dave Hello <hsu@peterdavehello.org>

@@ -280,18 +255,16 @@ Phani Rithvij <phanirithvij2000@gmail.com>
Phil Davis <phil.davis@inf.org>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Pier Paolo Ramon <ramonpierre@gmail.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
polyfloyd <polyfloyd@users.noreply.github.com>
pullmerge <166967364+pullmerge@users.noreply.github.com>
Quentin Hibon <qh.public@yahoo.com>
Rahmi Pruitt <rjpruitt16@gmail.com>
red_led <red-led@users.noreply.github.com>
Richard Hartmann <RichiH@users.noreply.github.com>
Robert Carosi (nov1n) <robert@carosi.nl>
Roberto Santalla <roobre@users.noreply.github.com>
Robin Schoonover <robin@cornhooves.org>
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
rubenbe <github-com-00ff86@vandamme.email>
Ruslan Yevdokymov <38809160+ruslanye@users.noreply.github.com>
Ryan Qian <i@bitbili.net>

@@ -303,28 +276,27 @@ Sergey Mishin (ralder) <ralder@yandex.ru>
Sertonix <83883937+Sertonix@users.noreply.github.com>
Severin von Wnuck-Lipinski <ss7@live.de>
Shaarad Dalvi <60266155+shaaraddalvi@users.noreply.github.com> <shdalv@microsoft.com>
Simon Frei (imsodin) <freisim93@gmail.com>
Simon Mwepu <simonmwepu@gmail.com>
Simon Pickup <simon@pickupinfinity.com>
Sly_tom_cat <slytomcat@mail.ru>
Sonu Kumar Saw <31889738+dev-saw99@users.noreply.github.com>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Sven Bachmann <dev@mcbachmann.de>
Syncthing Automation <automation@syncthing.net>
Syncthing Release Automation <release@syncthing.net>
Sébastien WENSKE <sebastien@wenske.fr>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Terrance <git@terrance.allofti.me>
TheCreeper <TheCreeper@users.noreply.github.com>
Thomas <9749173+uhthomas@users.noreply.github.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tim Nordenfur <tim@gurka.se>
Tobias Frölich <40638719+tobifroe@users.noreply.github.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tomasz Wilczyński <5626656+tomasz1986@users.noreply.github.com> <twilczynski@naver.com>
Tommy Thorn <tommy-github-email@thorn.ws>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Tyler Kropp <kropptyler@gmail.com>

@@ -341,10 +313,10 @@ WangXi <xib1102@icloud.com>
Will Rouesnel <wrouesnel@wrouesnel.com>
William A. Kennington III (wkennington) <william@wkennington.com>
wouter bolsterlee <wouter@bolsterl.ee>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
xarx00 <xarx00@users.noreply.github.com>
Xavier O. (damajor) <damajor@gmail.com>
xjtdy888 (xjtdy888) <xjtdy888@163.com>
xjtdy888 (xjtdy888) <xjtdy888@163.com> <xjtdy888@gmail.com>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
yparitcher <y@paritcher.com>
佛跳墙 <daoquan@qq.com>
落心 <luoxin.ttt@gmail.com>
CONTRIBUTING.md: 162 changed lines

@@ -34,19 +34,163 @@ Note that the previously used service at
retired and we kindly ask you to sign up on Weblate for continued
involvement.

## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](https://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.

## Contributing Documentation

Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).

## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! Here's a short rundown of
what you need to keep in mind:

- Don't worry. You are not expected to get everything right on the first
  attempt, we'll guide you through it.

- Make sure there is an
  [issue](https://github.com/syncthing/syncthing/issues) that describes the
  change you want to do. If the thing you want to do does not have an issue
  yet, please file one before starting work on it.

- Fork the repository and make your changes in a new branch. Once it's ready
  for review, create a pull request.

### Authorship

All code authors are listed in the AUTHORS file. When your first pull
request is accepted your details are added to the AUTHORS file and the list
of authors in the GUI. Commits must be made with the same name and email as
listed in the AUTHORS file. To accomplish this, ensure that your git
configuration is set correctly prior to making your first commit:

    $ git config --global user.name "Jane Doe"
    $ git config --global user.email janedoe@example.com

You must be reachable on the given email address. If you do not wish to use
your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.

### The Developer Certificate of Origin (DCO)

The Syncthing project requires the Developer Certificate of Origin (DCO)
sign-off on pull requests (PRs). This means that all commit messages must
contain a signature line to indicate that the developer accepts the DCO.

The DCO is a lightweight way for contributors to certify that they wrote (or
otherwise have the right to submit) the code and changes they are
contributing to the project. Here is the full [text of the
DCO](https://developercertificate.org):

---

By making a contribution to this project, I certify that:

1. The contribution was created in whole or in part by me and I have the
   right to submit it under the open source license indicated in the file;
   or

2. The contribution is based upon previous work that, to the best of my
   knowledge, is covered under an appropriate open source license and I have
   the right under that license to submit that work with modifications,
   whether created in whole or in part by me, under the same open source
   license (unless I am permitted to submit under a different license), as
   indicated in the file; or

3. The contribution was provided directly to me by some other person who
   certified (1), (2) or (3) and I have not modified it.

4. I understand and agree that this project and the contribution are public
   and that a record of the contribution (including all personal information
   I submit with it, including my sign-off) is maintained indefinitely and
   may be redistributed consistent with this project or the open source
   license(s) involved.

---

Contributors indicate that they adhere to these requirements by adding
a `Signed-off-by` line to their commit messages. For example:

    This is my commit message

    Signed-off-by: Random J Developer <random@developer.example.org>

The name and email address in this line must match those of the committing
author, and be the same as what you want in the AUTHORS file as per above.
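A sign-off line is simply a trailer appended to the commit message, typically via `git commit -s`. The matching requirement can be expressed as a small Go check; a sketch, assuming the commit author is available in "Name <email>" form:

```go
package main

import (
	"fmt"
	"strings"
)

// hasMatchingSignoff reports whether msg contains a Signed-off-by trailer
// whose name and email equal the commit author (given as "Name <email>").
func hasMatchingSignoff(msg, author string) bool {
	for _, line := range strings.Split(msg, "\n") {
		rest, ok := strings.CutPrefix(strings.TrimSpace(line), "Signed-off-by: ")
		if ok && rest == author {
			return true
		}
	}
	return false
}

func main() {
	msg := "This is my commit message\n\nSigned-off-by: Random J Developer <random@developer.example.org>\n"
	fmt.Println(hasMatchingSignoff(msg, "Random J Developer <random@developer.example.org>")) // true
	fmt.Println(hasMatchingSignoff(msg, "Someone Else <else@example.org>"))                   // false
}
```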
### Coding Style

#### General

- All text files use Unix line endings. The git settings already present in
  the repository attempt to enforce this.

- When making changes, follow the brace and parenthesis style of the
  surrounding code.

#### Go Specific

- Follow the conventions laid out in [Effective
  Go](https://go.dev/doc/effective_go) as much as makes sense. The review
  guidelines in [Go Code Review
  Comments](https://github.com/golang/go/wiki/CodeReviewComments) should
  generally be followed.

- Each commit should be `go fmt` clean.

- Imports are grouped per `goimports` standard; that is, standard
  library first, then third party libraries after a blank line.

### Commits

- Commit messages (and pull request titles) should follow the [conventional
  commits](https://www.conventionalcommits.org/en/v1.0.0/) specification and
  be in lower case.

- We use a scope description in the commit message subject. This is the
  component of Syncthing that the commit affects. For example, `gui`,
  `protocol`, `scanner`, `upnp`, etc -- typically, the part after
  `internal/`, `lib/` or `cmd/` in the package path (see the sketch after
  this list). If the commit doesn't affect a specific component, such as
  for changes to the build system or documentation, the scope should be
  omitted. The same goes for changes that affect many components which
  would be cumbersome to list.

- Commits that resolve an existing issue must include the issue number
  as `(fixes #123)` at the end of the commit message subject. A correctly
  formatted commit message subject looks like this:

      feat(dialer): add env var to disable proxy fallback (fixes #3006)

- If the commit message subject doesn't say it all, one or more paragraphs of
  describing text should be added to the commit message. This should explain
  why the change is made and what it accomplishes.

- When drafting a pull request, please feel free to add commits with
  corrections and merge from `main` when necessary. This provides a clear time
  line with changes and simplifies review. Do not, in general, rebase your
  commits, as this makes review harder.

- Pull requests are merged to `main` using squash merge. The "stream of
  consciousness" set of commits described in the previous point will be reduced
  to a single commit at merge time. The pull request title and description will
  be used as the commit message.
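As referenced above, the scope convention can be illustrated with a short Go sketch; `scopeForPath` is a hypothetical helper invented for this example, not part of the repository:

```go
package main

import (
	"fmt"
	"strings"
)

// scopeForPath illustrates the convention described above: the scope is
// the path element after internal/, lib/ or cmd/, and empty when no
// single component applies (build system, documentation, etc).
func scopeForPath(path string) string {
	for _, prefix := range []string{"internal/", "lib/", "cmd/"} {
		if rest, ok := strings.CutPrefix(path, prefix); ok {
			if i := strings.IndexByte(rest, '/'); i > 0 {
				return rest[:i]
			}
		}
	}
	return "" // omit the scope
}

func main() {
	fmt.Println(scopeForPath("lib/dialer/dialer.go"))  // dialer
	fmt.Println(scopeForPath("cmd/syncthing/main.go")) // syncthing
	fmt.Println(scopeForPath("build.go"))              // (empty: no scope)
}
```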
|
||||
|
||||
### Tests

Yes please, do add tests when adding features or fixing bugs. Also, when a
pull request is filed, a number of automatic checks are run on the code. This
includes:

- That the code actually builds and the test suite passes.

- That the code is correctly formatted (`go fmt`).

- That the commits are based on a reasonably recent `main`.

- That the output from `go vet` and the linters is clean. (This checks for a
  number of potential problems the compiler doesn't catch.)

## Licensing

All contributions are made available under the same license as the already
@@ -59,10 +203,6 @@ otherwise stated this means MPLv2, but there are exceptions:

- The documentation (man/...) is licensed under the Creative Commons
  Attribution 4.0 International License.

- Projects under vendor/... are copyright by and licensed from their
  respective original authors. Contributions should be made to the original
  project, not here.

Regardless of the license in effect, you retain the copyright to your
contribution.
@@ -23,47 +23,7 @@ example `UMASK=002`.

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run -p 8384:8384 -p 22000:22000/tcp -p 22000:22000/udp -p 21027:21027/udp \
    -v /wherever/st-sync:/var/syncthing \
    --hostname=my-syncthing \
    syncthing/syncthing:latest
```

**Docker compose**
```yml
---
version: "3"
services:
  syncthing:
    image: syncthing/syncthing
    container_name: syncthing
    hostname: my-syncthing
    environment:
      - PUID=1000
      - PGID=1000
    volumes:
      - /wherever/st-sync:/var/syncthing
    ports:
      - 8384:8384 # Web UI
      - 22000:22000/tcp # TCP file transfers
      - 22000:22000/udp # QUIC file transfers
      - 21027:21027/udp # Receive local discovery broadcasts
    restart: unless-stopped
```
## Discovery

Note that Docker's default network mode prevents local IP addresses from
being discovered, as Syncthing is only able to see the internal IP of the
container on the `172.17.0.0/16` subnet. This will result in poor transfer rates
if local device addresses are not manually configured.

It is therefore advisable to use the [host network mode](https://docs.docker.com/network/host/) instead:

**Docker cli**
```
$ docker pull syncthing/syncthing
$ docker run --network=host \
$ docker run --network=host -e STGUIADDRESS= \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```
@@ -80,33 +40,39 @@ services:

    environment:
      - PUID=1000
      - PGID=1000
      - STGUIADDRESS=
    volumes:
      - /wherever/st-sync:/var/syncthing
    network_mode: host
    restart: unless-stopped
    healthcheck:
      test: curl -fkLsS -m 2 127.0.0.1:8384/rest/noauth/health | grep -o --color=never OK || exit 1
      interval: 1m
      timeout: 10s
      retries: 3
```
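
The healthcheck above simply polls the unauthenticated health endpoint and
looks for an OK answer. For illustration, here is a minimal Go sketch doing
the equivalent probe; the address and the two-second limit mirror the compose
example and should be adjusted to your setup:

```go
package main

import (
    "fmt"
    "io"
    "net/http"
    "os"
    "time"
)

func main() {
    client := &http.Client{Timeout: 2 * time.Second}
    resp, err := client.Get("http://127.0.0.1:8384/rest/noauth/health")
    if err != nil {
        fmt.Fprintln(os.Stderr, "health check failed:", err)
        os.Exit(1)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    // A healthy instance answers 200 with a body containing "OK".
    fmt.Printf("%d: %s", resp.StatusCode, body)
}
```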
## Discovery

Please note that Docker's default network mode prevents local IP addresses
from being discovered, as Syncthing can only see the internal IP address of
the container on the `172.17.0.0/16` subnet. This would likely break the ability
of nodes to establish LAN connections properly, resulting in poor transfer
rates unless local device addresses are configured manually.

It is therefore strongly recommended to stick to the [host network mode](https://docs.docker.com/network/host/),
as shown above.

Be aware that Syncthing alone is then in control of which interfaces and ports
it listens on. You can edit the Syncthing configuration to change the defaults
if there are conflicts.
## GUI Security

By default Syncthing inside the Docker image listens on 0.0.0.0:8384 to
allow GUI connections via the Docker proxy. This is set by the
`STGUIADDRESS` environment variable in the Dockerfile, as it differs from
what Syncthing would otherwise use by default. This means you should set up
authentication in the GUI, like for any other externally reachable Syncthing
instance. If you do not require the GUI, or you use host networking, you can
unset the `STGUIADDRESS` variable to have Syncthing fall back to listening
on 127.0.0.1:

```
$ docker pull syncthing/syncthing
$ docker run -e STGUIADDRESS= \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

With the environment variable unset, Syncthing will follow what is set in the
configuration file / GUI settings dialog.

By default Syncthing inside the Docker image listens on `0.0.0.0:8384`. This
allows GUI connections when running without host network mode. The example
above unsets the `STGUIADDRESS` environment variable to have Syncthing fall
back to listening on what has been configured in the configuration file or the
GUI settings dialog. By default this is the localhost address `127.0.0.1`.
If you configure your GUI to be externally reachable, make sure you set up
authentication and enable TLS.
12
README.md

@@ -82,13 +82,11 @@ build process.
## Signed Releases

As of v0.10.15 and onwards, release binaries are GPG signed with the key
D26E6ED000654A3E, available from https://syncthing.net/security/ and
most key servers.

There is also a built-in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled-in ECDSA signature. macOS
binaries are also properly code signed.

Release binaries are GPG signed with the key available from
https://syncthing.net/security/. There is also a built-in automatic
upgrade mechanism (disabled in some distribution channels) which uses a
compiled-in ECDSA signature. macOS and Windows binaries are also
code-signed.
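
As an illustration of checking such a signature programmatically, here is a
minimal Go sketch using the frozen golang.org/x/crypto/openpgp package. The
file names are hypothetical placeholders; in practice you would point it at
the key fetched from the security page and at the actual release artifact
with its detached `.asc` signature:

```go
package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/crypto/openpgp"
)

func mustOpen(name string) *os.File {
    f, err := os.Open(name)
    if err != nil {
        log.Fatal(err)
    }
    return f
}

func main() {
    // Hypothetical file names; substitute the real key and artifacts.
    keyring, err := openpgp.ReadArmoredKeyRing(mustOpen("release-key.asc"))
    if err != nil {
        log.Fatal("reading keyring:", err)
    }
    signed := mustOpen("syncthing-linux-amd64.tar.gz")
    sig := mustOpen("syncthing-linux-amd64.tar.gz.asc")
    signer, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
    if err != nil {
        log.Fatal("signature verification failed:", err)
    }
    fmt.Println("good signature from key", signer.PrimaryKey.KeyIdString())
}
```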
## Documentation

12
buf.gen.yaml
Normal file

@@ -0,0 +1,12 @@
version: v2
managed:
  enabled: true
  override:
    - file_option: go_package_prefix
      value: github.com/syncthing/syncthing/internal/gen
plugins:
  - remote: buf.build/protocolbuffers/go:v1.35.1
    out: .
    opt: module=github.com/syncthing/syncthing
inputs:
  - directory: proto

10
buf.yaml
Normal file

@@ -0,0 +1,10 @@
version: v2
modules:
  - path: proto
    name: github.com/syncthing/syncthing
lint:
  use:
    - STANDARD
breaking:
  use:
    - WIRE_JSON

420
build.go

@@ -4,8 +4,8 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:build ignore
// +build ignore
//go:build tools
// +build tools

package main

@@ -15,7 +15,6 @@ import (
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
@@ -33,32 +32,32 @@ import (
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
||||
buildpkg "github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
    goarch        string
    goos          string
    noupgrade     bool
    version       string
    goCmd         string
    race          bool
    debug         = os.Getenv("BUILDDEBUG") != ""
    extraTags     string
    installSuffix string
    pkgdir        string
    cc            string
    run           string
    benchRun      string
    buildOut      string
    debugBinary   bool
    coverage      bool
    long          bool
    timeout       = "120s"
    longTimeout   = "600s"
    numVersions   = 5
    withNextGenGUI = os.Getenv("BUILD_NEXT_GEN_GUI") != ""
    goarch        string
    goos          string
    noupgrade     bool
    version       string
    goCmd         string
    race          bool
    debug         = os.Getenv("BUILDDEBUG") != ""
    extraTags     string
    installSuffix string
    pkgdir        string
    cc            string
    run           string
    benchRun      string
    buildOut      string
    debugBinary   bool
    coverage      bool
    long          bool
    timeout       = "120s"
    longTimeout   = "600s"
    numVersions   = 5
)
type target struct {

@@ -85,7 +84,6 @@ var targets = map[string]target{

    "all": {
        // Only valid for the "build" and "install" commands as it lacks all
        // the archive creation stuff. buildPkgs gets filled out in init()
        tags: []string{"purego"},
    },
    "syncthing": {
        // The default target for "build", "install", "tar", "zip", "deb", etc.

@@ -96,41 +94,40 @@ var targets = map[string]target{

        buildPkgs:  []string{"github.com/syncthing/syncthing/cmd/syncthing"},
        binaryName: "syncthing", // .exe will be added automatically for Windows builds
        archiveFiles: []archiveFile{
            {src: "{{binary}}", dst: "{{binary}}", perm: 0755},
            {src: "README.md", dst: "README.txt", perm: 0644},
            {src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
            {src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
            {src: "README.md", dst: "README.txt", perm: 0o644},
            {src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
            {src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
            // All files from etc/ and extra/ added automatically in init().
        },
        systemdService: "syncthing@*.service",
        installationFiles: []archiveFile{
            {src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
            {src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
            {src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
            {src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
            {src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
            {src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
            {src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
            {src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
            {src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
            {src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
            {src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
            {src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
            {src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
            {src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
            {src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
            {src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
            {src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0644},
            {src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
            {src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
            {src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
            {src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0644},
            {src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
            {src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0o644},
            {src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0o644},
            {src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0o644},
            {src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0o644},
            {src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0o644},
            {src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0o644},
            {src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0o644},
            {src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0o644},
            {src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0o644},
            {src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0o644},
            {src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0o644},
            {src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0o644},
            {src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0o644},
            {src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0o644},
            {src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0o644},
            {src: "etc/linux-sysctl/30-syncthing.conf", dst: "deb/usr/lib/sysctl.d/30-syncthing.conf", perm: 0o644},
            {src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0o644},
            {src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0o644},
            {src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0o644},
            {src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0o644},
            {src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0o644},
            {src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0o644},
            {src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0o644},
            {src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0o644},
            {src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0o644},
        },
    },
"stdiscosrv": {
|
||||
@@ -142,23 +139,22 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stdiscosrv"},
|
||||
binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "stdiscosrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service", dst: "deb/lib/systemd/system/stdiscosrv.service", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0o644},
|
||||
{src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0o644},
|
||||
},
|
||||
tags: []string{"purego"},
|
||||
},
|
||||
"strelaysrv": {
|
||||
name: "strelaysrv",
|
||||
@@ -169,61 +165,47 @@ var targets = map[string]target{
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaysrv"},
|
||||
binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0o644},
|
||||
},
|
||||
systemdService: "strelaysrv.service",
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0o755},
|
||||
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0o644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0o644},
|
||||
{src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service", dst: "deb/lib/systemd/system/strelaysrv.service", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0o644},
|
||||
{src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0o644},
|
||||
},
|
||||
},
|
||||
"strelaypoolsrv": {
|
||||
name: "strelaypoolsrv",
|
||||
debname: "syncthing-relaypoolsrv",
|
||||
debdeps: []string{"libc6"},
|
||||
description: "Syncthing Relay Pool Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
|
||||
archiveFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
installationFiles: []archiveFile{
|
||||
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
|
||||
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/README.txt", perm: 0644},
|
||||
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/LICENSE.txt", perm: 0644},
|
||||
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaypoolsrv/AUTHORS.txt", perm: 0644},
|
||||
},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv"},
|
||||
binaryName: "strelaypoolsrv",
|
||||
},
|
||||
"stupgrades": {
|
||||
name: "stupgrades",
|
||||
description: "Syncthing Upgrade Check Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stupgrades"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stupgrades"},
|
||||
binaryName: "stupgrades",
|
||||
},
|
||||
"stcrashreceiver": {
|
||||
name: "stcrashreceiver",
|
||||
description: "Syncthing Crash Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stcrashreceiver"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/stcrashreceiver"},
|
||||
binaryName: "stcrashreceiver",
|
||||
},
|
||||
"ursrv": {
|
||||
name: "ursrv",
|
||||
description: "Syncthing Usage Reporting Server",
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/ursrv"},
|
||||
buildPkgs: []string{"github.com/syncthing/syncthing/cmd/infra/ursrv"},
|
||||
binaryName: "ursrv",
|
||||
},
|
||||
}
|
||||
@@ -232,15 +214,11 @@ func initTargets() {

    all := targets["all"]
    pkgs, _ := filepath.Glob("cmd/*")
    for _, pkg := range pkgs {
        pkg = filepath.Base(pkg)
        if strings.HasPrefix(pkg, ".") {
            // ignore dotfiles
        if files, err := filepath.Glob(pkg + "/*.go"); err != nil || len(files) == 0 {
            // No go files in the directory
            continue
        }
        if noupgrade && pkg == "stupgrades" {
            continue
        }
        all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
        all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/%s", pkg))
    }
    targets["all"] = all
@@ -248,13 +226,13 @@ func initTargets() {

    // and "extra" dirs.
    syncthingPkg := targets["syncthing"]
    for _, file := range listFiles("etc") {
        syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
        syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
    }
    for _, file := range listFiles("extra") {
        syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
        syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0o644})
    }
    for _, file := range listFiles("extra") {
        syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
        syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0o644})
    }
    targets["syncthing"] = syncthingPkg
}
@@ -310,10 +288,10 @@ func runCommand(cmd string, target target) {

        build(target, tags)

    case "test":
        test(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
        test(strings.Fields(extraTags), "github.com/syncthing/syncthing/internal/...", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")

    case "bench":
        bench(strings.Fields(extraTags), "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")
        bench(strings.Fields(extraTags), "github.com/syncthing/syncthing/internal/...", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/...")

    case "integration":
        integration(false)

@@ -344,12 +322,14 @@ func runCommand(cmd string, target target) {

    case "tar":
        buildTar(target, tags)
        writeCompatJSON()

    case "zip":
        buildZip(target, tags)
        writeCompatJSON()

    case "deb":
        buildDeb(target)
        buildDeb(target, tags)

    case "vet":
        metalintShort()
@@ -399,7 +379,6 @@ func parseFlags() {

    flag.IntVar(&numVersions, "num-versions", numVersions, "Number of versions for changelog command")
    flag.StringVar(&run, "run", "", "Specify which tests to run")
    flag.StringVar(&benchRun, "bench", "", "Specify which benchmarks to run")
    flag.BoolVar(&withNextGenGUI, "with-next-gen-gui", withNextGenGUI, "Also build 'newgui'")
    flag.StringVar(&buildOut, "build-out", "", "Set the '-o' value for 'go build'")
    flag.Parse()
}
@@ -407,7 +386,6 @@ func parseFlags() {

func test(tags []string, pkgs ...string) {
    lazyRebuildAssets()

    tags = append(tags, "purego")
    args := []string{"test", "-tags", strings.Join(tags, " ")}
    if long {
        timeout = longTimeout
    }

@@ -441,7 +419,7 @@ func bench(tags []string, pkgs ...string) {

func integration(bench bool) {
    lazyRebuildAssets()
    args := []string{"test", "-v", "-timeout", "60m", "-tags"}
    tags := "purego,integration"
    tags := "integration"
    if bench {
        tags += ",benchmark"
    }
@@ -473,10 +451,6 @@ func benchArgs() []string {

}

func install(target target, tags []string) {
    if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
        log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
    }

    lazyRebuildAssets()

    tags = append(target.tags, tags...)

@@ -500,16 +474,12 @@ func install(target target, tags []string) {

        defer shouldCleanupSyso(sysoPath)
    }

    args := []string{"install", "-v"}
    args := []string{"install"}
    args = appendParameters(args, tags, target.buildPkgs...)
    runPrint(goCmd, args...)
}

func build(target target, tags []string) {
    if (target.name == "syncthing" || target.name == "") && !withNextGenGUI {
        log.Println("Notice: Next generation GUI will not be built; see --with-next-gen-gui.")
    }

    lazyRebuildAssets()
    tags = append(target.tags, tags...)

@@ -532,7 +502,7 @@ func build(target target, tags []string) {

        defer shouldCleanupSyso(sysoPath)
    }

    args := []string{"build", "-v"}
    args := []string{"build"}
    if buildOut != "" {
        args = append(args, "-o", buildOut)
    }
@@ -544,13 +514,6 @@ func setBuildEnvVars() {

    os.Setenv("GOOS", goos)
    os.Setenv("GOARCH", goarch)
    os.Setenv("CC", cc)
    if os.Getenv("CGO_ENABLED") == "" {
        switch goos {
        case "darwin", "solaris":
        default:
            os.Setenv("CGO_ENABLED", "0")
        }
    }
}

func appendParameters(args []string, tags []string, pkgs ...string) []string {
@@ -629,7 +592,7 @@ func buildZip(target target, tags []string) {

    fmt.Println(filename)
}

func buildDeb(target target) {
func buildDeb(target target, tags []string) {
    os.RemoveAll("deb")

    // "goarch" here is set to whatever the Debian packages expect. We correct

@@ -643,7 +606,7 @@ func buildDeb(target target) {

        goarch = "arm"
    }

    build(target, []string{"noupgrade"})
    build(target, append(tags, "noupgrade"))

    for i := range target.installationFiles {
        target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.BinaryName(), 1)

@@ -665,6 +628,9 @@ func buildDeb(target target) {

        // than just 0.14.26. This rectifies that.
        debver = strings.Replace(debver, "-", "~", -1)
    }
    if strings.Contains(debver, "_") {
        debver = strings.Replace(debver, "_", "~", -1)
    }
    args := []string{
        "-t", "deb",
        "-s", "dir",
@@ -752,7 +718,7 @@ func shouldBuildSyso(dir string) (string, error) {

    }

    jsonPath := filepath.Join(dir, "versioninfo.json")
    err = os.WriteFile(jsonPath, bs, 0644)
    err = os.WriteFile(jsonPath, bs, 0o644)
    if err != nil {
        return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
    }

@@ -766,12 +732,9 @@ func shouldBuildSyso(dir string) (string, error) {

    sysoPath := filepath.Join(dir, "cmd", "syncthing", "resource.syso")

    // See https://github.com/josephspurrier/goversioninfo#command-line-flags
    armOption := ""
    if strings.Contains(goarch, "arm") {
        armOption = "-arm=true"
    }

    if _, err := runError("goversioninfo", "-o", sysoPath, armOption); err != nil {
    arm := strings.HasPrefix(goarch, "arm")
    a64 := strings.Contains(goarch, "64")
    if _, err := runError("goversioninfo", "-o", sysoPath, fmt.Sprintf("-arm=%v", arm), fmt.Sprintf("-64=%v", a64)); err != nil {
        return "", errors.New("failed to create " + sysoPath + ": " + err.Error())
    }
@@ -811,7 +774,7 @@ func copyFile(src, dst string, perm os.FileMode) error {

    }

copy:
    os.MkdirAll(filepath.Dir(dst), 0777)
    os.MkdirAll(filepath.Dir(dst), 0o777)
    if err := os.WriteFile(dst, in, perm); err != nil {
        return err
    }

@@ -836,50 +799,18 @@ func listFiles(dir string) []string {

func rebuildAssets() {
    os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
    runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
    runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/api/auto", "github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto")
}

func lazyRebuildAssets() {
    shouldRebuild := shouldRebuildAssets("lib/api/auto/gui.files.go", "gui") ||
        shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/gui")

    if withNextGenGUI {
        shouldRebuild = buildNextGenGUI() || shouldRebuild
    }
        shouldRebuildAssets("cmd/infra/strelaypoolsrv/auto/gui.files.go", "cmd/infra/strelaypoolsrv/gui")

    if shouldRebuild {
        rebuildAssets()
    }
}

func buildNextGenGUI() bool {
    // Check if we need to run the npm process, and if so also set the flag
    // to rebuild Go assets afterwards. The index.html is regenerated every
    // time by the build process. This assumes the new GUI ends up in
    // next-gen-gui/dist/next-gen-gui.

    if !shouldRebuildAssets("gui/next-gen-gui/index.html", "next-gen-gui") {
        // The GUI is up to date.
        return false
    }

    runPrintInDir("next-gen-gui", "npm", "install")
    runPrintInDir("next-gen-gui", "npm", "run", "build", "--", "--prod", "--subresource-integrity")

    rmr("gui/tech-ui")

    for _, src := range listFiles("next-gen-gui/dist") {
        rel, _ := filepath.Rel("next-gen-gui/dist", src)
        dst := filepath.Join("gui", rel)
        if err := copyFile(src, dst, 0644); err != nil {
            fmt.Println("copy:", err)
            os.Exit(1)
        }
    }

    return true
}

func shouldRebuildAssets(target, srcdir string) bool {
    info, err := os.Stat(target)
    if err != nil {
@@ -927,22 +858,9 @@ func updateDependencies() {

}

func proto() {
    pv := protobufVersion()
    repo := "https://github.com/gogo/protobuf.git"
    path := filepath.Join("repos", "protobuf")

    runPrint(goCmd, "install", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
    os.MkdirAll("repos", 0755)

    if _, err := os.Stat(path); err != nil {
        runPrint("git", "clone", repo, path)
    } else {
        runPrintInDir(path, "git", "fetch")
    }
    runPrintInDir(path, "git", "checkout", pv)

    runPrint(goCmd, "generate", "github.com/syncthing/syncthing/cmd/stdiscosrv")
    runPrint(goCmd, "generate", "proto/generate.go")
    // buf needs to be installed
    // https://buf.build/docs/installation/
    runPrint("buf", "generate")
}
func testmocks() {

@@ -952,7 +870,6 @@ func testmocks() {

        "github.com/syncthing/syncthing/lib/connections",
        "github.com/syncthing/syncthing/lib/discover",
        "github.com/syncthing/syncthing/lib/events",
        "github.com/syncthing/syncthing/lib/logger",
        "github.com/syncthing/syncthing/lib/model",
        "github.com/syncthing/syncthing/lib/protocol",
    }
@@ -983,6 +900,7 @@ func weblate() {

func ldflags(tags []string) string {
    b := new(strings.Builder)
    b.WriteString("-w")
    b.WriteString(" -buildid=")
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Version=%s", version)
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Stamp=%d", buildStamp())
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.User=%s", buildUser())
@@ -1004,6 +922,9 @@ func rmr(paths ...string) {

}

func getReleaseVersion() (string, error) {
    if ver := os.Getenv("VERSION"); ver != "" {
        return strings.TrimSpace(ver), nil
    }
    bs, err := os.ReadFile("RELEASE")
    if err != nil {
        return "", err

@@ -1048,7 +969,7 @@ func getGitVersion() (string, error) {

}

func getVersion() string {
    // First try for a RELEASE file,
    // First try for a RELEASE file or $VERSION env var,
    if ver, err := getReleaseVersion(); err == nil {
        return ver
    }
@@ -1377,10 +1298,7 @@ func zipFile(out string, files []archiveFile) {

}

func codesign(target target) {
    switch goos {
    case "windows":
        windowsCodesign(target.BinaryName())
    case "darwin":
    if goos == "darwin" {
        macosCodesign(target.BinaryName())
    }
}
@@ -1404,70 +1322,6 @@ func macosCodesign(file string) {

    }
}

func windowsCodesign(file string) {
    st := "signtool.exe"

    if path := os.Getenv("CODESIGN_SIGNTOOL"); path != "" {
        st = path
    }

    for i, algo := range []string{"sha1", "sha256"} {
        args := []string{"sign", "/fd", algo}
        if f := os.Getenv("CODESIGN_CERTIFICATE_FILE"); f != "" {
            args = append(args, "/f", f)
        } else if b := os.Getenv("CODESIGN_CERTIFICATE_BASE64"); b != "" {
            // Decode the PFX certificate from base64.
            bs, err := base64.RawStdEncoding.DecodeString(b)
            if err != nil {
                log.Println("Codesign: signing failed: decoding base64:", err)
                return
            }

            // Write it to a temporary file
            f, err := os.CreateTemp("", "codesign-*.pfx")
            if err != nil {
                log.Println("Codesign: signing failed: creating temp file:", err)
                return
            }
            _ = f.Chmod(0600) // best effort remove other users' access
            defer os.Remove(f.Name())
            if _, err := f.Write(bs); err != nil {
                log.Println("Codesign: signing failed: writing temp file:", err)
                return
            }
            if err := f.Close(); err != nil {
                log.Println("Codesign: signing failed: closing temp file:", err)
                return
            }

            // Use that when signing
            args = append(args, "/f", f.Name())
        }
        if p := os.Getenv("CODESIGN_CERTIFICATE_PASSWORD"); p != "" {
            args = append(args, "/p", p)
        }
        if tr := os.Getenv("CODESIGN_TIMESTAMP_SERVER"); tr != "" {
            switch algo {
            case "sha256":
                args = append(args, "/tr", tr, "/td", algo)
            default:
                args = append(args, "/t", tr)
            }
        }
        if i > 0 {
            args = append(args, "/as")
        }
        args = append(args, file)

        bs, err := runError(st, args...)
        if err != nil {
            log.Printf("Codesign: signing failed: %v: %s", err, string(bs))
            return
        }
        log.Println("Codesign: successfully signed", file, "using", algo)
    }
}

func metalint() {
    lazyRebuildAssets()
    runPrint(goCmd, "test", "-run", "Metalint", "./meta")
@@ -1485,14 +1339,6 @@ func (t target) BinaryName() string {

    return t.binaryName
}

func protobufVersion() string {
    bs, err := runError(goCmd, "list", "-f", "{{.Version}}", "-m", "github.com/gogo/protobuf")
    if err != nil {
        log.Fatal("Getting protobuf version:", err)
    }
    return string(bs)
}

func currentAndLatestVersions(n int) ([]string, error) {
    bs, err := runError("git", "tag", "--sort", "taggerdate")
    if err != nil {
@@ -1559,3 +1405,29 @@ func nextPatchVersion(ver string) string {

    digits[len(digits)-1] = strconv.Itoa(n + 1)
    return strings.Join(digits, ".")
}

func writeCompatJSON() {
    bs, err := os.ReadFile("compat.yaml")
    if err != nil {
        log.Fatal("Reading compat.yaml:", err)
    }

    var entries []upgrade.ReleaseCompatibility
    if err := yaml.Unmarshal(bs, &entries); err != nil {
        log.Fatal("Parsing compat.yaml:", err)
    }

    rt := runtime.Version()
    for _, e := range entries {
        if !strings.HasPrefix(rt, e.Runtime) {
            continue
        }
        bs, _ := json.MarshalIndent(e, "", "  ")
        if err := os.WriteFile("compat.json", bs, 0o644); err != nil {
            log.Fatal("Writing compat.json:", err)
        }
        return
    }

    log.Fatalf("runtime %v not found in compat.yaml", rt)
}
3
build.sh

@@ -23,10 +23,11 @@ case "${1:-default}" in

    prerelease)
        script authors
        script copyrights
        build weblate
        pushd man ; ./refresh.sh ; popd
        git add -A gui man AUTHORS
        git commit -m 'gui, man, authors: Update docs, translations, and contributors'
        git commit -m 'chore(gui, man, authors): update docs, translations, and contributors'
        ;;

    *)
@@ -7,6 +7,7 @@

package main

import (
    "crypto/sha256"
    "errors"
    "flag"
    "fmt"

@@ -16,7 +17,6 @@ import (

    "path/filepath"

    _ "github.com/syncthing/syncthing/lib/automaxprocs"
    "github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -15,6 +15,9 @@ import (

    "strings"
    "time"

    "google.golang.org/protobuf/proto"

    "github.com/syncthing/syncthing/internal/gen/discoproto"
    _ "github.com/syncthing/syncthing/lib/automaxprocs"
    "github.com/syncthing/syncthing/lib/beacon"
    "github.com/syncthing/syncthing/lib/discover"

@@ -75,20 +78,21 @@ func recv(bc beacon.Interface) {

            continue
        }

        var ann discover.Announce
        ann.Unmarshal(data[4:])
        var ann discoproto.Announce
        proto.Unmarshal(data[4:], &ann)

        if ann.ID == myID {
        id, _ := protocol.DeviceIDFromBytes(ann.Id)
        if id == myID {
            // This is one of our own fake packets, don't print it.
            continue
        }

        // Print announcement details for the first packet from a given
        // device ID and source address, or if -all was given.
        key := ann.ID.String() + src.String()
        key := id.String() + src.String()
        if all || !seen[key] {
            log.Printf("Announcement from %v\n", src)
            log.Printf("  %v at %s\n", ann.ID, strings.Join(ann.Addresses, ", "))
            log.Printf("  %v at %s\n", id, strings.Join(ann.Addresses, ", "))
            seen[key] = true
        }
    }

@@ -96,11 +100,11 @@ func recv(bc beacon.Interface) {

// sends fake discovery announcements once every second
func send(bc beacon.Interface) {
    ann := discover.Announce{
        ID: myID,
    ann := &discoproto.Announce{
        Id:        myID[:],
        Addresses: []string{"tcp://fake.example.com:12345"},
    }
    bs, _ := ann.Marshal()
    bs, _ := proto.Marshal(ann)

    for {
        bc.Send(bs)
@@ -72,7 +72,7 @@ func main() {

    if *standardBlocks || blockSize < protocol.MinBlockSize {
        blockSize = protocol.BlockSize(fi.Size())
    }
    bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil, true)
    bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil)
    if err != nil {
        log.Fatal(err)
    }
@@ -7,6 +7,7 @@

package main

import (
    "crypto/sha256"
    "flag"
    "fmt"
    "io"

@@ -14,7 +15,6 @@ import (

    "time"

    _ "github.com/syncthing/syncthing/lib/automaxprocs"
    "github.com/syncthing/syncthing/lib/sha256"
)

func main() {
@@ -8,6 +8,7 @@ package main

import (
    "bytes"
    "cmp"
    "compress/gzip"
    "context"
    "io"

@@ -15,7 +16,7 @@ import (

    "math"
    "os"
    "path/filepath"
    "sort"
    "slices"
    "time"
)

@@ -177,8 +178,8 @@ func (d *diskStore) inventory() error {

        })
        return nil
    })
    sort.Slice(d.currentFiles, func(i, j int) bool {
        return d.currentFiles[i].mtime < d.currentFiles[j].mtime
    slices.SortFunc(d.currentFiles, func(a, b currentFile) int {
        return cmp.Compare(a.mtime, b.mtime)
    })
    var oldest time.Duration
    if len(d.currentFiles) > 0 {
@@ -14,6 +14,7 @@ package main

import (
    "context"
    "crypto/sha256"
    "encoding/json"
    "fmt"
    "io"

@@ -21,25 +22,29 @@ import (

    "net/http"
    "os"
    "path/filepath"
    "regexp"
    "strings"

    "github.com/alecthomas/kong"
    raven "github.com/getsentry/raven-go"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    _ "github.com/syncthing/syncthing/lib/automaxprocs"
    "github.com/syncthing/syncthing/lib/sha256"
    "github.com/syncthing/syncthing/lib/build"
    "github.com/syncthing/syncthing/lib/ur"
)

const maxRequestSize = 1 << 20 // 1 MiB

type cli struct {
    Dir           string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
    DSN           string `help:"Sentry DSN" env:"SENTRY_DSN"`
    Listen        string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
    MaxDiskFiles  int    `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
    MaxDiskSizeMB int64  `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
    SentryQueue   int    `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
    DiskQueue     int    `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
    Dir            string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
    DSN            string `help:"Sentry DSN" env:"SENTRY_DSN"`
    Listen         string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
    MaxDiskFiles   int    `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
    MaxDiskSizeMB  int64  `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
    SentryQueue    int    `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
    DiskQueue      int    `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
    MetricsListen  string `help:"HTTP listen address for metrics" default:":8081" env:"METRICS_LISTEN_ADDRESS"`
    IgnorePatterns string `help:"File containing ignore patterns (regexp)" env:"IGNORE_PATTERNS" type:"existingfile"`
}

func main() {
@@ -62,19 +67,38 @@ func main() {

    }
    go ss.Serve(context.Background())

    var ip *ignorePatterns
    if params.IgnorePatterns != "" {
        var err error
        ip, err = loadIgnorePatterns(params.IgnorePatterns)
        if err != nil {
            log.Fatalf("Failed to load ignore patterns: %v", err)
        }
    }

    cr := &crashReceiver{
        store:  ds,
        sentry: ss,
        ignore: ip,
    }

    mux.Handle("/", cr)
    mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
        w.Write([]byte("OK"))
    })
    mux.Handle("/metrics", promhttp.Handler())

    if params.MetricsListen != "" {
        mmux := http.NewServeMux()
        mmux.Handle("/metrics", promhttp.Handler())
        go func() {
            if err := http.ListenAndServe(params.MetricsListen, mmux); err != nil {
                log.Fatalln("HTTP serve metrics:", err)
            }
        }()
    }

    if params.DSN != "" {
        mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports")))
        mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"), ip))
    }

    log.SetOutput(os.Stdout)

@@ -83,7 +107,7 @@ func main() {

    }
}
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) {
func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http.ResponseWriter, req *http.Request) {
    return func(w http.ResponseWriter, req *http.Request) {
        result := "failure"
        defer func() {

@@ -94,14 +118,19 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht

        bs, err := io.ReadAll(lr)
        req.Body.Close()
        if err != nil {
            http.Error(w, err.Error(), 500)
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }

        if ignore.match(bs) {
            result = "ignored"
            return
        }

        var reports []ur.FailureReport
        err = json.Unmarshal(bs, &reports)
        if err != nil {
            http.Error(w, err.Error(), 400)
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        if len(reports) == 0 {

@@ -110,9 +139,9 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht

            return
        }

        version, err := parseVersion(reports[0].Version)
        version, err := build.ParseVersion(reports[0].Version)
        if err != nil {
            http.Error(w, err.Error(), 400)
            http.Error(w, err.Error(), http.StatusBadRequest)
            return
        }
        for _, r := range reports {
@@ -158,3 +187,42 @@ func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string,

    }
    return reportServer + path, nil
}

type ignorePatterns struct {
    patterns []*regexp.Regexp
}

func loadIgnorePatterns(path string) (*ignorePatterns, error) {
    bs, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }

    var patterns []*regexp.Regexp
    for _, line := range strings.Split(string(bs), "\n") {
        line = strings.TrimSpace(line)
        if line == "" {
            continue
        }
        re, err := regexp.Compile(line)
        if err != nil {
            return nil, err
        }
        patterns = append(patterns, re)
    }

    log.Printf("Loaded %d ignore patterns", len(patterns))
    return &ignorePatterns{patterns: patterns}, nil
}

func (i *ignorePatterns) match(report []byte) bool {
    if i == nil {
        return false
    }
    for _, re := range i.patterns {
        if re.Match(report) {
            return true
        }
    }
    return false
}
@@ -18,6 +18,7 @@ import (

    raven "github.com/getsentry/raven-go"
    "github.com/maruel/panicparse/v2/stack"
    "github.com/syncthing/syncthing/lib/build"
)

const reportServer = "https://crash.syncthing.net/report/"

@@ -89,7 +90,7 @@ func sendReport(dsn string, pkt *raven.Packet, userID string) error {

    }

    // The client sets release and such on the packet before sending, in the
    // misguided idea that it knows this better than than the packet we give
    // misguided idea that it knows this better than the packet we give
    // it. So we copy the values from the packet to the client first...
    cli.SetRelease(pkt.Release)
    cli.SetEnvironment(pkt.Environment)

@@ -105,7 +106,7 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {

        return nil, errors.New("no first line")
    }

    version, err := parseVersion(string(parts[0]))
    version, err := build.ParseVersion(string(parts[0]))
    if err != nil {
        return nil, err
    }

@@ -135,7 +136,7 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {

    r := bytes.NewReader(report)
    ctx, _, err := stack.ScanSnapshot(r, io.Discard, stack.DefaultOpts())
    if err != nil && err != io.EOF {
    if err != nil && !errors.Is(err, io.EOF) {
        return nil, err
    }
    if ctx == nil || len(ctx.Goroutines) == 0 {

@@ -143,12 +144,12 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {

    }

    // Lock the source code loader to the version we are processing here.
    if version.commit != "" {
    if version.Commit != "" {
        // We have a commit hash, so we know exactly which source to use
        loader.LockWithVersion(version.commit)
    } else if strings.HasPrefix(version.tag, "v") {
        loader.LockWithVersion(version.Commit)
    } else if strings.HasPrefix(version.Tag, "v") {
        // Lets hope the tag is close enough
        loader.LockWithVersion(version.tag)
        loader.LockWithVersion(version.Tag)
    } else {
        // Last resort
        loader.LockWithVersion("main")
@@ -215,106 +216,26 @@ func crashReportFingerprint(message string) []string {

    return []string{"{{ default }}", message}
}

// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
// or, somewhere along the way the "+" in the version tag disappeared:
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
var (
    longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
    gitExtraRE    = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
    gitExtraSepRE = regexp.MustCompile(`[.-]`)              // dot or dash
)

type version struct {
    version  string   // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
    tag      string   // "v1.1.4-rc.1"
    commit   string   // "6aaae618", blank when absent
    codename string   // "Erbium Earthworm"
    runtime  string   // "go1.12.5"
    goos     string   // "darwin"
    goarch   string   // "amd64"
    builder  string   // "jb@kvin.kastelo.net"
    extra    []string // "foo", "bar"
}

func (v version) environment() string {
    if v.commit != "" {
        return "Development"
    }
    if strings.Contains(v.tag, "-rc.") {
        return "Candidate"
    }
    if strings.Contains(v.tag, "-") {
        return "Beta"
    }
    return "Stable"
}

func parseVersion(line string) (version, error) {
    m := longVersionRE.FindStringSubmatch(line)
    if len(m) == 0 {
        return version{}, errors.New("unintelligeble version string")
    }

    v := version{
        version:  m[1],
        codename: m[2],
        runtime:  m[3],
        goos:     m[4],
        goarch:   m[5],
        builder:  m[6],
    }

    // Split the version tag into tag and commit. This is old style
    // v1.2.3-something.4+11-g12345678 or newer with just dots
    // v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
    parts := []string{v.version}
    if strings.Contains(v.version, "+") {
        parts = strings.Split(v.version, "+")
    } else {
        idxs := gitExtraRE.FindStringIndex(v.version)
        if len(idxs) > 0 {
            parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
        }
    }
    v.tag = parts[0]
    if len(parts) > 1 {
        fields := gitExtraSepRE.Split(parts[1], -1)
        if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
            v.commit = fields[1][1:]
        }
    }

    if len(m) >= 8 && m[7] != "" {
        tags := strings.Split(m[7], ",")
        for i := range tags {
            tags[i] = strings.TrimSpace(tags[i])
        }
        v.extra = tags
    }

    return v, nil
}

func packet(version version, reportType string) *raven.Packet {
func packet(version build.VersionParts, reportType string) *raven.Packet {
    pkt := &raven.Packet{
        Platform:    "go",
        Release:     version.tag,
        Environment: version.environment(),
        Release:     version.Tag,
        Environment: version.Environment(),
        Tags: raven.Tags{
            raven.Tag{Key: "version", Value: version.version},
            raven.Tag{Key: "tag", Value: version.tag},
            raven.Tag{Key: "codename", Value: version.codename},
            raven.Tag{Key: "runtime", Value: version.runtime},
            raven.Tag{Key: "goos", Value: version.goos},
            raven.Tag{Key: "goarch", Value: version.goarch},
            raven.Tag{Key: "builder", Value: version.builder},
            raven.Tag{Key: "version", Value: version.Version},
            raven.Tag{Key: "tag", Value: version.Tag},
            raven.Tag{Key: "codename", Value: version.Codename},
            raven.Tag{Key: "runtime", Value: version.Runtime},
            raven.Tag{Key: "goos", Value: version.GOOS},
            raven.Tag{Key: "goarch", Value: version.GOARCH},
            raven.Tag{Key: "builder", Value: version.Builder},
            raven.Tag{Key: "report_type", Value: reportType},
        },
    }
    if version.commit != "" {
        pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
    if version.Commit != "" {
        pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.Commit})
    }
    for _, tag := range version.extra {
    for _, tag := range version.Extra {
        pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
    }
    return pkt
@@ -12,66 +12,6 @@ import (
	"testing"
)

-func TestParseVersion(t *testing.T) {
-	cases := []struct {
-		longVersion string
-		parsed      version
-	}{
-		{
-			longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
-			parsed: version{
-				version:  "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
-				tag:      "v1.1.4-rc.1",
-				commit:   "6aaae618",
-				codename: "Erbium Earthworm",
-				runtime:  "go1.12.5",
-				goos:     "darwin",
-				goarch:   "amd64",
-				builder:  "jb@kvin.kastelo.net",
-			},
-		},
-		{
-			longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
-			parsed: version{
-				version:  "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
-				tag:      "v1.1.4-rc.1",
-				commit:   "6aaae618",
-				codename: "Erbium Earthworm",
-				runtime:  "go1.12.5",
-				goos:     "darwin",
-				goarch:   "amd64",
-				builder:  "jb@kvin.kastelo.net",
-				extra:    []string{"foo", "bar"},
-			},
-		},
-		{
-			longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
-			parsed: version{
-				version:  "v1.23.7-dev.26.gdf7b56ae-stversionextra",
-				tag:      "v1.23.7-dev",
-				commit:   "df7b56ae",
-				codename: "Fermium Flea",
-				runtime:  "go1.20.5",
-				goos:     "darwin",
-				goarch:   "arm64",
-				builder:  "jb@ok.kastelo.net",
-				extra:    []string{"Some Wrapper", "purego", "stnoupgrade"},
-			},
-		},
-	}
-
-	for _, tc := range cases {
-		v, err := parseVersion(tc.longVersion)
-		if err != nil {
-			t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
-			continue
-		}
-		if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
-			t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
-		}
-	}
-}

func TestParseReport(t *testing.T) {
	bs, err := os.ReadFile("_testdata/panic.log")
	if err != nil {
@@ -71,7 +71,6 @@ func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]b
	url := urlPrefix + l.version + filename[idx:]
	resp, err := l.client.Get(url)
-
	if err != nil {
		fmt.Println("Loading source:", err)
		return nil, 0
@@ -12,11 +12,16 @@ import (
	"net/http"
	"path"
	"strings"
+	"sync"
)

type crashReceiver struct {
	store  *diskStore
	sentry *sentryService
+	ignore *ignorePatterns
+
+	ignoredMut sync.RWMutex
+	ignored    map[string]struct{}
}

func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -64,6 +69,12 @@ func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http
// serveHead responds to HEAD requests by checking if the named report
// already exists in the system.
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
+	r.ignoredMut.RLock()
+	_, ignored := r.ignored[reportID]
+	r.ignoredMut.RUnlock()
+	if ignored {
+		return // found
+	}
	if !r.store.Exists(reportID) {
		http.Error(w, "Not found", http.StatusNotFound)
	}
@@ -76,6 +87,15 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
		metricCrashReportsTotal.WithLabelValues(result).Inc()
	}()

+	r.ignoredMut.RLock()
+	_, ignored := r.ignored[reportID]
+	r.ignoredMut.RUnlock()
+	if ignored {
+		result = "ignored_cached"
+		io.Copy(io.Discard, req.Body)
+		return // found
+	}
+
	// Read at most maxRequestSize of report data.
	log.Println("Receiving report", reportID)
	lr := io.LimitReader(req.Body, maxRequestSize)
@@ -86,6 +106,17 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
		return
	}

+	if r.ignore.match(bs) {
+		r.ignoredMut.Lock()
+		if r.ignored == nil {
+			r.ignored = make(map[string]struct{})
+		}
+		r.ignored[reportID] = struct{}{}
+		r.ignoredMut.Unlock()
+		result = "ignored"
+		return
+	}
+
	result = "success"

	// Store the report
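The ignore cache added above is a small instance of a common Go pattern: a lazily initialized set guarded by an RWMutex, so the hot path pays only a read lock. A self-contained sketch with illustrative names:

package main

import "sync"

type seenSet struct {
	mut sync.RWMutex
	ids map[string]struct{}
}

func (s *seenSet) Has(id string) bool {
	s.mut.RLock()
	_, ok := s.ids[id]
	s.mut.RUnlock()
	return ok
}

func (s *seenSet) Add(id string) {
	s.mut.Lock()
	if s.ids == nil { // lazily allocate, mirroring r.ignored above
		s.ids = make(map[string]struct{})
	}
	s.ids[id] = struct{}{}
	s.mut.Unlock()
}

func main() {
	var s seenSet
	s.Add("report-1")
	_ = s.Has("report-1") // true
}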
@@ -9,14 +9,13 @@ package main

import (
	"bytes"
	"compress/gzip"
-	"fmt"
+	"crypto/sha256"
+	"encoding/hex"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"time"
-
-	"github.com/syncthing/syncthing/lib/sha256"
)

// userIDFor returns a string we can use as the user ID for the purpose of
@@ -24,7 +23,7 @@ import (
// remote IP, and the current month.
func userIDFor(req *http.Request) string {
	addr := req.RemoteAddr
-	if fwd := req.Header.Get("x-forwarded-for"); fwd != "" {
+	if fwd := req.Header.Get("X-Forwarded-For"); fwd != "" {
		addr = fwd
	}
	if host, _, err := net.SplitHostPort(addr); err == nil {
@@ -33,7 +32,7 @@ func userIDFor(req *http.Request) string {
	now := time.Now().Format("200601")
	salt := "stcrashreporter"
	hash := sha256.Sum256([]byte(salt + addr + now))
-	return fmt.Sprintf("%x", hash[:8])
+	return hex.EncodeToString(hash[:8])
}

// 01234567890abcdef... => 01/23
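In isolation, the user-ID scheme above is: hex-encode the first eight bytes of SHA-256(salt + address + current month), so IDs rotate monthly without storing addresses. A runnable sketch using the same salt and month format; the address is a documentation-range placeholder:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func userID(addr string) string {
	now := time.Now().Format("200601") // year+month: IDs change each month
	hash := sha256.Sum256([]byte("stcrashreporter" + addr + now))
	return hex.EncodeToString(hash[:8]) // 8 bytes -> 16 hex chars
}

func main() {
	fmt.Println(userID("203.0.113.7"))
}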
@@ -53,5 +52,5 @@ func compressAndWrite(bs []byte, fullPath string) error {
	gw.Close()

	// Create an output file with the compressed report
-	return os.WriteFile(fullPath, buf.Bytes(), 0644)
+	return os.WriteFile(fullPath, buf.Bytes(), 0o644)
}
@@ -10,7 +10,7 @@ to NAT or firewall issues.

There is very little reason why you'd want to run this yourself, as
`relaypoolsrv` is just used for announcement and lookup of public relay
-servers. If you are looking to setup a private or a public relay, please
+servers. If you are looking to set up a private or a public relay, please
check the documentation for
[relaysrv](https://github.com/syncthing/relaysrv), which also explains how
to join the default public pool.
@@ -21,4 +21,3 @@ See `relaypoolsrv -help` for configuration options.

[oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).
-
-[lib/pq](https://github.com/lib/pq)</a>, Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

-//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui
+//go:generate go run ../../../../script/genassets.go -o gui.files.go ../gui

// Package auto contains auto generated files for web assets.
package auto
@@ -259,7 +259,7 @@
		return a.value > b.value ? 1 : -1;
	}

-	$http.get("/endpoint").then(function(response) {
+	$http.get("/endpoint/full").then(function(response) {
		$scope.relays = response.data.relays;

		angular.forEach($scope.relays, function(relay) {
@@ -338,7 +338,7 @@
	relay.showMarker = function() {
		relay.marker.openPopup();
	}

	relay.hideMarker = function() {
		relay.marker.closePopup();
	}
@@ -347,7 +347,7 @@

	function addCircleToMap(relay) {
-		console.log(relay.location.latitude)
-		L.circle([relay.location.latitude, relay.location.longitude],
+		L.circle([relay.location.latitude, relay.location.longitude],
		{
			radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
			color: "FF0000",
@@ -17,21 +17,21 @@ import (
	"path/filepath"
	"strconv"
	"strings"
+	"sync"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
+	"github.com/syncthing/syncthing/cmd/infra/strelaypoolsrv/auto"
	"github.com/syncthing/syncthing/lib/assets"
	_ "github.com/syncthing/syncthing/lib/automaxprocs"
	"github.com/syncthing/syncthing/lib/geoip"
	"github.com/syncthing/syncthing/lib/httpcache"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
	"github.com/syncthing/syncthing/lib/relay/client"
-	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/tlsutil"
)

@@ -51,6 +51,10 @@ type relay struct {
	StatsRetrieved time.Time `json:"statsRetrieved"`
}

+type relayShort struct {
+	URL string `json:"url"`
+}
+
type stats struct {
	StartTime     time.Time `json:"startTime"`
	UptimeSeconds int       `json:"uptimeSeconds"`
@@ -95,6 +99,7 @@ var (
	testCert        tls.Certificate
	knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
	listen          = ":80"
+	metricsListen   = ":8081"
	dir             string
	evictionTime    = time.Hour
	debug           bool
@@ -106,10 +111,11 @@ var (
	requestProcessors = 8
	geoipLicenseKey   = os.Getenv("GEOIP_LICENSE_KEY")
	geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))
+	maxRelaysReturned = 100

	requests chan request

-	mut             = sync.NewRWMutex()
+	mut             sync.RWMutex
	knownRelays     = make([]*relay, 0)
	permanentRelays = make([]*relay, 0)
	evictionTimers  = make(map[string]*time.Timer)
@@ -125,6 +131,7 @@ func main() {
	log.SetFlags(log.Lshortfile)

	flag.StringVar(&listen, "listen", listen, "Listen address")
+	flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
	flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
	flag.BoolVar(&debug, "debug", debug, "Enable debug output")
	flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
@@ -136,6 +143,7 @@ func main() {
	flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
	flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
	flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")
+	flag.IntVar(&maxRelaysReturned, "max-relays-returned", maxRelaysReturned, "Maximum number of relays returned for a normal endpoint query")

	flag.Parse()

@@ -154,7 +162,7 @@ func main() {

	testCert = createTestCertificate()

-	for i := 0; i < requestProcessors; i++ {
+	for range requestProcessors {
		go requestProcessor(geoip)
	}

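The loop rewrite above uses Go 1.22's range-over-integer form; the two spellings are equivalent, as this minimal example shows:

package main

import "fmt"

func main() {
	n := 3
	for i := 0; i < n; i++ { // pre-1.22 form
		fmt.Println("worker", i)
	}
	for range n { // Go 1.22+ form used in the diff; index discarded
		fmt.Println("worker started")
	}
}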
@@ -172,7 +180,7 @@ func main() {
				relayTestsTotal.WithLabelValues("success").Inc()
			}
		}
-		// Run the the stats refresher once the relays are loaded.
+		// Run the stats refresher once the relays are loaded.
		statsRefresher(statsRefresh)
	}()

@@ -218,15 +226,40 @@ func main() {
		log.Fatalln("listen:", err)
	}

-	handler := http.NewServeMux()
-	handler.HandleFunc("/", handleAssets)
-	handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second))
-	handler.HandleFunc("/metrics", handleMetrics)
+	if metricsListen != "" {
+		mmux := http.NewServeMux()
+		mmux.HandleFunc("/metrics", handleMetrics)
+		go func() {
+			if err := http.ListenAndServe(metricsListen, mmux); err != nil {
+				log.Fatalln("HTTP serve metrics:", err)
+			}
+		}()
+	}
+
+	getMux := http.NewServeMux()
+	getMux.HandleFunc("/", handleAssets)
+	getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
+	getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))
+
+	postMux := http.NewServeMux()
+	postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))
+
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodGet, http.MethodHead, http.MethodOptions:
+			getMux.ServeHTTP(w, r)
+		case http.MethodPost:
+			postMux.ServeHTTP(w, r)
+		default:
+			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		}
+	})

	srv := http.Server{
		Handler:     handler,
		ReadTimeout: 10 * time.Second,
	}
	srv.SetKeepAlivesEnabled(false)

	err = srv.Serve(listener)
	if err != nil {
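The replacement wiring splits read and write routes across two muxes and dispatches on the HTTP method; a self-contained miniature of the same shape, with placeholder handlers:

package main

import "net/http"

func main() {
	getMux := http.NewServeMux()
	getMux.HandleFunc("/endpoint", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("list")) })

	postMux := http.NewServeMux()
	postMux.HandleFunc("/endpoint", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("register")) })

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet, http.MethodHead, http.MethodOptions:
			getMux.ServeHTTP(w, r)
		case http.MethodPost:
			postMux.ServeHTTP(w, r)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}
	})

	http.ListenAndServe(":8080", handler) // address illustrative
}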
@@ -260,39 +293,24 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
	assets.Serve(w, r, as)
}

-func handleRequest(w http.ResponseWriter, r *http.Request) {
-	timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
-
-	w = NewLoggingResponseWriter(w)
-	defer func() {
-		timer.ObserveDuration()
-		lw := w.(*loggingResponseWriter)
-		apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
-	}()
-
-	if ipHeader != "" {
-		hdr := r.Header.Get(ipHeader)
-		fields := strings.Split(hdr, ",")
-		if len(fields) > 0 {
-			r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
-		}
-	}
-	w.Header().Set("Access-Control-Allow-Origin", "*")
-	switch r.Method {
-	case "GET":
-		handleGetRequest(w, r)
-	case "POST":
-		handlePostRequest(w, r)
-	default:
-		if debug {
-			log.Println("Unhandled HTTP method", r.Method)
-		}
-		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
+		w = NewLoggingResponseWriter(w)
+		defer func() {
+			timer.ObserveDuration()
+			lw := w.(*loggingResponseWriter)
+			apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
+		}()
+		next(w, r)
	}
}

-func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
+// handleEndpointFull returns the relay list with full metadata and
+// statistics. Large, and expensive.
+func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
	rw.Header().Set("Content-Type", "application/json; charset=utf-8")
	rw.Header().Set("Access-Control-Allow-Origin", "*")

	mut.RLock()
	relays := make([]*relay, len(permanentRelays)+len(knownRelays))
@@ -300,17 +318,42 @@ func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
	copy(relays[n:], knownRelays)
	mut.RUnlock()

	// Shuffle
	rand.Shuffle(relays)

	_ = json.NewEncoder(rw).Encode(map[string][]*relay{
		"relays": relays,
	})
}

-func handlePostRequest(w http.ResponseWriter, r *http.Request) {
+// handleEndpointShort returns the relay list with only the URL.
+func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
+	rw.Header().Set("Content-Type", "application/json; charset=utf-8")
+	rw.Header().Set("Access-Control-Allow-Origin", "*")
+
+	mut.RLock()
+	relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
+	for _, r := range append(permanentRelays, knownRelays...) {
+		relays = append(relays, relayShort{URL: slimURL(r.URL)})
+	}
+	mut.RUnlock()
+	if len(relays) > maxRelaysReturned {
+		rand.Shuffle(relays)
+		relays = relays[:maxRelaysReturned]
+	}
+
+	_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
+		"relays": relays,
+	})
+}
+
+func handleRegister(w http.ResponseWriter, r *http.Request) {
	// Get the IP address of the client
	rhost := r.RemoteAddr
	if ipHeader != "" {
		hdr := r.Header.Get(ipHeader)
		fields := strings.Split(hdr, ",")
		if len(fields) > 0 {
			rhost = strings.TrimSpace(fields[len(fields)-1])
		}
	}
	if host, _, err := net.SplitHostPort(rhost); err == nil {
		rhost = host
	}
@@ -577,7 +620,7 @@ func createTestCertificate() tls.Certificate {
	}

	certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
-	cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365)
+	cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365, false)
	if err != nil {
		log.Fatalln("Failed to create test X509 key pair:", err)
	}
@@ -610,6 +653,7 @@ func getLocation(host string, geoip *geoip.Provider) location {

type loggingResponseWriter struct {
	http.ResponseWriter
+
	statusCode int
}

@@ -660,3 +704,16 @@ func (b *errorTracker) IsBlocked(host string) bool {
	}
	return false
}
+
+func slimURL(u string) string {
+	p, err := url.Parse(u)
+	if err != nil {
+		return u
+	}
+	newQuery := url.Values{}
+	if id := p.Query().Get("id"); id != "" {
+		newQuery.Set("id", id)
+	}
+	p.RawQuery = newQuery.Encode()
+	return p.String()
+}
@@ -13,7 +13,6 @@ import (
	"net/http/httptest"
	"net/url"
	"strings"
-	"sync"
	"testing"
)

@@ -28,8 +27,6 @@ func init() {
		{URL: "known2"},
		{URL: "known3"},
	}
-
-	mut = new(sync.RWMutex)
}

// Regression test: handleGetRequest should not modify permanentRelays.
@@ -42,7 +39,7 @@ func TestHandleGetRequest(t *testing.T) {

	w := httptest.NewRecorder()
	w.Body = new(bytes.Buffer)
-	handleGetRequest(w, httptest.NewRequest("GET", "/", nil))
+	handleEndpointFull(w, httptest.NewRequest("GET", "/", nil))

	result := make(map[string][]*relay)
	err := json.NewDecoder(w.Body).Decode(&result)
@@ -92,3 +89,18 @@ func TestCanonicalizeQueryValues(t *testing.T) {
		t.Errorf("expected %q, got %q", exp, str)
	}
}
+
+func TestSlimURL(t *testing.T) {
+	cases := []struct {
+		in, out string
+	}{
+		{"http://example.com/", "http://example.com/"},
+		{"relay://192.0.2.42:22067/?globalLimitBps=0&id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M&networkTimeout=2m0s&pingInterval=1m0s&providedBy=Test&sessionLimitBps=0&statusAddr=%3A22070", "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
+	}
+
+	for _, c := range cases {
+		if got := slimURL(c.in); got != c.out {
+			t.Errorf("expected %q, got %q", c.out, got)
+		}
+	}
+}
@@ -6,27 +6,12 @@ import (
	"encoding/json"
	"net"
	"net/http"
-	"os"
+	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/collectors"
-	"github.com/syncthing/syncthing/lib/sync"
)

-func init() {
-	processCollectorOpts := collectors.ProcessCollectorOpts{
-		Namespace: "syncthing_relaypoolsrv",
-		PidFn: func() (int, error) {
-			return os.Getpid(), nil
-		},
-	}
-
-	prometheus.MustRegister(
-		collectors.NewProcessCollector(processCollectorOpts),
-	)
-}
-
var (
	statusClient = http.Client{
		Timeout: 5 * time.Second,
@@ -119,7 +104,7 @@ func refreshStats() {
	mut.RUnlock()

	now := time.Now()
-	wg := sync.NewWaitGroup()
+	var wg sync.WaitGroup

	results := make(chan statsFetchResult, len(relays))
	for _, rel := range relays {
@@ -188,7 +173,7 @@ func fetchStats(relay *relay) *stats {

	var stats stats

-	if json.NewDecoder(response.Body).Decode(&stats); err != nil {
+	if err := json.NewDecoder(response.Body).Decode(&stats); err != nil {
		return nil
	}
	return &stats
cmd/infra/stupgrades/main.go (new file, 375 lines)
@@ -0,0 +1,375 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"net"
	"net/http"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/alecthomas/kong"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/syncthing/syncthing/internal/slogutil"
	_ "github.com/syncthing/syncthing/lib/automaxprocs"
	"github.com/syncthing/syncthing/lib/httpcache"
	"github.com/syncthing/syncthing/lib/upgrade"
)

type cli struct {
	Listen        string        `default:":8080" help:"Listen address"`
	MetricsListen string        `default:":8082" help:"Listen address for metrics"`
	URL           string        `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
	Forward       []string      `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
	CacheTime     time.Duration `default:"15m" help:"Cache time"`
}

func main() {
	var params cli
	kong.Parse(&params)
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	})))

	if err := server(&params); err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
}

func server(params *cli) error {
	if params.MetricsListen != "" {
		mux := http.NewServeMux()
		mux.Handle("/metrics", promhttp.Handler())
		metricsListen, err := net.Listen("tcp", params.MetricsListen)
		if err != nil {
			return fmt.Errorf("metrics: %w", err)
		}
		slog.Info("Metrics listener started", slogutil.Address(params.MetricsListen))
		go func() {
			if err := http.Serve(metricsListen, mux); err != nil {
				slog.Warn("Metrics server returned", slogutil.Error(err))
			}
		}()
	}

	cache := &cachedReleases{url: params.URL}
	if err := cache.Update(context.Background()); err != nil {
		return fmt.Errorf("initial cache update: %w", err)
	} else {
		slog.Info("Initial cache update done")
	}

	go func() {
		for range time.NewTicker(params.CacheTime).C {
			slog.Info("Refreshing cached releases", slogutil.URI(params.URL))
			if err := cache.Update(context.Background()); err != nil {
				slog.Error("Failed to refresh cached releases", slogutil.URI(params.URL), slogutil.Error(err))
			}
		}
	}()

	ghRels := &githubReleases{cache: cache}
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", ghRels.servePing)
	mux.HandleFunc("/meta.json", ghRels.serveReleases)

	for _, fwd := range params.Forward {
		path, url, ok := strings.Cut(fwd, "->")
		if !ok {
			return fmt.Errorf("invalid forward: %q", fwd)
		}
		slog.Info("Forwarding", "from", path, "to", url)
		name := strings.ReplaceAll(path, "/", "_")
		mux.Handle(path, httpcache.SinglePath(&proxy{name: name, url: url}, params.CacheTime))
	}

	srv := &http.Server{
		Addr:         params.Listen,
		Handler:      mux,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	srv.SetKeepAlivesEnabled(false)

	srvListener, err := net.Listen("tcp", params.Listen)
	if err != nil {
		return fmt.Errorf("listen: %w", err)
	}
	slog.Info("Main listener started", slogutil.Address(params.Listen))

	return srv.Serve(srvListener)
}

type githubReleases struct {
	cache *cachedReleases
}

func (p *githubReleases) servePing(w http.ResponseWriter, req *http.Request) {
	rels := p.cache.Releases()

	if len(rels) == 0 {
		http.Error(w, "No releases available", http.StatusServiceUnavailable)
		return
	}

	w.Header().Set("Syncthing-Num-Releases", strconv.Itoa(len(rels)))
	w.WriteHeader(http.StatusOK)
}

func (p *githubReleases) serveReleases(w http.ResponseWriter, req *http.Request) {
	rels := p.cache.Releases()

	ua := req.Header.Get("User-Agent")
	osv := req.Header.Get("Syncthing-Os-Version")
	if ua != "" && osv != "" {
		// We should determine the compatibility of the releases.
		rels = filterForCompatibility(rels, ua, osv)
	} else {
		metricFilterCalls.WithLabelValues("no-ua-or-osversion").Inc()
	}

	rels = filterForLatest(rels)

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET")
	w.Header().Set("Cache-Control", "public, max-age=900")
	w.Header().Set("Vary", "User-Agent, Syncthing-Os-Version")
	_ = json.NewEncoder(w).Encode(rels)

	metricUpgradeChecks.Inc()
}

type proxy struct {
	name string
	url  string
}

func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
	if err != nil {
		metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		metricHTTPRequests.WithLabelValues(p.name, "error").Inc()
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	metricHTTPRequests.WithLabelValues(p.name, "success").Inc()

	ct := resp.Header.Get("Content-Type")
	w.Header().Set("Content-Type", ct)
	if resp.StatusCode == http.StatusOK {
		w.Header().Set("Cache-Control", "public, max-age=900")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET")
	}
	w.WriteHeader(resp.StatusCode)
	if strings.HasPrefix(ct, "application/json") {
		// Special JSON handling; clean it up a bit.
		var v interface{}
		if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		_ = json.NewEncoder(w).Encode(v)
	} else {
		_, _ = io.Copy(w, resp.Body)
	}
}

// filterForLatest returns the latest stable and prerelease only. If the
// stable version is newer (comes first in the list) there is no need to go
// looking for a prerelease at all.
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
	var filtered []upgrade.Release
	havePre := make(map[string]bool)
	haveStable := make(map[string]bool)
	for _, rel := range rels {
		major, _, _ := strings.Cut(rel.Tag, ".")
		if !rel.Prerelease && !haveStable[major] {
			// Remember the first non-pre for each major
			filtered = append(filtered, rel)
			haveStable[major] = true
			continue
		}
		if rel.Prerelease && !havePre[major] && !haveStable[major] {
			// We remember the first prerelease we find, unless we've
			// already found a non-pre of the same major.
			filtered = append(filtered, rel)
			havePre[major] = true
		}
	}
	return filtered
}

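A worked example of filterForLatest's selection rule, treating the function as defined above; the release list is hypothetical and ordered newest first, as the doc comment requires:

rels := []upgrade.Release{
	{Tag: "v2.0.1-rc.2", Prerelease: true},  // first prerelease for major "v2"
	{Tag: "v2.0.0", Prerelease: false},      // first stable for major "v2"
	{Tag: "v1.27.8", Prerelease: false},     // first stable for major "v1"
	{Tag: "v1.27.8-rc.1", Prerelease: true}, // dropped: stable "v1" already seen
}
got := filterForLatest(rels)
// got: v2.0.1-rc.2, v2.0.0, v1.27.8
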
var userAgentOSArchExp = regexp.MustCompile(`^syncthing.*\(.+ (\w+)-(\w+)\)$`)

func filterForCompatibility(rels []upgrade.Release, ua, osv string) []upgrade.Release {
	osArch := userAgentOSArchExp.FindStringSubmatch(ua)
	if len(osArch) != 3 {
		metricFilterCalls.WithLabelValues("bad-os-arch").Inc()
		return rels
	}
	os := osArch[1]

	var filtered []upgrade.Release
	for _, rel := range rels {
		if rel.Compatibility == nil {
			// No requirements means it's compatible with everything.
			filtered = append(filtered, rel)
			continue
		}

		req, ok := rel.Compatibility.Requirements[os]
		if !ok {
			// No entry for the current OS means it's compatible.
			filtered = append(filtered, rel)
			continue
		}

		if upgrade.CompareVersions(osv, req) >= 0 {
			filtered = append(filtered, rel)
			continue
		}
	}

	if len(filtered) != len(rels) {
		metricFilterCalls.WithLabelValues("filtered").Inc()
	} else {
		metricFilterCalls.WithLabelValues("unchanged").Inc()
	}

	return filtered
}

type cachedReleases struct {
	url                  string
	mut                  sync.RWMutex
	current              []upgrade.Release
	latestRel, latestPre string
}

func (c *cachedReleases) Releases() []upgrade.Release {
	c.mut.RLock()
	defer c.mut.RUnlock()
	return c.current
}

func (c *cachedReleases) Update(ctx context.Context) error {
	rels, err := fetchGithubReleases(ctx, c.url)
	if err != nil {
		return err
	}
	latestRel, latestPre := "", ""
	for _, rel := range rels {
		if !rel.Prerelease && latestRel == "" {
			latestRel = rel.Tag
		}
		if rel.Prerelease && latestPre == "" {
			latestPre = rel.Tag
		}
		if latestRel != "" && latestPre != "" {
			break
		}
	}
	c.mut.Lock()
	c.current = rels
	if latestRel != c.latestRel || latestPre != c.latestPre {
		metricLatestReleaseInfo.DeleteLabelValues(c.latestRel, c.latestPre)
		metricLatestReleaseInfo.WithLabelValues(latestRel, latestPre).Set(1)
		c.latestRel = latestRel
		c.latestPre = latestPre
	}
	c.mut.Unlock()
	return nil
}

func fetchGithubReleases(ctx context.Context, url string) ([]upgrade.Release, error) {
	req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil)
	if err != nil {
		metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
		return nil, err
	}
	defer resp.Body.Close()
	var rels []upgrade.Release
	if err := json.NewDecoder(resp.Body).Decode(&rels); err != nil {
		metricHTTPRequests.WithLabelValues("github-releases", "error").Inc()
		return nil, err
	}
	metricHTTPRequests.WithLabelValues("github-releases", "success").Inc()

	// Move the URL used for browser downloads to the URL field, and remove
	// the browser URL field. This avoids going via the GitHub API for
	// downloads, since Syncthing uses the URL field.
	for _, rel := range rels {
		for j, asset := range rel.Assets {
			rel.Assets[j].URL = asset.BrowserURL
			rel.Assets[j].BrowserURL = ""
		}
	}

	addReleaseCompatibility(ctx, rels)

	sort.Sort(upgrade.SortByRelease(rels))
	return rels, nil
}

func addReleaseCompatibility(ctx context.Context, rels []upgrade.Release) {
	for i := range rels {
		rel := &rels[i]
		for i, asset := range rel.Assets {
			if asset.Name != "compat.json" {
				continue
			}

			// Load compat.json into the Compatibility field
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, asset.URL, nil)
			if err != nil {
				metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
				break
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
				break
			}
			if resp.StatusCode != http.StatusOK {
				metricHTTPRequests.WithLabelValues("compat-json", "error").Inc()
				resp.Body.Close()
				break
			}
			_ = json.NewDecoder(io.LimitReader(resp.Body, 10<<10)).Decode(&rel.Compatibility)
			metricHTTPRequests.WithLabelValues("compat-json", "success").Inc()
			resp.Body.Close()

			// Remove compat.json from the asset list since it's been processed
			rel.Assets = append(rel.Assets[:i], rel.Assets[i+1:]...)
			break
		}
	}
}
cmd/infra/stupgrades/metrics.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	metricUpgradeChecks = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "upgrade",
		Name:      "metadata_requests",
	})
	metricFilterCalls = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "upgrade",
		Name:      "filter_calls",
	}, []string{"result"})
	metricHTTPRequests = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "upgrade",
		Name:      "http_requests",
	}, []string{"target", "result"})
	metricLatestReleaseInfo = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "upgrade",
		Name:      "latest_release_info",
		Help:      "Release information",
	}, []string{"latest_release", "latest_pre"})
)
@@ -8,22 +8,22 @@ package main

import (
-	"log"
+	"log/slog"
	"os"

	"github.com/alecthomas/kong"
-	"github.com/syncthing/syncthing/cmd/ursrv/aggregate"
-	"github.com/syncthing/syncthing/cmd/ursrv/serve"
+	"github.com/syncthing/syncthing/cmd/infra/ursrv/serve"
	_ "github.com/syncthing/syncthing/lib/automaxprocs"
)

type CLI struct {
-	Serve     serve.CLI     `cmd:"" default:""`
-	Aggregate aggregate.CLI `cmd:""`
+	Serve serve.CLI `cmd:"" default:""`
}

func main() {
-	log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
-	log.SetOutput(os.Stdout)
+	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+		Level: slog.LevelInfo,
+	})))

	var cli CLI
	ctx := kong.Parse(&cli)
cmd/infra/ursrv/serve/metrics.go (new file, 61 lines)
@@ -0,0 +1,61 @@
// Copyright (C) 2023 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "incoming_reports_total",
	}, []string{"result"})
	metricsCollectsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "collects_total",
	})
	metricsCollectSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "collect_seconds_total",
	})
	metricsCollectSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "collect_seconds_last",
	})
	metricsRecalcsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "recalcs_total",
	})
	metricsRecalcSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "recalc_seconds_total",
	})
	metricsRecalcSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "recalc_seconds_last",
	})
	metricsWriteSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: "syncthing",
		Subsystem: "ursrv_v2",
		Name:      "write_seconds_last",
	})
)

func init() {
	metricReportsTotal.WithLabelValues("fail")
	metricReportsTotal.WithLabelValues("replace")
	metricReportsTotal.WithLabelValues("accept")
}
cmd/infra/ursrv/serve/prometheus.go (new file, 351 lines)
@@ -0,0 +1,351 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"context"
	"fmt"
	"log/slog"
	"reflect"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/syncthing/syncthing/lib/ur/contract"
)

const namePrefix = "syncthing_usage_"

type metricsSet struct {
	srv *server

	gauges         map[string]prometheus.Gauge
	gaugeVecs      map[string]*prometheus.GaugeVec
	gaugeVecLabels map[string][]string
	summaries      map[string]*metricSummary

	collectMut    sync.RWMutex
	collectCutoff time.Duration
}

func newMetricsSet(srv *server) *metricsSet {
	s := &metricsSet{
		srv:            srv,
		gauges:         make(map[string]prometheus.Gauge),
		gaugeVecs:      make(map[string]*prometheus.GaugeVec),
		gaugeVecLabels: make(map[string][]string),
		summaries:      make(map[string]*metricSummary),
		collectCutoff:  -24 * time.Hour,
	}

	var initForType func(reflect.Type)
	initForType = func(t reflect.Type) {
		for i := range t.NumField() {
			field := t.Field(i)
			if field.Type.Kind() == reflect.Struct {
				initForType(field.Type)
				continue
			}
			name, typ, label := fieldNameTypeLabel(field)
			sname, labels := nameConstLabels(name)
			switch typ {
			case "gauge":
				s.gauges[name] = prometheus.NewGauge(prometheus.GaugeOpts{
					Name:        namePrefix + sname,
					ConstLabels: labels,
				})
			case "summary":
				s.summaries[name] = newMetricSummary(namePrefix+sname, nil, labels)
			case "gaugeVec":
				s.gaugeVecLabels[name] = append(s.gaugeVecLabels[name], label)
			case "summaryVec":
				s.summaries[name] = newMetricSummary(namePrefix+sname, []string{label}, labels)
			}
		}
	}
	initForType(reflect.ValueOf(contract.Report{}).Type())

	for name, labels := range s.gaugeVecLabels {
		s.gaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: namePrefix + name,
		}, labels)
	}

	return s
}

|
||||
func fieldNameTypeLabel(rf reflect.StructField) (string, string, string) {
|
||||
metric := rf.Tag.Get("metric")
|
||||
name, typ, ok := strings.Cut(metric, ",")
|
||||
if !ok {
|
||||
return "", "", ""
|
||||
}
|
||||
gv, label, ok := strings.Cut(typ, ":")
|
||||
if ok {
|
||||
typ = gv
|
||||
}
|
||||
return name, typ, label
|
||||
}
|
||||
|
||||
func nameConstLabels(name string) (string, prometheus.Labels) {
|
||||
if name == "-" {
|
||||
return "", nil
|
||||
}
|
||||
name, labels, ok := strings.Cut(name, "{")
|
||||
if !ok {
|
||||
return name, nil
|
||||
}
|
||||
lls := strings.Split(labels[:len(labels)-1], ",")
|
||||
m := make(map[string]string)
|
||||
for _, l := range lls {
|
||||
k, v, _ := strings.Cut(l, "=")
|
||||
m[k] = v
|
||||
}
|
||||
return name, m
|
||||
}
|
||||
|
||||
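The two helpers above decode a `metric:"name{const=label},type[:vecLabel]"` struct-tag convention. A usage sketch; the example tags are hypothetical, not actual fields of contract.Report:

// A reflect.StructField carrying Tag `metric:"os_version,gaugeVec:version"`:
//   fieldNameTypeLabel(field) -> ("os_version", "gaugeVec", "version")
// A field tagged `metric:"num_folders,summary"`:
//   fieldNameTypeLabel(field) -> ("num_folders", "summary", "")
// The name half may additionally carry constant labels:
//   nameConstLabels("uptime{flavor=total}") -> ("uptime", prometheus.Labels{"flavor": "total"})
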
func (s *metricsSet) Serve(ctx context.Context) error {
	s.recalc()

	const recalcInterval = 5 * time.Minute
	next := time.Until(time.Now().Truncate(recalcInterval).Add(recalcInterval))
	recalcTimer := time.NewTimer(next)
	defer recalcTimer.Stop()

	for {
		select {
		case <-recalcTimer.C:
			s.recalc()
			next := time.Until(time.Now().Truncate(recalcInterval).Add(recalcInterval))
			recalcTimer.Reset(next)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func (s *metricsSet) recalc() {
	s.collectMut.Lock()
	defer s.collectMut.Unlock()

	t0 := time.Now()
	defer func() {
		dur := time.Since(t0)
		slog.Info("Metrics recalculated", "d", dur.String())
		metricsRecalcSecondsLast.Set(dur.Seconds())
		metricsRecalcSecondsTotal.Add(dur.Seconds())
		metricsRecalcsTotal.Inc()
	}()

	for _, g := range s.gauges {
		g.Set(0)
	}
	for _, g := range s.gaugeVecs {
		g.Reset()
	}
	for _, g := range s.summaries {
		g.Reset()
	}

	cutoff := time.Now().Add(s.collectCutoff)
	s.srv.reports.Range(func(key string, r *contract.Report) bool {
		if s.collectCutoff < 0 && r.Received.Before(cutoff) {
			s.srv.reports.Delete(key)
			return true
		}
		s.addReport(r)
		return true
	})
}

func (s *metricsSet) addReport(r *contract.Report) {
	gaugeVecs := make(map[string][]string)
	s.addReportStruct(reflect.ValueOf(r).Elem(), gaugeVecs)
	for name, lv := range gaugeVecs {
		s.gaugeVecs[name].WithLabelValues(lv...).Add(1)
	}
}

func (s *metricsSet) addReportStruct(v reflect.Value, gaugeVecs map[string][]string) {
	t := v.Type()
	for i := range v.NumField() {
		field := v.Field(i)
		if field.Kind() == reflect.Struct {
			s.addReportStruct(field, gaugeVecs)
			continue
		}

		name, typ, label := fieldNameTypeLabel(t.Field(i))
		switch typ {
		case "gauge":
			switch v := field.Interface().(type) {
			case int:
				s.gauges[name].Add(float64(v))
			case string:
				s.gaugeVecs[name].WithLabelValues(v).Add(1)
			case bool:
				if v {
					s.gauges[name].Add(1)
				}
			}
		case "gaugeVec":
			var labelValue string
			switch v := field.Interface().(type) {
			case string:
				labelValue = v
			case int:
				labelValue = strconv.Itoa(v)
			case map[string]int:
				for k, v := range v {
					labelValue = k
					field.SetInt(int64(v))
					break
				}
			}
			if _, ok := gaugeVecs[name]; !ok {
				gaugeVecs[name] = make([]string, len(s.gaugeVecLabels[name]))
			}
			for i, l := range s.gaugeVecLabels[name] {
				if l == label {
					gaugeVecs[name][i] = labelValue
					break
				}
			}
		case "summary", "summaryVec":
			switch v := field.Interface().(type) {
			case int:
				s.summaries[name].Observe("", float64(v))
			case float64:
				s.summaries[name].Observe("", v)
			case []int:
				for _, v := range v {
					s.summaries[name].Observe("", float64(v))
				}
			case map[string]int:
				for k, v := range v {
					if k == "" {
						// avoid empty string labels as those are the sign
						// of a non-vec summary
						k = "unknown"
					}
					s.summaries[name].Observe(k, float64(v))
				}
			}
		}
	}
}

func (s *metricsSet) Describe(c chan<- *prometheus.Desc) {
	for _, g := range s.gauges {
		g.Describe(c)
	}
	for _, g := range s.gaugeVecs {
		g.Describe(c)
	}
	for _, g := range s.summaries {
		g.Describe(c)
	}
}

func (s *metricsSet) Collect(c chan<- prometheus.Metric) {
	s.collectMut.RLock()
	defer s.collectMut.RUnlock()

	t0 := time.Now()
	defer func() {
		dur := time.Since(t0).Seconds()
		metricsCollectSecondsLast.Set(dur)
		metricsCollectSecondsTotal.Add(dur)
		metricsCollectsTotal.Inc()
	}()

	for _, g := range s.gauges {
		c <- g
	}
	for _, g := range s.gaugeVecs {
		g.Collect(c)
	}
	for _, g := range s.summaries {
		g.Collect(c)
	}
}

type metricSummary struct {
	name   string
	values map[string][]float64
	zeroes map[string]int

	qDesc     *prometheus.Desc
	countDesc *prometheus.Desc
	sumDesc   *prometheus.Desc
	zDesc     *prometheus.Desc
}

func newMetricSummary(name string, labels []string, constLabels prometheus.Labels) *metricSummary {
	return &metricSummary{
		name:      name,
		values:    make(map[string][]float64),
		zeroes:    make(map[string]int),
		qDesc:     prometheus.NewDesc(name, "", append(labels, "quantile"), constLabels),
		countDesc: prometheus.NewDesc(name+"_nonzero_count", "", labels, constLabels),
		sumDesc:   prometheus.NewDesc(name+"_sum", "", labels, constLabels),
		zDesc:     prometheus.NewDesc(name+"_zero_count", "", labels, constLabels),
	}
}

func (q *metricSummary) Observe(labelValue string, v float64) {
	if v == 0 {
		q.zeroes[labelValue]++
		return
	}
	q.values[labelValue] = append(q.values[labelValue], v)
}

func (q *metricSummary) Describe(c chan<- *prometheus.Desc) {
	c <- q.qDesc
	c <- q.countDesc
	c <- q.sumDesc
	c <- q.zDesc
}

func (q *metricSummary) Collect(c chan<- prometheus.Metric) {
	for lv, vs := range q.values {
		var labelVals []string
		if lv != "" {
			labelVals = []string{lv}
		}

		c <- prometheus.MustNewConstMetric(q.countDesc, prometheus.GaugeValue, float64(len(vs)), labelVals...)
		c <- prometheus.MustNewConstMetric(q.zDesc, prometheus.GaugeValue, float64(q.zeroes[lv]), labelVals...)

		var sum float64
		for _, v := range vs {
			sum += v
		}
		c <- prometheus.MustNewConstMetric(q.sumDesc, prometheus.GaugeValue, sum, labelVals...)

		if len(vs) == 0 {
			return
		}

		slices.Sort(vs)

		pctiles := []float64{0, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 1}
		for _, pct := range pctiles {
			idx := int(float64(len(vs)-1) * pct)
			c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[idx], append(labelVals, fmt.Sprint(pct))...)
		}
	}
}

func (q *metricSummary) Reset() {
	clear(q.values)
	clear(q.zeroes)
}
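The Collect method above derives quantiles by nearest-rank indexing into the sorted observations. The same computation in isolation, runnable as-is:

package main

import (
	"fmt"
	"slices"
)

func main() {
	vs := []float64{9, 1, 4, 7, 2} // observations, unsorted
	slices.Sort(vs)                // [1 2 4 7 9]
	for _, pct := range []float64{0, 0.5, 0.95, 1} {
		idx := int(float64(len(vs)-1) * pct) // same indexing as Collect above
		fmt.Printf("q%v = %v\n", pct, vs[idx])
	}
	// q0 = 1, q0.5 = 4, q0.95 = 7, q1 = 9
}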
cmd/infra/ursrv/serve/serve.go (new file, 444 lines)
@@ -0,0 +1,444 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"bufio"
	"compress/gzip"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net"
	"net/http"
	_ "net/http/pprof"
	"os"
	"regexp"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/puzpuzpuz/xsync/v3"
	"github.com/syncthing/syncthing/internal/blob"
	"github.com/syncthing/syncthing/internal/blob/azureblob"
	"github.com/syncthing/syncthing/internal/blob/s3"
	"github.com/syncthing/syncthing/internal/slogutil"
	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/geoip"
	"github.com/syncthing/syncthing/lib/ur/contract"
	"github.com/thejerf/suture/v4"
)

type CLI struct {
	Listen          string        `env:"UR_LISTEN" help:"Usage reporting & metrics endpoint listen address" default:"0.0.0.0:8080"`
	ListenInternal  string        `env:"UR_LISTEN_INTERNAL" help:"Internal metrics endpoint listen address" default:"0.0.0.0:8082"`
	GeoIPLicenseKey string        `env:"UR_GEOIP_LICENSE_KEY"`
	GeoIPAccountID  int           `env:"UR_GEOIP_ACCOUNT_ID"`
	DumpFile        string        `env:"UR_DUMP_FILE" default:"reports.jsons.gz"`
	DumpInterval    time.Duration `env:"UR_DUMP_INTERVAL" default:"5m"`

	S3Endpoint    string `name:"s3-endpoint" env:"UR_S3_ENDPOINT"`
	S3Region      string `name:"s3-region" env:"UR_S3_REGION"`
	S3Bucket      string `name:"s3-bucket" env:"UR_S3_BUCKET"`
	S3AccessKeyID string `name:"s3-access-key-id" env:"UR_S3_ACCESS_KEY_ID"`
	S3SecretKey   string `name:"s3-secret-key" env:"UR_S3_SECRET_KEY"`

	AzureBlobAccount   string `name:"azure-blob-account" env:"UR_AZUREBLOB_ACCOUNT"`
	AzureBlobKey       string `name:"azure-blob-key" env:"UR_AZUREBLOB_KEY"`
	AzureBlobContainer string `name:"azure-blob-container" env:"UR_AZUREBLOB_CONTAINER"`
}

var (
	compilerRe         = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\w@.-]+)`)
	knownDistributions = []distributionMatch{
		// Maps well known builders to the official distribution method that
		// they represent

		{regexp.MustCompile(`\steamcity@build\.syncthing\.net`), "GitHub"},
		{regexp.MustCompile(`\sjenkins@build\.syncthing\.net`), "GitHub"},
		{regexp.MustCompile(`\sbuilder@github\.syncthing\.net`), "GitHub"},

		{regexp.MustCompile(`\sdeb@build\.syncthing\.net`), "APT"},
		{regexp.MustCompile(`\sdebian@github\.syncthing\.net`), "APT"},

		{regexp.MustCompile(`\sdocker@syncthing\.net`), "Docker Hub"},
		{regexp.MustCompile(`\sdocker@build.syncthing\.net`), "Docker Hub"},
		{regexp.MustCompile(`\sdocker@github.syncthing\.net`), "Docker Hub"},

		{regexp.MustCompile(`\sandroid-builder@github\.syncthing\.net`), "Google Play"},
		{regexp.MustCompile(`\sandroid-.*teamcity@build\.syncthing\.net`), "Google Play"},

		{regexp.MustCompile(`\sandroid-.*vagrant@basebox-stretch64`), "F-Droid"},
		{regexp.MustCompile(`\svagrant@bullseye`), "F-Droid"},
		{regexp.MustCompile(`\svagrant@bookworm`), "F-Droid"},

		{regexp.MustCompile(`\sreproducible-build@Catfriend1-syncthing-android`), "Syncthing-Fork Catfriend1 (3rd party)"},
		{regexp.MustCompile(`\sreproducible-build@nel0x-syncthing-android-gplay`), "Syncthing-Fork nel0x (3rd party)"},

		{regexp.MustCompile(`\sbuilduser@(archlinux|svetlemodry)`), "Arch (3rd party)"},
		{regexp.MustCompile(`\ssyncthing@archlinux`), "Arch (3rd party)"},
		{regexp.MustCompile(`@debian`), "Debian (3rd party)"},
		{regexp.MustCompile(`@fedora`), "Fedora (3rd party)"},
		{regexp.MustCompile(`@openSUSE`), "openSUSE (3rd party)"},
		{regexp.MustCompile(`\sbrew@`), "Homebrew (3rd party)"},
		{regexp.MustCompile(`\sroot@buildkitsandbox`), "LinuxServer.io (3rd party)"},
		{regexp.MustCompile(`\sports@freebsd`), "FreeBSD (3rd party)"},
		{regexp.MustCompile(`\snix@nix`), "Nix (3rd party)"},
		{regexp.MustCompile(`.`), "Others"},
	}
)

type distributionMatch struct {
	matcher      *regexp.Regexp
	distribution string
}

func (cli *CLI) Run() error {
	slog.Info("Starting", "version", build.Version)

	// Listening

	urListener, err := net.Listen("tcp", cli.Listen)
	if err != nil {
		slog.Error("Failed to listen (usage reports)", slogutil.Error(err))
		return err
	}
	slog.Info("Listening (usage reports)", slogutil.Address(urListener.Addr()))

	internalListener, err := net.Listen("tcp", cli.ListenInternal)
	if err != nil {
		slog.Error("Failed to listen (internal)", slogutil.Error(err))
		return err
	}
	slog.Info("Listening (internal)", slogutil.Address(internalListener.Addr()))

	var geo *geoip.Provider
	if cli.GeoIPAccountID != 0 && cli.GeoIPLicenseKey != "" {
		geo, err = geoip.NewGeoLite2CityProvider(context.Background(), cli.GeoIPAccountID, cli.GeoIPLicenseKey, os.TempDir())
		if err != nil {
			slog.Error("Failed to load GeoIP", slogutil.Error(err))
			return err
		}
		go geo.Serve(context.TODO())
	}

	// Blob storage

	var blobs blob.Store
	if cli.S3Endpoint != "" {
		blobs, err = s3.NewSession(cli.S3Endpoint, cli.S3Region, cli.S3Bucket, cli.S3AccessKeyID, cli.S3SecretKey)
		if err != nil {
			slog.Error("Failed to create S3 session", slogutil.Error(err))
			return err
		}
	} else if cli.AzureBlobAccount != "" {
		blobs, err = azureblob.NewBlobStore(cli.AzureBlobAccount, cli.AzureBlobKey, cli.AzureBlobContainer)
		if err != nil {
			slog.Error("Failed to create Azure blob store", slogutil.Error(err))
			return err
		}
	}

	if _, err := os.Stat(cli.DumpFile); err != nil && blobs != nil {
		if err := cli.downloadDumpFile(blobs); err != nil {
			slog.Error("Failed to download dump file", slogutil.Error(err))
		}
	}

	// server

	srv := &server{
		geo:     geo,
		reports: xsync.NewMapOf[string, *contract.Report](),
	}

	if fd, err := os.Open(cli.DumpFile); err == nil {
		gr, err := gzip.NewReader(fd)
		if err == nil {
			srv.load(gr)
		}
		fd.Close()
	}

	go func() {
		for range time.Tick(cli.DumpInterval) {
			if err := cli.saveDumpFile(srv, blobs); err != nil {
				slog.Error("Failed to write dump file", slogutil.Error(err))
			}
		}
	}()

	// The internal metrics endpoint just serves metrics about what the
	// server is doing.

	http.Handle("/metrics", promhttp.Handler())

	internalSrv := http.Server{
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 15 * time.Second,
	}
	go internalSrv.Serve(internalListener)

	// New external metrics endpoint accepts reports from clients and serves
	// aggregated usage reporting metrics.

	main := suture.NewSimple("main")
	main.ServeBackground(context.Background())

	ms := newMetricsSet(srv)
	main.Add(ms)

	reg := prometheus.NewRegistry()
	reg.MustRegister(ms)

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	mux.HandleFunc("/newdata", srv.handleNewData)
	mux.HandleFunc("/ping", srv.handlePing)

	metricsSrv := http.Server{
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 60 * time.Second,
		Handler:      mux,
	}

	slog.Info("Ready to serve")
	return metricsSrv.Serve(urListener)
}

|
||||
func (cli *CLI) downloadDumpFile(blobs blob.Store) error {
|
||||
latestKey, err := blobs.LatestKey(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("list latest S3 key: %w", err)
|
||||
}
|
||||
fd, err := os.Create(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Download(context.Background(), latestKey, fd); err != nil {
|
||||
_ = fd.Close()
|
||||
return fmt.Errorf("download dump file: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("close dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file downloaded", "key", latestKey)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *CLI) saveDumpFile(srv *server, blobs blob.Store) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
metricsWriteSecondsLast.Set(float64(time.Since(t0)))
|
||||
}()
|
||||
|
||||
fd, err := os.Create(cli.DumpFile + ".tmp")
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating dump file: %w", err)
|
||||
}
|
||||
gw := gzip.NewWriter(fd)
|
||||
if err := srv.save(gw); err != nil {
|
||||
return fmt.Errorf("saving dump file: %w", err)
|
||||
}
|
||||
if err := gw.Close(); err != nil {
|
||||
fd.Close()
|
||||
return fmt.Errorf("closing gzip writer: %w", err)
|
||||
}
|
||||
if err := fd.Close(); err != nil {
|
||||
return fmt.Errorf("closing dump file: %w", err)
|
||||
}
|
||||
if err := os.Rename(cli.DumpFile+".tmp", cli.DumpFile); err != nil {
|
||||
return fmt.Errorf("renaming dump file: %w", err)
|
||||
}
|
||||
slog.Info("Dump file saved", "d", time.Since(t0).String())
|
||||
|
||||
if blobs != nil {
|
||||
t1 := time.Now()
|
||||
key := fmt.Sprintf("reports-%s.jsons.gz", time.Now().UTC().Format("2006-01-02"))
|
||||
fd, err := os.Open(cli.DumpFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening dump file: %w", err)
|
||||
}
|
||||
if err := blobs.Upload(context.Background(), key, fd); err != nil {
|
||||
return fmt.Errorf("uploading dump file: %w", err)
|
||||
}
|
||||
_ = fd.Close()
|
||||
slog.Info("Dump file uploaded", "d", time.Since(t1).String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
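
// The dump file produced above is a gzip-compressed stream of JSON
// objects, one contract.Report per record, written to a .tmp file and
// renamed into place so readers never see a partial dump. A minimal
// standalone reader for that format might look like the following sketch
// (readDump is a hypothetical helper, not part of this server; it only
// uses packages already imported here):
func readDump(name string) ([]contract.Report, error) {
	fd, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	gr, err := gzip.NewReader(fd)
	if err != nil {
		return nil, err
	}
	var reps []contract.Report
	dec := json.NewDecoder(gr)
	for {
		var rep contract.Report
		if err := dec.Decode(&rep); errors.Is(err, io.EOF) {
			return reps, nil
		} else if err != nil {
			return reps, err
		}
		reps = append(reps, rep)
	}
}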
type server struct {
	geo     *geoip.Provider
	reports *xsync.MapOf[string, *contract.Report]
}

func (s *server) handlePing(w http.ResponseWriter, r *http.Request) {
}

func (s *server) handleNewData(w http.ResponseWriter, r *http.Request) {
	result := "fail"
	defer func() {
		// result is "accept" (new report), "replace" (existing report) or
		// "fail"
		metricReportsTotal.WithLabelValues(result).Inc()
	}()

	defer r.Body.Close()

	if r.Method != http.MethodPost {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}

	addr := r.Header.Get("X-Forwarded-For")
	if addr != "" {
		addr = strings.Split(addr, ", ")[0]
	} else {
		addr = r.RemoteAddr
	}

	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}

	log := slog.With("addr", addr)

	if net.ParseIP(addr) == nil {
		addr = ""
	}

	var rep contract.Report

	lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
	bs, _ := io.ReadAll(lr)
	if err := json.Unmarshal(bs, &rep); err != nil {
		log.Error("Failed to decode JSON", slogutil.Error(err))
		http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
		return
	}

	rep.Received = time.Now()
	rep.Date = rep.Received.UTC().Format("20060102")
	rep.Address = addr

	if err := rep.Validate(); err != nil {
		log.Error("Failed to validate report", slogutil.Error(err))
		http.Error(w, "Validation Error", http.StatusInternalServerError)
		return
	}

	if s.addReport(&rep) {
		result = "replace"
	} else {
		result = "accept"
	}
}

func (s *server) addReport(rep *contract.Report) bool {
	if s.geo != nil {
		if ip := net.ParseIP(rep.Address); ip != nil {
			if city, err := s.geo.City(ip); err == nil {
				rep.Country = city.Country.Names["en"]
				rep.CountryCode = city.Country.IsoCode
			}
		}
	}
	if rep.Country == "" {
		rep.Country = "Unknown"
	}
	if rep.CountryCode == "" {
		rep.CountryCode = "ZZ"
	}

	rep.Version = transformVersion(rep.Version)
	if strings.Contains(rep.Version, ".") {
		split := strings.SplitN(rep.Version, ".", 3)
		if len(split) == 3 {
			rep.MajorVersion = strings.Join(split[:2], ".")
		}
	}
	rep.OS, rep.Arch, _ = strings.Cut(rep.Platform, "-")

	if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
		rep.Compiler = m[1]
		rep.Builder = m[2]
	}
	for _, d := range knownDistributions {
		if d.matcher.MatchString(rep.LongVersion) {
			rep.Distribution = d.distribution
			break
		}
	}
	rep.DistDist = rep.Distribution
	rep.DistOS = rep.OS
	rep.DistArch = rep.Arch

	if strings.HasPrefix(rep.Version, "v2.") {
		rep.Database.ModernCSQLite = strings.Contains(rep.LongVersion, "modernc-sqlite")
		rep.Database.MattnSQLite = !rep.Database.ModernCSQLite
	} else {
		rep.Database.LevelDB = true
	}

	_, loaded := s.reports.LoadAndStore(rep.UniqueID, rep)
	return loaded
}

func (s *server) save(w io.Writer) error {
	bw := bufio.NewWriter(w)
	enc := json.NewEncoder(bw)
	var err error
	s.reports.Range(func(k string, v *contract.Report) bool {
		err = enc.Encode(v)
		return err == nil
	})
	if err != nil {
		return err
	}
	return bw.Flush()
}

func (s *server) load(r io.Reader) {
	t0 := time.Now()
	dec := json.NewDecoder(r)
	s.reports.Clear()
	for {
		var rep contract.Report
		if err := dec.Decode(&rep); errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			slog.Error("Failed to load record", slogutil.Error(err))
			break
		}
		s.addReport(&rep)
	}
	slog.Info("Loaded reports", "count", s.reports.Size(), "d", time.Since(t0).String())
}

var (
	plusRe  = regexp.MustCompile(`(\+.*|[.-]dev\..*)$`)
	plusStr = "-dev"
)

// transformVersion returns a version number formatted correctly, with all
// development versions aggregated into one.
func transformVersion(v string) string {
	if v == "unknown-dev" {
		return v
	}
	if !strings.HasPrefix(v, "v") {
		v = "v" + v
	}
	v = plusRe.ReplaceAllString(v, plusStr)

	return v
}
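
// A few illustrative inputs and outputs for transformVersion (a sketch;
// the results follow from plusRe and plusStr as defined above):
//
//	transformVersion("unknown-dev")         // "unknown-dev", kept as-is
//	transformVersion("1.27.9")              // "v1.27.9", "v" prefix added
//	transformVersion("v1.27.9+abc123")      // "v1.27.9-dev"
//	transformVersion("v1.28.0-dev.12.gabc") // "v1.28.0-dev"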

cmd/stdiscosrv/amqp.go (new file)
@@ -0,0 +1,262 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"context"
	"fmt"
	"io"
	"log/slog"

	amqp "github.com/rabbitmq/amqp091-go"
	"github.com/thejerf/suture/v4"
	"google.golang.org/protobuf/proto"

	"github.com/syncthing/syncthing/internal/gen/discosrv"
	"github.com/syncthing/syncthing/internal/protoutil"
	"github.com/syncthing/syncthing/lib/protocol"
)

type amqpReplicator struct {
	suture.Service

	broker   string
	sender   *amqpSender
	receiver *amqpReceiver
	outbox   chan *discosrv.ReplicationRecord
}

func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
	svc := suture.New("amqpReplicator", suture.Spec{PassThroughPanics: true})

	sender := &amqpSender{
		broker:   broker,
		clientID: clientID,
		outbox:   make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
	}
	svc.Add(sender)

	receiver := &amqpReceiver{
		broker:   broker,
		clientID: clientID,
		db:       db,
	}
	svc.Add(receiver)

	return &amqpReplicator{
		Service:  svc,
		broker:   broker,
		sender:   sender,
		receiver: receiver,
		outbox:   make(chan *discosrv.ReplicationRecord, replicationOutboxSize),
	}
}

func (s *amqpReplicator) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
	s.sender.send(key, ps, seen)
}

type amqpSender struct {
	broker   string
	clientID string
	outbox   chan *discosrv.ReplicationRecord
}

func (s *amqpSender) Serve(ctx context.Context) error {
	conn, ch, err := amqpChannel(s.broker)
	if err != nil {
		return err
	}
	defer ch.Close()
	defer conn.Close()

	buf := make([]byte, 1024)
	for {
		select {
		case rec := <-s.outbox:
			size := proto.Size(rec)
			if len(buf) < size {
				buf = make([]byte, size)
			}

			n, err := protoutil.MarshalTo(buf, rec)
			if err != nil {
				replicationSendsTotal.WithLabelValues("error").Inc()
				return fmt.Errorf("replication marshal: %w", err)
			}

			err = ch.PublishWithContext(ctx,
				"discovery", // exchange
				"",          // routing key
				false,       // mandatory
				false,       // immediate
				amqp.Publishing{
					ContentType: "application/protobuf",
					Body:        buf[:n],
					AppId:       s.clientID,
				})
			if err != nil {
				replicationSendsTotal.WithLabelValues("error").Inc()
				return fmt.Errorf("replication publish: %w", err)
			}

			replicationSendsTotal.WithLabelValues("success").Inc()

		case <-ctx.Done():
			return nil
		}
	}
}

func (s *amqpSender) String() string {
	return fmt.Sprintf("amqpSender(%q)", s.broker)
}

func (s *amqpSender) send(key *protocol.DeviceID, ps []*discosrv.DatabaseAddress, seen int64) {
	item := &discosrv.ReplicationRecord{
		Key:       key[:],
		Addresses: ps,
		Seen:      seen,
	}

	// The send should never block. The outbox is suitably buffered for at
	// least a few seconds of stalls, which shouldn't happen in practice.
	select {
	case s.outbox <- item:
	default:
		replicationSendsTotal.WithLabelValues("drop").Inc()
	}
}

type amqpReceiver struct {
	broker   string
	clientID string
	db       database
}

func (s *amqpReceiver) Serve(ctx context.Context) error {
	conn, ch, err := amqpChannel(s.broker)
	if err != nil {
		return err
	}
	defer ch.Close()
	defer conn.Close()

	msgs, err := amqpConsume(ch)
	if err != nil {
		return err
	}

	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				return fmt.Errorf("subscription closed: %w", io.EOF)
			}

			// Ignore messages from ourselves
			if msg.AppId == s.clientID {
				continue
			}

			var rec discosrv.ReplicationRecord
			if err := proto.Unmarshal(msg.Body, &rec); err != nil {
				replicationRecvsTotal.WithLabelValues("error").Inc()
				return fmt.Errorf("replication unmarshal: %w", err)
			}
			id, err := protocol.DeviceIDFromBytes(rec.Key)
			if err != nil {
				id, err = protocol.DeviceIDFromString(string(rec.Key))
			}
			if err != nil {
				slog.Warn("Failed to parse replication device ID", "error", err)
				replicationRecvsTotal.WithLabelValues("error").Inc()
				continue
			}

			if err := s.db.merge(&id, rec.Addresses, rec.Seen); err != nil {
				return fmt.Errorf("replication database merge: %w", err)
			}

			replicationRecvsTotal.WithLabelValues("success").Inc()

		case <-ctx.Done():
			return nil
		}
	}
}

func (s *amqpReceiver) String() string {
	return fmt.Sprintf("amqpReceiver(%q)", s.broker)
}

func amqpChannel(dst string) (*amqp.Connection, *amqp.Channel, error) {
	conn, err := amqp.Dial(dst)
	if err != nil {
		return nil, nil, fmt.Errorf("AMQP dial: %w", err)
	}

	ch, err := conn.Channel()
	if err != nil {
		return nil, nil, fmt.Errorf("AMQP channel: %w", err)
	}

	err = ch.ExchangeDeclare(
		"discovery", // name
		"fanout",    // type
		false,       // durable
		false,       // auto-deleted
		false,       // internal
		false,       // no-wait
		nil,         // arguments
	)
	if err != nil {
		return nil, nil, fmt.Errorf("AMQP declare exchange: %w", err)
	}

	return conn, ch, nil
}

func amqpConsume(ch *amqp.Channel) (<-chan amqp.Delivery, error) {
	q, err := ch.QueueDeclare(
		"",    // name
		false, // durable
		false, // delete when unused
		true,  // exclusive
		false, // no-wait
		nil,   // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("AMQP declare queue: %w", err)
	}

	err = ch.QueueBind(
		q.Name,      // queue name
		"",          // routing key
		"discovery", // exchange
		false,
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("AMQP bind queue: %w", err)
	}

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer
		true,   // auto-ack
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // args
	)
	if err != nil {
		return nil, fmt.Errorf("AMQP consume: %w", err)
	}

	return msgs, nil
}
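
// A rough sketch of how this replicator is wired together with the rest of
// the server; the variable names here (broker, clientID, dir and friends)
// are illustrative assumptions, not the actual option handling:
//
//	db := newInMemoryStore(dir, flushInterval, blobs)
//	amqpRepl := newAMQPReplicator(broker, clientID, db)
//	main.Add(amqpRepl)
//	api := newAPISrv(listen, cert, db, amqpRepl, useHTTP, compression, rate)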
cmd/stdiscosrv/apisrv.go
@@ -18,16 +18,18 @@ import (
	"fmt"
	io "io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"net/url"
	"sort"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/syncthing/syncthing/internal/gen/discosrv"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/stringutil"
)
@@ -39,15 +41,20 @@ type announcement struct {
}

type apiSrv struct {
	addr     string
	cert     tls.Certificate
	db       database
	listener net.Listener
	repl     replicator // optional
	useHTTP  bool
	addr           string
	cert           tls.Certificate
	db             database
	listener       net.Listener
	repl           replicator // optional
	useHTTP        bool
	compression    bool
	gzipWriters    sync.Pool
	seenTracker    *retryAfterTracker
	notSeenTracker *retryAfterTracker
}

	mapsMut sync.Mutex
	misses  map[string]int32
type replicator interface {
	send(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64)
}

type requestID int64
@@ -60,22 +67,34 @@ type contextKey int

const idKey contextKey = iota

func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP, compression bool, desiredNotFoundRate float64) *apiSrv {
	return &apiSrv{
		addr:    addr,
		cert:    cert,
		db:      db,
		repl:    repl,
		useHTTP: useHTTP,
		misses:  make(map[string]int32),
		addr:        addr,
		cert:        cert,
		db:          db,
		repl:        repl,
		useHTTP:     useHTTP,
		compression: compression,
		seenTracker: &retryAfterTracker{
			name:         "seenTracker",
			bucketStarts: time.Now(),
			desiredRate:  desiredNotFoundRate / 2,
			currentDelay: notFoundRetryUnknownMinSeconds,
		},
		notSeenTracker: &retryAfterTracker{
			name:         "notSeenTracker",
			bucketStarts: time.Now(),
			desiredRate:  desiredNotFoundRate / 2,
			currentDelay: notFoundRetryUnknownMaxSeconds / 2,
		},
	}
}

func (s *apiSrv) Serve(_ context.Context) error {
func (s *apiSrv) Serve(ctx context.Context) error {
	if s.useHTTP {
		listener, err := net.Listen("tcp", s.addr)
		if err != nil {
			log.Println("Listen:", err)
			slog.ErrorContext(ctx, "Failed to listen", "error", err)
			return err
		}
		s.listener = listener
@@ -89,7 +108,7 @@ func (s *apiSrv) Serve(_ context.Context) error {

		tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
		if err != nil {
			log.Println("Listen:", err)
			slog.ErrorContext(ctx, "Failed to listen", "error", err)
			return err
		}
		s.listener = tlsListener
@@ -102,12 +121,19 @@ func (s *apiSrv) Serve(_ context.Context) error {
		ReadTimeout:    httpReadTimeout,
		WriteTimeout:   httpWriteTimeout,
		MaxHeaderBytes: httpMaxHeaderBytes,
		ErrorLog:       log.New(io.Discard, "", 0),
	}
	if !debug {
		srv.ErrorLog = log.New(io.Discard, "", 0)
	}

	go func() {
		<-ctx.Done()
		srv.Shutdown(context.Background())
	}()

	err := srv.Serve(s.listener)
	if err != nil {
		log.Println("Serve:", err)
		slog.ErrorContext(ctx, "Failed to serve", "error", err)
	}
	return err
}
@@ -126,9 +152,7 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
	reqID := requestID(rand.Int63())
	req = req.WithContext(context.WithValue(req.Context(), idKey, reqID))

	if debug {
		log.Println(reqID, req.Method, req.URL, req.Proto)
	}
	slog.Debug("Handling request", "id", reqID, "method", req.Method, "url", req.URL, "proto", req.Proto)

	remoteAddr := &net.TCPAddr{
		IP: nil,
@@ -149,7 +173,7 @@ func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
		var err error
		remoteAddr, err = net.ResolveTCPAddr("tcp", req.RemoteAddr)
		if err != nil {
			log.Println("remoteAddr:", err)
			slog.Warn("Failed to resolve remote address", "address", req.RemoteAddr, "error", err)
			lw.Header().Set("Retry-After", errorRetryAfterString())
			http.Error(lw, "Internal Server Error", http.StatusInternalServerError)
			apiRequestsTotal.WithLabelValues("no_remote_addr").Inc()
@@ -172,17 +196,14 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {

	deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
	if err != nil {
		if debug {
			log.Println(reqID, "bad device param")
		}
		slog.Debug("Request with bad device param", "id", reqID, "error", err)
		lookupRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
		return
	}

	key := deviceID.String()
	rec, err := s.db.get(key)
	rec, err := s.db.get(&deviceID)
	if err != nil {
		// some sort of internal error
		lookupRequestsTotal.WithLabelValues("internal_error").Inc()
@@ -192,28 +213,14 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
	}

	if len(rec.Addresses) == 0 {
		lookupRequestsTotal.WithLabelValues("not_found").Inc()

		s.mapsMut.Lock()
		misses := s.misses[key]
		if misses < rec.Misses {
			misses = rec.Misses + 1
		var afterS int
		if rec.Seen == 0 {
			afterS = s.notSeenTracker.retryAfterS()
			lookupRequestsTotal.WithLabelValues("not_found_ever").Inc()
		} else {
			misses++
			afterS = s.seenTracker.retryAfterS()
			lookupRequestsTotal.WithLabelValues("not_found_recent").Inc()
		}
		s.misses[key] = misses
		s.mapsMut.Unlock()

		if misses%notFoundMissesWriteInterval == 0 {
			rec.Misses = misses
			rec.Missed = time.Now().UnixNano()
			rec.Addresses = nil
			// rec.Seen retained from get
			s.db.put(key, rec)
		}

		afterS := notFoundRetryAfterSeconds(int(misses))
		retryAfterHistogram.Observe(float64(afterS))
		w.Header().Set("Retry-After", strconv.Itoa(afterS))
		http.Error(w, "Not Found", http.StatusNotFound)
		return
@@ -225,10 +232,16 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
	var bw io.Writer = w

	// Use compression if the client asks for it
	if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
	if s.compression && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
		gw, ok := s.gzipWriters.Get().(*gzip.Writer)
		if ok {
			gw.Reset(w)
		} else {
			gw = gzip.NewWriter(w)
		}
		w.Header().Set("Content-Encoding", "gzip")
		gw := gzip.NewWriter(bw)
		defer gw.Close()
		defer s.gzipWriters.Put(gw)
		bw = gw
	}

@@ -243,9 +256,7 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req

	rawCert, err := certificateBytes(req)
	if err != nil {
		if debug {
			log.Println(reqID, "no certificates:", err)
		}
		slog.Debug("Request without certificates", "id", reqID, "error", err)
		announceRequestsTotal.WithLabelValues("no_certificate").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Forbidden", http.StatusForbidden)
@@ -254,9 +265,7 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req

	var ann announcement
	if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
		if debug {
			log.Println(reqID, "decode:", err)
		}
		slog.Debug("Failed to decode request", "id", reqID, "error", err)
		announceRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
@@ -267,6 +276,7 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req

	addresses := fixupAddresses(remoteAddr, ann.Addresses)
	if len(addresses) == 0 {
		slog.Debug("Request without addresses", "id", reqID, "error", err)
		announceRequestsTotal.WithLabelValues("bad_request").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Bad Request", http.StatusBadRequest)
@@ -274,6 +284,7 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req
	}

	if err := s.handleAnnounce(deviceID, addresses); err != nil {
		slog.Debug("Failed to handle request", "id", reqID, "error", err)
		announceRequestsTotal.WithLabelValues("internal_error").Inc()
		w.Header().Set("Retry-After", errorRetryAfterString())
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
@@ -284,6 +295,7 @@ func (s *apiSrv) handlePOST(remoteAddr *net.TCPAddr, w http.ResponseWriter, req

	w.Header().Set("Reannounce-After", reannounceAfterString())
	w.WriteHeader(http.StatusNoContent)
	slog.Debug("Device announced", "id", reqID, "device", deviceID, "addresses", addresses)
}

func (s *apiSrv) Stop() {
@@ -291,29 +303,31 @@ func (s *apiSrv) Stop() {
}

func (s *apiSrv) handleAnnounce(deviceID protocol.DeviceID, addresses []string) error {
	key := deviceID.String()
	now := time.Now()
	expire := now.Add(addressExpiryTime).UnixNano()

	dbAddrs := make([]DatabaseAddress, len(addresses))
	for i := range addresses {
		dbAddrs[i].Address = addresses[i]
		dbAddrs[i].Expires = expire
	}

	// The address slice must always be sorted for database merges to work
	// properly.
	sort.Sort(databaseAddressOrder(dbAddrs))
	slices.Sort(addresses)
	addresses = slices.Compact(addresses)

	dbAddrs := make([]*discosrv.DatabaseAddress, len(addresses))
	for i := range addresses {
		dbAddrs[i] = &discosrv.DatabaseAddress{
			Address: addresses[i],
			Expires: expire,
		}
	}

	seen := now.UnixNano()
	if s.repl != nil {
		s.repl.send(key, dbAddrs, seen)
		s.repl.send(&deviceID, dbAddrs, seen)
	}
	return s.db.merge(key, dbAddrs, seen)
	return s.db.merge(&deviceID, dbAddrs, seen)
}

func handlePing(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(204)
	w.WriteHeader(http.StatusNoContent)
}

func certificateBytes(req *http.Request) ([]byte, error) {
@@ -323,7 +337,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {

	var bs []byte

	if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
	if hdr := req.Header.Get("X-Ssl-Cert"); hdr != "" {
		if strings.Contains(hdr, "%") {
			// Nginx using $ssl_client_escaped_cert
			// The certificate is in PEM format with url encoding.
@@ -359,7 +373,7 @@ func certificateBytes(req *http.Request) ([]byte, error) {
		}

		bs = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: hdr})
	} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
	} else if cert := req.Header.Get("X-Forwarded-Tls-Client-Cert"); cert != "" {
		// Traefik 2 passtlsclientcert
		//
		// The certificate is in PEM format, maybe with URL encoding
@@ -367,19 +381,36 @@ func certificateBytes(req *http.Request) ([]byte, error) {
		// statements. We need to decode, reinstate the newlines every 64
		// characters and add statements for the PEM decoder

		if strings.Contains(hdr, "%") {
			if unesc, err := url.QueryUnescape(hdr); err == nil {
				hdr = unesc
		if strings.Contains(cert, "%") {
			if unesc, err := url.QueryUnescape(cert); err == nil {
				cert = unesc
			}
		}

		for i := 64; i < len(hdr); i += 65 {
			hdr = hdr[:i] + "\n" + hdr[i:]
		const (
			header = "-----BEGIN CERTIFICATE-----"
			footer = "-----END CERTIFICATE-----"
		)

		var b bytes.Buffer
		b.Grow(len(header) + 1 + len(cert) + len(cert)/64 + 1 + len(footer) + 1)

		b.WriteString(header)
		b.WriteByte('\n')

		for i := 0; i < len(cert); i += 64 {
			end := i + 64
			if end > len(cert) {
				end = len(cert)
			}
			b.WriteString(cert[i:end])
			b.WriteByte('\n')
		}

		hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
		hdr += "\n-----END CERTIFICATE-----\n"
		bs = []byte(hdr)
		b.WriteString(footer)
		b.WriteByte('\n')

		bs = b.Bytes()
	}

	if bs == nil {
@@ -444,7 +475,6 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
				// remote is nil, unable to determine host IP
				continue
			}

		}

		// If zero port was specified, use remote port.
@@ -470,6 +500,7 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {

type loggingResponseWriter struct {
	http.ResponseWriter

	statusCode int
}

@@ -482,7 +513,7 @@ func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.ResponseWriter.WriteHeader(code)
}

func addressStrs(dbAddrs []DatabaseAddress) []string {
func addressStrs(dbAddrs []*discosrv.DatabaseAddress) []string {
	res := make([]string, len(dbAddrs))
	for i, a := range dbAddrs {
		res[i] = a.Address
@@ -494,15 +525,44 @@ func errorRetryAfterString() string {
	return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
}

func notFoundRetryAfterSeconds(misses int) int {
	retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
	if retryAfterS > notFoundRetryMaxSeconds {
		retryAfterS = notFoundRetryMaxSeconds
	}
	retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
	return retryAfterS
}

func reannounceAfterString() string {
	return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
}

type retryAfterTracker struct {
	name        string
	desiredRate float64 // requests per second

	mut          sync.Mutex
	lastCount    int       // requests in the last bucket
	curCount     int       // requests in the current bucket
	bucketStarts time.Time // start of the current bucket
	currentDelay int       // current delay in seconds
}

func (t *retryAfterTracker) retryAfterS() int {
	now := time.Now()
	t.mut.Lock()
	if durS := now.Sub(t.bucketStarts).Seconds(); durS > float64(t.currentDelay) {
		t.bucketStarts = now
		t.lastCount = t.curCount
		lastRate := float64(t.lastCount) / durS

		switch {
		case t.currentDelay > notFoundRetryUnknownMinSeconds &&
			lastRate < 0.75*t.desiredRate:
			t.currentDelay = max(8*t.currentDelay/10, notFoundRetryUnknownMinSeconds)
		case t.currentDelay < notFoundRetryUnknownMaxSeconds &&
			lastRate > 1.25*t.desiredRate:
			t.currentDelay = min(3*t.currentDelay/2, notFoundRetryUnknownMaxSeconds)
		}

		t.curCount = 0
	}
	if t.curCount == 0 {
		retryAfterLevel.WithLabelValues(t.name).Set(float64(t.currentDelay))
	}
	t.curCount++
	t.mut.Unlock()
	return t.currentDelay + rand.Intn(t.currentDelay/4)
}
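
// A worked example of the adaptive backoff above, with illustrative
// numbers (the actual bounds come from notFoundRetryUnknownMinSeconds and
// notFoundRetryUnknownMaxSeconds, defined elsewhere in this package):
// suppose desiredRate is 0.5 req/s and currentDelay is 60 s, so a bucket
// spans at least 60 s. If 60 lookups land in the bucket, the observed rate
// is about 1.0 req/s > 1.25*0.5, and the delay grows to 3/2 of its value,
// 90 s (capped at the max). If only 10 land in it, about 0.17 req/s <
// 0.75*0.5, the delay shrinks to 8/10 of its value, 48 s (floored at the
// min). Each 404 response then carries Retry-After = currentDelay plus up
// to a quarter of that again as jitter.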
cmd/stdiscosrv/apisrv_test.go
@@ -7,9 +7,20 @@
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"regexp"
	"strings"
	"testing"

	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/tlsutil"
)

func TestFixupAddresses(t *testing.T) {
@@ -94,3 +105,79 @@ func addr(host string, port int) *net.TCPAddr {
		Port: port,
	}
}

func BenchmarkAPIRequests(b *testing.B) {
	db := newInMemoryStore(b.TempDir(), 0, nil)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go db.Serve(ctx)
	api := newAPISrv("127.0.0.1:0", tls.Certificate{}, db, nil, true, true, 1000)
	srv := httptest.NewServer(http.HandlerFunc(api.handler))

	kf := b.TempDir() + "/cert"
	crt, err := tlsutil.NewCertificate(kf+".crt", kf+".key", "localhost", 7, true)
	if err != nil {
		b.Fatal(err)
	}
	certBs, err := os.ReadFile(kf + ".crt")
	if err != nil {
		b.Fatal(err)
	}
	certBs = regexp.MustCompile(`---[^\n]+---\n`).ReplaceAll(certBs, nil)
	certString := strings.ReplaceAll(string(certBs), "\n", " ")

	devID := protocol.NewDeviceID(crt.Certificate[0])
	devIDString := devID.String()

	b.Run("Announce", func(b *testing.B) {
		b.ReportAllocs()
		url := srv.URL + "/v2/?device=" + devIDString
		for i := 0; i < b.N; i++ {
			req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(`{"addresses":["tcp://10.10.10.10:42000"]}`))
			req.Header.Set("X-Forwarded-Tls-Client-Cert", certString)
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				b.Fatal(err)
			}
			resp.Body.Close()
			if resp.StatusCode != http.StatusNoContent {
				b.Fatalf("unexpected status %s", resp.Status)
			}
		}
	})

	b.Run("Lookup", func(b *testing.B) {
		b.ReportAllocs()
		url := srv.URL + "/v2/?device=" + devIDString
		for i := 0; i < b.N; i++ {
			req, _ := http.NewRequest(http.MethodGet, url, nil)
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				b.Fatal(err)
			}
			io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				b.Fatalf("unexpected status %s", resp.Status)
			}
		}
	})

	b.Run("LookupNoCompression", func(b *testing.B) {
		b.ReportAllocs()
		url := srv.URL + "/v2/?device=" + devIDString
		for i := 0; i < b.N; i++ {
			req, _ := http.NewRequest(http.MethodGet, url, nil)
			req.Header.Set("Accept-Encoding", "identity") // disable compression
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				b.Fatal(err)
			}
			io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				b.Fatalf("unexpected status %s", resp.Status)
			}
		}
	})
}
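
// The benchmarks above can be run in the usual way, assuming the standard
// repo layout, e.g.:
//
//	go test -bench=BenchmarkAPIRequests -benchmem ./cmd/stdiscosrv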
cmd/stdiscosrv/database.go
@@ -4,23 +4,31 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

//go:generate go run ../../proto/scripts/protofmt.go database.proto
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto

package main

import (
	"bufio"
	"cmp"
	"context"
	"log"
	"net"
	"net/url"
	"sort"
	"encoding/binary"
	"errors"
	"io"
	"log/slog"
	"os"
	"path"
	"runtime"
	"slices"
	"strings"
	"time"

	"github.com/syncthing/syncthing/lib/sliceutil"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/puzpuzpuz/xsync/v3"
	"google.golang.org/protobuf/proto"

	"github.com/syncthing/syncthing/internal/blob"
	"github.com/syncthing/syncthing/internal/gen/discosrv"
	"github.com/syncthing/syncthing/internal/protoutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
)

type clock interface {
@@ -34,380 +42,407 @@ func (defaultClock) Now() time.Time {
}

type database interface {
	put(key string, rec DatabaseRecord) error
	merge(key string, addrs []DatabaseAddress, seen int64) error
	get(key string) (DatabaseRecord, error)
	put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error
	merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error
	get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error)
}

type levelDBStore struct {
	db         *leveldb.DB
	inbox      chan func()
	clock      clock
	marshalBuf []byte
type inMemoryStore struct {
	m             *xsync.MapOf[protocol.DeviceID, *discosrv.DatabaseRecord]
	dir           string
	flushInterval time.Duration
	blobs         blob.Store
	objKey        string
	clock         clock
}

func newLevelDBStore(dir string) (*levelDBStore, error) {
	db, err := leveldb.OpenFile(dir, levelDBOptions)
func newInMemoryStore(dir string, flushInterval time.Duration, blobs blob.Store) *inMemoryStore {
	hn, err := os.Hostname()
	if err != nil {
		return nil, err
		hn = rand.String(8)
	}
	return &levelDBStore{
		db:    db,
		inbox: make(chan func(), 16),
		clock: defaultClock{},
	}, nil
}

func newMemoryLevelDBStore() (*levelDBStore, error) {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		return nil, err
	s := &inMemoryStore{
		m:             xsync.NewMapOf[protocol.DeviceID, *discosrv.DatabaseRecord](),
		dir:           dir,
		flushInterval: flushInterval,
		blobs:         blobs,
		objKey:        hn + ".db",
		clock:         defaultClock{},
	}
	return &levelDBStore{
		db:    db,
		inbox: make(chan func(), 16),
		clock: defaultClock{},
	}, nil
}

func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
	}()

	rc := make(chan error)

	s.inbox <- func() {
		size := rec.Size()
		if len(s.marshalBuf) < size {
			s.marshalBuf = make([]byte, size)
	nr, err := s.read()
	if os.IsNotExist(err) && blobs != nil {
		// Try to read from blob storage
		latestKey, cerr := blobs.LatestKey(context.Background())
		if cerr != nil {
			slog.Error("Failed to find database in blob storage", "error", cerr)
			return s
		}
		n, _ := rec.MarshalTo(s.marshalBuf)
		rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
		fd, cerr := os.Create(path.Join(s.dir, "records.db"))
		if cerr != nil {
			slog.Error("Failed to create database file", "error", cerr)
			return s
		}
		if cerr := blobs.Download(context.Background(), latestKey, fd); cerr != nil {
			slog.Error("Failed to download database from blob storage", "error", cerr)
		}
		_ = fd.Close()
		nr, err = s.read()
	}

	err := <-rc
	if err != nil {
		databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
	} else {
		databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
		slog.Error("Failed to read database", "error", err)
	}

	return err
	slog.Info("Loaded database", "records", nr)
	s.expireAndCalculateStatistics()
	return s
}

func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
func (s *inMemoryStore) put(key *protocol.DeviceID, rec *discosrv.DatabaseRecord) error {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
	}()
	s.m.Store(*key, rec)
	databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
	databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
	return nil
}

	rc := make(chan error)
	newRec := DatabaseRecord{
func (s *inMemoryStore) merge(key *protocol.DeviceID, addrs []*discosrv.DatabaseAddress, seen int64) error {
	t0 := time.Now()

	newRec := &discosrv.DatabaseRecord{
		Addresses: addrs,
		Seen:      seen,
	}

	s.inbox <- func() {
		// grab the existing record
		oldRec, err := s.get(key)
		if err != nil {
			// "not found" is not an error from get, so this is serious
			// stuff only
			rc <- err
			return
		}
		newRec = merge(newRec, oldRec)

		// We replicate s.put() functionality here ourselves instead of
		// calling it because we want to serialize our get above together
		// with the put in the same function.
		size := newRec.Size()
		if len(s.marshalBuf) < size {
			s.marshalBuf = make([]byte, size)
		}
		n, _ := newRec.MarshalTo(s.marshalBuf)
		rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
	if oldRec, ok := s.m.Load(*key); ok {
		newRec = merge(oldRec, newRec)
	}
	s.m.Store(*key, newRec)

	err := <-rc
	if err != nil {
		databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
	} else {
		databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
	}
	databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
	databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())

	return err
	return nil
}

func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
func (s *inMemoryStore) get(key *protocol.DeviceID) (*discosrv.DatabaseRecord, error) {
	t0 := time.Now()
	defer func() {
		databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
	}()

	keyBs := []byte(key)
	val, err := s.db.Get(keyBs, nil)
	if err == leveldb.ErrNotFound {
	rec, ok := s.m.Load(*key)
	if !ok {
		databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
		return DatabaseRecord{}, nil
	}
	if err != nil {
		databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
		return DatabaseRecord{}, err
		return &discosrv.DatabaseRecord{}, nil
	}

	var rec DatabaseRecord

	if err := rec.Unmarshal(val); err != nil {
		databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
		return DatabaseRecord{}, nil
	}

	rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
	rec.Addresses = expire(rec.Addresses, s.clock.Now())
	databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
	return rec, nil
}

func (s *levelDBStore) Serve(ctx context.Context) error {
	t := time.NewTimer(0)
	defer t.Stop()
	defer s.db.Close()
func (s *inMemoryStore) Serve(ctx context.Context) error {
	if s.flushInterval <= 0 {
		<-ctx.Done()
		return nil
	}

	// Start the statistics serve routine. It will exit with us when
	// statisticsTrigger is closed.
	statisticsTrigger := make(chan struct{})
	statisticsDone := make(chan struct{})
	go s.statisticsServe(statisticsTrigger, statisticsDone)
	t := time.NewTimer(s.flushInterval)
	defer t.Stop()

loop:
	for {
		select {
		case fn := <-s.inbox:
			// Run function in serialized order.
			fn()

		case <-t.C:
			// Trigger the statistics routine to do its thing in the
			// background.
			statisticsTrigger <- struct{}{}

		case <-statisticsDone:
			// The statistics routine is done with one iteration, schedule
			// the next.
			t.Reset(databaseStatisticsInterval)
			slog.InfoContext(ctx, "Calculating statistics")
			s.expireAndCalculateStatistics()
			slog.InfoContext(ctx, "Flushing database")
			if err := s.write(); err != nil {
				slog.ErrorContext(ctx, "Failed to write database", "error", err)
			}
			slog.InfoContext(ctx, "Finished flushing database")
			t.Reset(s.flushInterval)

		case <-ctx.Done():
			// We're done.
			close(statisticsTrigger)
			break loop
		}
	}

	// Also wait for statisticsServe to return
	<-statisticsDone
	return s.write()
}

func (s *inMemoryStore) expireAndCalculateStatistics() {
	now := s.clock.Now()
	cutoff24h := now.Add(-24 * time.Hour).UnixNano()
	cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
	current, currentIPv4, currentIPv6, currentIPv6GUA, last24h, last1w := 0, 0, 0, 0, 0, 0

	n := 0
	s.m.Range(func(key protocol.DeviceID, rec *discosrv.DatabaseRecord) bool {
		if n%1000 == 0 {
			runtime.Gosched()
		}
		n++

		addresses := expire(rec.Addresses, now)
		if len(addresses) == 0 {
			rec.Addresses = nil
			s.m.Store(key, rec)
		} else if len(addresses) != len(rec.Addresses) {
			rec.Addresses = addresses
			s.m.Store(key, rec)
		}

		switch {
		case len(rec.Addresses) > 0:
			current++
			seenIPv4, seenIPv6, seenIPv6GUA := false, false, false
			for _, addr := range rec.Addresses {
				// We do fast and loose matching on strings here instead of
				// parsing the address and the IP and doing "proper" checks,
				// to keep things fast and generate less garbage.
				if strings.Contains(addr.Address, "[") {
					seenIPv6 = true
					if strings.Contains(addr.Address, "[2") {
						seenIPv6GUA = true
					}
				} else {
					seenIPv4 = true
				}
				if seenIPv4 && seenIPv6 && seenIPv6GUA {
					break
				}
			}
			if seenIPv4 {
				currentIPv4++
			}
			if seenIPv6 {
				currentIPv6++
			}
			if seenIPv6GUA {
				currentIPv6GUA++
			}
		case rec.Seen > cutoff24h:
			last24h++
		case rec.Seen > cutoff1w:
			last1w++
		default:
			// drop the record if it's older than a week
			s.m.Delete(key)
		}
		return true
	})

	databaseKeys.WithLabelValues("current").Set(float64(current))
	databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
	databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
	databaseKeys.WithLabelValues("currentIPv6GUA").Set(float64(currentIPv6GUA))
	databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
	databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
	databaseStatisticsSeconds.Set(time.Since(now).Seconds())
}

func (s *inMemoryStore) write() (err error) {
	t0 := time.Now()
	defer func() {
		if err == nil {
			databaseWriteSeconds.Set(time.Since(t0).Seconds())
			databaseLastWritten.Set(float64(t0.Unix()))
		}
	}()

	dbf := path.Join(s.dir, "records.db")
	fd, err := os.Create(dbf + ".tmp")
	if err != nil {
		return err
	}
	bw := bufio.NewWriterSize(fd, 1<<20)

	var buf []byte
	var rangeErr error
	now := s.clock.Now()
	cutoff1w := now.Add(-7 * 24 * time.Hour).UnixNano()
	n := 0
	s.m.Range(func(key protocol.DeviceID, value *discosrv.DatabaseRecord) bool {
		if n%1000 == 0 {
			runtime.Gosched()
		}
		n++

		if value.Seen < cutoff1w {
			// drop the record if it's older than a week
			return true
		}
		rec := &discosrv.ReplicationRecord{
			Key:       key[:],
			Addresses: value.Addresses,
			Seen:      value.Seen,
		}
		s := proto.Size(rec)
		if s+4 > len(buf) {
			buf = make([]byte, s+4)
		}
		n, err := protoutil.MarshalTo(buf[4:], rec)
		if err != nil {
			rangeErr = err
			return false
		}
		binary.BigEndian.PutUint32(buf, uint32(n))
		if _, err := bw.Write(buf[:n+4]); err != nil {
			rangeErr = err
			return false
		}
		return true
	})
	if rangeErr != nil {
		_ = fd.Close()
		return rangeErr
	}

	if err := bw.Flush(); err != nil {
		_ = fd.Close()
		return err
	}
	if err := fd.Close(); err != nil {
		return err
	}
	if err := os.Rename(dbf+".tmp", dbf); err != nil {
		return err
	}

	if info, err := os.Lstat(dbf); err == nil {
		slog.Info("Saved database", "name", dbf, "size", info.Size(), "modtime", info.ModTime())
	} else {
		slog.Warn("Failed to stat database after save", "error", err)
	}

	// Upload to blob storage
	if s.blobs != nil {
		fd, err = os.Open(dbf)
		if err != nil {
			slog.Error("Failed to upload database to blob storage", "error", err)
			return nil
		}
		defer fd.Close()
		if err := s.blobs.Upload(context.Background(), s.objKey, fd); err != nil {
			slog.Error("Failed to upload database to blob storage", "error", err)
		}
		slog.Info("Finished uploading database")
	}

	return nil
}

func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
	defer close(done)
func (s *inMemoryStore) read() (int, error) {
	fd, err := os.Open(path.Join(s.dir, "records.db"))
	if err != nil {
		return 0, err
	}
	defer fd.Close()

	for range trigger {
		t0 := time.Now()
		nowNanos := t0.UnixNano()
		cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
		cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
		cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
		current, currentIPv4, currentIPv6, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0, 0, 0

		iter := s.db.NewIterator(&util.Range{}, nil)
		for iter.Next() {
			// Attempt to unmarshal the record and count the
			// failure if there's something wrong with it.
			var rec DatabaseRecord
			if err := rec.Unmarshal(iter.Value()); err != nil {
				errors++
				continue
			}

			// If there are addresses that have not expired it's a current
			// record, otherwise account it based on when it was last seen
			// (last 24 hours or last week) or finally as inactive.
			addrs := expire(rec.Addresses, nowNanos)
			switch {
			case len(addrs) > 0:
				current++
				seenIPv4, seenIPv6 := false, false
				for _, addr := range addrs {
					uri, err := url.Parse(addr.Address)
					if err != nil {
						continue
					}
					host, _, err := net.SplitHostPort(uri.Host)
					if err != nil {
						continue
					}
					if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
						seenIPv4 = true
					} else if ip != nil {
						seenIPv6 = true
					}
					if seenIPv4 && seenIPv6 {
						break
					}
				}
				if seenIPv4 {
					currentIPv4++
				}
				if seenIPv6 {
					currentIPv6++
				}
			case rec.Seen > cutoff24h:
				last24h++
			case rec.Seen > cutoff1w:
				last1w++
			case rec.Seen > cutoff2Mon:
				inactive++
			case rec.Missed < cutoff2Mon:
				// It hasn't been seen lately and we haven't recorded
				// someone asking for this device in a long time either;
				// delete the record.
				if err := s.db.Delete(iter.Key(), nil); err != nil {
					databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
				} else {
					databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
				}
			default:
				inactive++
	br := bufio.NewReader(fd)
	var buf []byte
	nr := 0
	for {
		var n uint32
		if err := binary.Read(br, binary.BigEndian, &n); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nr, err
		}
		if int(n) > len(buf) {
			buf = make([]byte, n)
		}
		if _, err := io.ReadFull(br, buf[:n]); err != nil {
			return nr, err
		}
		rec := &discosrv.ReplicationRecord{}
		if err := proto.Unmarshal(buf[:n], rec); err != nil {
			return nr, err
		}
		key, err := protocol.DeviceIDFromBytes(rec.Key)
		if err != nil {
			key, err = protocol.DeviceIDFromString(string(rec.Key))
		}
		if err != nil {
			slog.Error("Got bad device ID while reading database", "error", err)
			continue
		}

		iter.Release()

		databaseKeys.WithLabelValues("current").Set(float64(current))
		databaseKeys.WithLabelValues("currentIPv4").Set(float64(currentIPv4))
		databaseKeys.WithLabelValues("currentIPv6").Set(float64(currentIPv6))
		databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
		databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
		databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
		databaseKeys.WithLabelValues("error").Set(float64(errors))
		databaseStatisticsSeconds.Set(time.Since(t0).Seconds())

		// Signal that we are done and can be scheduled again.
		done <- struct{}{}
		slices.SortFunc(rec.Addresses, Cmp)
		rec.Addresses = slices.CompactFunc(rec.Addresses, Equal)
		s.m.Store(key, &discosrv.DatabaseRecord{
			Addresses: expire(rec.Addresses, s.clock.Now()),
			Seen:      rec.Seen,
		})
		nr++
	}
	return nr, nil
}

// merge returns the merged result of the two database records a and b. The
// result is the union of the two address sets, with the newer expiry time
// chosen for any duplicates.
func merge(a, b DatabaseRecord) DatabaseRecord {
// chosen for any duplicates. The address list in a is overwritten and
// reused for the result.
func merge(a, b *discosrv.DatabaseRecord) *discosrv.DatabaseRecord {
	// Both lists must be sorted for this to work.
	if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
		log.Println("Warning: bug: addresses not correctly sorted in merge")
		a.Addresses = sortedAddressCopy(a.Addresses)
	}
	if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
		// no warning because this is the side we read from disk and it may
		// legitimately predate correct sorting.
		b.Addresses = sortedAddressCopy(b.Addresses)
	}

	res := DatabaseRecord{
		Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
		Seen:      a.Seen,
	}
	if b.Seen > a.Seen {
		res.Seen = b.Seen
	}
	a.Seen = max(a.Seen, b.Seen)

	aIdx := 0
	bIdx := 0
	aAddrs := a.Addresses
	bAddrs := b.Addresses
loop:
	for {
		switch {
		case aIdx == len(aAddrs) && bIdx == len(bAddrs):
			// both lists are exhausted, we are done
			break loop

		case aIdx == len(aAddrs):
			// a is exhausted, pick from b and continue
			res.Addresses = append(res.Addresses, bAddrs[bIdx])
			bIdx++
			continue

		case bIdx == len(bAddrs):
			// b is exhausted, pick from a and continue
			res.Addresses = append(res.Addresses, aAddrs[aIdx])
			aIdx++
			continue
		}

		// We have values left on both sides.
		aVal := aAddrs[aIdx]
		bVal := bAddrs[bIdx]

		switch {
		case aVal.Address == bVal.Address:
			// update for same address, pick newer
			if aVal.Expires > bVal.Expires {
				res.Addresses = append(res.Addresses, aVal)
			} else {
				res.Addresses = append(res.Addresses, bVal)
			}
	for aIdx < len(a.Addresses) && bIdx < len(b.Addresses) {
		switch cmp.Compare(a.Addresses[aIdx].Address, b.Addresses[bIdx].Address) {
		case 0:
			// a == b, choose the newer expiry time
			a.Addresses[aIdx].Expires = max(a.Addresses[aIdx].Expires, b.Addresses[bIdx].Expires)
			aIdx++
			bIdx++

		case aVal.Address < bVal.Address:
			// a is smallest, pick it and continue
			res.Addresses = append(res.Addresses, aVal)
		case -1:
			// a < b, keep a and move on
			aIdx++

		default:
			// b is smallest, pick it and continue
			res.Addresses = append(res.Addresses, bVal)
		case 1:
			// a > b, insert b before a
			a.Addresses = append(a.Addresses[:aIdx], append([]*discosrv.DatabaseAddress{b.Addresses[bIdx]}, a.Addresses[aIdx:]...)...)
			bIdx++
		}
	}
	return res
	if bIdx < len(b.Addresses) {
		a.Addresses = append(a.Addresses, b.Addresses[bIdx:]...)
	}

	return a
}

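// An illustrative use of the new merge above (a sketch; both inputs must
// already be sorted by address, and a is modified and returned):
//
//	a := &discosrv.DatabaseRecord{Seen: 10, Addresses: []*discosrv.DatabaseAddress{
//		{Address: "tcp://10.0.0.1:22000", Expires: 100},
//	}}
//	b := &discosrv.DatabaseRecord{Seen: 20, Addresses: []*discosrv.DatabaseAddress{
//		{Address: "tcp://10.0.0.1:22000", Expires: 200},
//		{Address: "tcp://10.0.0.2:22000", Expires: 150},
//	}}
//	res := merge(a, b)
//	// res is a itself: Seen == 20; the duplicate 10.0.0.1 keeps the newer
//	// expiry 200, and 10.0.0.2 is appended from b.
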
// expire returns the list of addresses after removing expired entries.
// Expiration happens in place, so the slice given as the parameter is
// destroyed. Internal order is not preserved.
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
	i := 0
	for i < len(addrs) {
		if addrs[i].Expires < now {
			addrs = sliceutil.RemoveAndZero(addrs, i)
// destroyed. Internal order is preserved.
func expire(addrs []*discosrv.DatabaseAddress, now time.Time) []*discosrv.DatabaseAddress {
	cutoff := now.UnixNano()
	naddrs := addrs[:0]
	for i := range addrs {
		if i > 0 && addrs[i].Address == addrs[i-1].Address {
			// Skip duplicates
			continue
		}
		i++
		if addrs[i].Expires >= cutoff {
			naddrs = append(naddrs, addrs[i])
		}
	}
	return addrs
	if len(naddrs) == 0 {
		return nil
	}
	return naddrs
}

func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
|
||||
sorted := make([]DatabaseAddress, len(addrs))
|
||||
copy(sorted, addrs)
|
||||
sort.Sort(databaseAddressOrder(sorted))
|
||||
return sorted
|
||||
func Cmp(d, other *discosrv.DatabaseAddress) (n int) {
|
||||
if c := cmp.Compare(d.Address, other.Address); c != 0 {
|
||||
return c
|
||||
}
|
||||
return cmp.Compare(d.Expires, other.Expires)
|
||||
}
|
||||
|
||||
type databaseAddressOrder []DatabaseAddress
|
||||
|
||||
func (s databaseAddressOrder) Less(a, b int) bool {
|
||||
return s[a].Address < s[b].Address
|
||||
}
|
||||
|
||||
func (s databaseAddressOrder) Swap(a, b int) {
|
||||
s[a], s[b] = s[b], s[a]
|
||||
}
|
||||
|
||||
func (s databaseAddressOrder) Len() int {
|
||||
return len(s)
|
||||
func Equal(d, other *discosrv.DatabaseAddress) bool {
|
||||
return d.Address == other.Address
|
||||
}
|
||||
|
||||
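The Cmp and Equal helpers above have exactly the comparator shapes that Go's standard slices package (Go 1.21+) expects, which is presumably how the store keeps address lists sorted and deduplicated elsewhere in this changeset. The sketch below is an illustration with a local stand-in type, not code from the repository.

package main

import (
    "cmp"
    "fmt"
    "slices"
)

// addr is a stand-in for discosrv.DatabaseAddress (illustration only).
type addr struct {
    Address string
    Expires int64
}

// addrCmp mirrors Cmp above: order by address, then by expiry.
func addrCmp(d, other addr) int {
    if c := cmp.Compare(d.Address, other.Address); c != 0 {
        return c
    }
    return cmp.Compare(d.Expires, other.Expires)
}

func main() {
    addrs := []addr{{"b", 10}, {"a", 20}, {"a", 5}}
    slices.SortFunc(addrs, addrCmp)
    // CompactFunc keeps the first element of each run of equal addresses.
    addrs = slices.CompactFunc(addrs, func(d, o addr) bool { return d.Address == o.Address })
    fmt.Println(addrs) // [{a 5} {b 10}]
}
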
@@ -1,847 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: database.proto

package main

import (
    fmt "fmt"
    _ "github.com/gogo/protobuf/gogoproto"
    proto "github.com/gogo/protobuf/proto"
    io "io"
    math "math"
    math_bits "math/bits"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

type DatabaseRecord struct {
    Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
    Misses    int32             `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
    Seen      int64             `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
    Missed    int64             `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
}

func (m *DatabaseRecord) Reset()         { *m = DatabaseRecord{} }
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
func (*DatabaseRecord) ProtoMessage()    {}
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
    return fileDescriptor_b90fe3356ea5df07, []int{0}
}
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DatabaseRecord.Merge(m, src)
}
func (m *DatabaseRecord) XXX_Size() int {
    return m.Size()
}
func (m *DatabaseRecord) XXX_DiscardUnknown() {
    xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
}

var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo

type ReplicationRecord struct {
    Key       string            `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
    Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
    Seen      int64             `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
}

func (m *ReplicationRecord) Reset()         { *m = ReplicationRecord{} }
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
func (*ReplicationRecord) ProtoMessage()    {}
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
    return fileDescriptor_b90fe3356ea5df07, []int{1}
}
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
    xxx_messageInfo_ReplicationRecord.Merge(m, src)
}
func (m *ReplicationRecord) XXX_Size() int {
    return m.Size()
}
func (m *ReplicationRecord) XXX_DiscardUnknown() {
    xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
}

var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo

type DatabaseAddress struct {
    Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
    Expires int64  `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
}

func (m *DatabaseAddress) Reset()         { *m = DatabaseAddress{} }
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
func (*DatabaseAddress) ProtoMessage()    {}
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
    return fileDescriptor_b90fe3356ea5df07, []int{2}
}
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
    xxx_messageInfo_DatabaseAddress.Merge(m, src)
}
func (m *DatabaseAddress) XXX_Size() int {
    return m.Size()
}
func (m *DatabaseAddress) XXX_DiscardUnknown() {
    xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
}

var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo

func init() {
    proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
    proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
    proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
}

func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }

var fileDescriptor_b90fe3356ea5df07 = []byte{
    // 270 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
    0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
    0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
    0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
    0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
    0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
    0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
    0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
    0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
    0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
    0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
    0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
    0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
    0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
    0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
    0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
    0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
}

func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.Missed != 0 {
        i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
        i--
        dAtA[i] = 0x20
    }
    if m.Seen != 0 {
        i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
        i--
        dAtA[i] = 0x18
    }
    if m.Misses != 0 {
        i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
        i--
        dAtA[i] = 0x10
    }
    if len(m.Addresses) > 0 {
        for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintDatabase(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0xa
        }
    }
    return len(dAtA) - i, nil
}

func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.Seen != 0 {
        i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
        i--
        dAtA[i] = 0x18
    }
    if len(m.Addresses) > 0 {
        for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintDatabase(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0x12
        }
    }
    if len(m.Key) > 0 {
        i -= len(m.Key)
        copy(dAtA[i:], m.Key)
        i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
        i--
        dAtA[i] = 0xa
    }
    return len(dAtA) - i, nil
}

func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.Expires != 0 {
        i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
        i--
        dAtA[i] = 0x10
    }
    if len(m.Address) > 0 {
        i -= len(m.Address)
        copy(dAtA[i:], m.Address)
        i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
        i--
        dAtA[i] = 0xa
    }
    return len(dAtA) - i, nil
}

func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
    offset -= sovDatabase(v)
    base := offset
    for v >= 1<<7 {
        dAtA[offset] = uint8(v&0x7f | 0x80)
        v >>= 7
        offset++
    }
    dAtA[offset] = uint8(v)
    return base
}
func (m *DatabaseRecord) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    if len(m.Addresses) > 0 {
        for _, e := range m.Addresses {
            l = e.Size()
            n += 1 + l + sovDatabase(uint64(l))
        }
    }
    if m.Misses != 0 {
        n += 1 + sovDatabase(uint64(m.Misses))
    }
    if m.Seen != 0 {
        n += 1 + sovDatabase(uint64(m.Seen))
    }
    if m.Missed != 0 {
        n += 1 + sovDatabase(uint64(m.Missed))
    }
    return n
}

func (m *ReplicationRecord) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    l = len(m.Key)
    if l > 0 {
        n += 1 + l + sovDatabase(uint64(l))
    }
    if len(m.Addresses) > 0 {
        for _, e := range m.Addresses {
            l = e.Size()
            n += 1 + l + sovDatabase(uint64(l))
        }
    }
    if m.Seen != 0 {
        n += 1 + sovDatabase(uint64(m.Seen))
    }
    return n
}

func (m *DatabaseAddress) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    l = len(m.Address)
    if l > 0 {
        n += 1 + l + sovDatabase(uint64(l))
    }
    if m.Expires != 0 {
        n += 1 + sovDatabase(uint64(m.Expires))
    }
    return n
}

func sovDatabase(x uint64) (n int) {
    return (math_bits.Len64(x|1) + 6) / 7
}
func sozDatabase(x uint64) (n int) {
    return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowDatabase
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthDatabase
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthDatabase
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Addresses = append(m.Addresses, DatabaseAddress{})
            if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 2:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
            }
            m.Misses = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Misses |= int32(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        case 3:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
            }
            m.Seen = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Seen |= int64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        case 4:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
            }
            m.Missed = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Missed |= int64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        default:
            iNdEx = preIndex
            skippy, err := skipDatabase(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthDatabase
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowDatabase
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthDatabase
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthDatabase
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Key = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 2:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthDatabase
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthDatabase
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Addresses = append(m.Addresses, DatabaseAddress{})
            if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 3:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
            }
            m.Seen = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Seen |= int64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        default:
            iNdEx = preIndex
            skippy, err := skipDatabase(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthDatabase
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowDatabase
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthDatabase
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthDatabase
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Address = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 2:
            if wireType != 0 {
                return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
            }
            m.Expires = 0
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                m.Expires |= int64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
        default:
            iNdEx = preIndex
            skippy, err := skipDatabase(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthDatabase
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func skipDatabase(dAtA []byte) (n int, err error) {
    l := len(dAtA)
    iNdEx := 0
    depth := 0
    for iNdEx < l {
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, ErrIntOverflowDatabase
            }
            if iNdEx >= l {
                return 0, io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= (uint64(b) & 0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        wireType := int(wire & 0x7)
        switch wireType {
        case 0:
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return 0, ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return 0, io.ErrUnexpectedEOF
                }
                iNdEx++
                if dAtA[iNdEx-1] < 0x80 {
                    break
                }
            }
        case 1:
            iNdEx += 8
        case 2:
            var length int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return 0, ErrIntOverflowDatabase
                }
                if iNdEx >= l {
                    return 0, io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                length |= (int(b) & 0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if length < 0 {
                return 0, ErrInvalidLengthDatabase
            }
            iNdEx += length
        case 3:
            depth++
        case 4:
            if depth == 0 {
                return 0, ErrUnexpectedEndOfGroupDatabase
            }
            depth--
        case 5:
            iNdEx += 4
        default:
            return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
        }
        if iNdEx < 0 {
            return 0, ErrInvalidLengthDatabase
        }
        if depth == 0 {
            return iNdEx, nil
        }
    }
    return 0, io.ErrUnexpectedEOF
}

var (
    ErrInvalidLengthDatabase        = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowDatabase          = fmt.Errorf("proto: integer overflow")
    ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
)

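As a sanity check on the generated size helper: sovDatabase computes the varint length as (bits.Len64(x|1)+6)/7, i.e. ceil(bits/7) with a minimum of one byte, since a protobuf varint stores seven payload bits per byte. The sketch below (illustration only, not from the changeset) compares it against a naive byte count.

package main

import (
    "fmt"
    "math/bits"
)

// sov mirrors the generated sovDatabase formula.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// naive counts varint bytes the long way, seven bits at a time.
func naive(x uint64) int {
    n := 1
    for x >= 1<<7 {
        x >>= 7
        n++
    }
    return n
}

func main() {
    for _, x := range []uint64{0, 1, 127, 128, 300, 1 << 20, 1<<63 - 1} {
        fmt.Println(x, sov(x), naive(x)) // the two sizes agree for every x
    }
}
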
@@ -1,36 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

syntax = "proto3";

package main;

import "repos/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

message DatabaseRecord {
  repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
  int32 misses = 2; // Number of lookups* without hits
  int64 seen = 3;   // Unix nanos, last device announce
  int64 missed = 4; // Unix nanos, last* failed lookup
}

// *) Not every lookup results in a write, so may not be completely accurate

message ReplicationRecord {
  string key = 1;
  repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
  int64 seen = 3; // Unix nanos, last device announce
}

message DatabaseAddress {
  string address = 1;
  int64 expires = 2; // Unix nanos
}

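Expiry fields throughout these messages are Unix nanoseconds. The sketch below (illustration only, with a local stand-in for the generated type) shows what a record honored for a two-hour window — the addressExpiryTime constant elsewhere in this changeset — would roughly look like.

package main

import (
    "fmt"
    "time"
)

// DatabaseAddress here mirrors the generated message above (stand-in only).
type DatabaseAddress struct {
    Address string
    Expires int64
}

func main() {
    addr := DatabaseAddress{
        Address: "tcp://192.0.2.1:22000",
        Expires: time.Now().Add(2 * time.Hour).UnixNano(),
    }
    fmt.Println(time.Unix(0, addr.Expires)) // the wall-clock expiry time
}
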
@@ -11,29 +11,26 @@ import (
    "fmt"
    "testing"
    "time"

    "github.com/syncthing/syncthing/internal/gen/discosrv"
    "github.com/syncthing/syncthing/lib/protocol"
)

func TestDatabaseGetSet(t *testing.T) {
    db := newInMemoryStore(t.TempDir(), 0, nil)
    ctx, cancel := context.WithCancel(context.Background())
    go db.Serve(ctx)
    defer cancel()

    // Check missing record

    rec, err := db.get(&protocol.EmptyDeviceID)
    if err != nil {
        t.Error("not found should not be an error")
    }
    if len(rec.Addresses) != 0 {
        t.Error("addresses should be empty")
    }
    if rec.Misses != 0 {
        t.Error("missing should be zero")
    }

    // Set up a clock

@@ -43,16 +40,16 @@ func TestDatabaseGetSet(t *testing.T) {

    // Put a record

    rec.Addresses = []*discosrv.DatabaseAddress{
        {Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
    }
    if err := db.put(&protocol.EmptyDeviceID, rec); err != nil {
        t.Fatal(err)
    }

    // Verify it

    rec, err = db.get(&protocol.EmptyDeviceID)
    if err != nil {
        t.Fatal(err)
    }
@@ -69,16 +66,16 @@ func TestDatabaseGetSet(t *testing.T) {

    tc.wind(30 * time.Second)

    addrs := []*discosrv.DatabaseAddress{
        {Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
    }
    if err := db.merge(&protocol.EmptyDeviceID, addrs, tc.Now().UnixNano()); err != nil {
        t.Fatal(err)
    }

    // Verify it

    rec, err = db.get(&protocol.EmptyDeviceID)
    if err != nil {
        t.Fatal(err)
    }
@@ -101,7 +98,7 @@ func TestDatabaseGetSet(t *testing.T) {

    // Verify it

    rec, err = db.get(&protocol.EmptyDeviceID)
    if err != nil {
        t.Fatal(err)
    }
@@ -114,40 +111,18 @@ func TestDatabaseGetSet(t *testing.T) {
        t.Error("incorrect address")
    }

    // Set an address

    addrs = []*discosrv.DatabaseAddress{
        {Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
    }
    if err := db.merge(&protocol.GlobalDeviceID, addrs, tc.Now().UnixNano()); err != nil {
        t.Fatal(err)
    }

    // Verify it

    rec, err = db.get(&protocol.GlobalDeviceID)
    if err != nil {
        t.Fatal(err)
    }
@@ -155,48 +130,126 @@ func TestDatabaseGetSet(t *testing.T) {
        t.Log(rec.Addresses)
        t.Fatal("should have one address")
    }
    if rec.Misses != 0 {
        t.Log(rec.Misses)
        t.Error("should have no misses")
    }
}

func TestFilter(t *testing.T) {
    // all cases are expired with t=10
    cases := []struct {
        a []*discosrv.DatabaseAddress
        b []*discosrv.DatabaseAddress
    }{
        {
            a: nil,
            b: nil,
        },
        {
            a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
            b: []*discosrv.DatabaseAddress{},
        },
        {
            a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
            b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
        },
        {
            a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
            b: []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
        },
        {
            a: []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
            b: []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "d", Expires: 15}},
        },
    }

    for _, tc := range cases {
        res := expire(tc.a, time.Unix(0, 10))
        if fmt.Sprint(res) != fmt.Sprint(tc.b) {
            t.Errorf("Incorrect result %v, expected %v", res, tc.b)
        }
    }
}

func TestMerge(t *testing.T) {
    cases := []struct {
        a, b, res []*discosrv.DatabaseAddress
    }{
        {nil, nil, nil},
        {
            nil,
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
        },
        {
            nil,
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 15}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "y", Expires: 10}, {Address: "z", Expires: 10}},
        },
        {
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "d", Expires: 10}},
            []*discosrv.DatabaseAddress{{Address: "b", Expires: 5}, {Address: "c", Expires: 20}},
            []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}, {Address: "c", Expires: 20}, {Address: "d", Expires: 10}},
        },
    }

    for _, tc := range cases {
        rec := merge(&discosrv.DatabaseRecord{Addresses: tc.a}, &discosrv.DatabaseRecord{Addresses: tc.b})
        if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
            t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
        }
        rec = merge(&discosrv.DatabaseRecord{Addresses: tc.b}, &discosrv.DatabaseRecord{Addresses: tc.a})
        if fmt.Sprint(rec.Addresses) != fmt.Sprint(tc.res) {
            t.Errorf("Incorrect result %v, expected %v", rec.Addresses, tc.res)
        }
    }
}

func BenchmarkMergeEqual(b *testing.B) {
    for i := 0; i < b.N; i++ {
        ar := []*discosrv.DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 15}}
        br := []*discosrv.DatabaseAddress{{Address: "a", Expires: 15}, {Address: "b", Expires: 10}}
        res := merge(&discosrv.DatabaseRecord{Addresses: ar}, &discosrv.DatabaseRecord{Addresses: br})
        if len(res.Addresses) != 2 {
            b.Fatal("wrong length")
        }
        if res.Addresses[0].Address != "a" || res.Addresses[1].Address != "b" {
            b.Fatal("wrong address")
        }
        if res.Addresses[0].Expires != 15 || res.Addresses[1].Expires != 15 {
            b.Fatal("wrong expiry")
        }
    }
    b.ReportAllocs() // should be zero per operation
}

type testClock struct {
    now time.Time
}

@@ -9,27 +9,30 @@ package main
import (
    "context"
    "crypto/tls"
    "log"
    "log/slog"
    "net/http"
    "os"
    "os/signal"
    "runtime"
    "time"

    "github.com/alecthomas/kong"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/thejerf/suture/v4"

    "github.com/syncthing/syncthing/internal/blob"
    "github.com/syncthing/syncthing/internal/blob/azureblob"
    "github.com/syncthing/syncthing/internal/blob/s3"
    "github.com/syncthing/syncthing/internal/slogutil"
    _ "github.com/syncthing/syncthing/lib/automaxprocs"
    "github.com/syncthing/syncthing/lib/build"
    "github.com/syncthing/syncthing/lib/protocol"
    "github.com/syncthing/syncthing/lib/rand"
    "github.com/syncthing/syncthing/lib/tlsutil"
)

const (
    databaseStatisticsInterval = 5 * time.Minute
    addressExpiryTime          = 2 * time.Hour

    // Reannounce-After is set to reannounceAfterSeconds +
    // random(reannounzeFuzzSeconds), similar for Retry-After
@@ -38,17 +41,12 @@ const (
    errorRetryAfterSeconds = 1500
    errorRetryFuzzSeconds  = 300

    // Retry for not found is notFoundRetrySeenSeconds for records we have
    // seen an announcement for (but it's not active right now) and
    // notFoundRetryUnknownSeconds for records we have never seen (or not
    // seen within the last week).
    notFoundRetryUnknownMinSeconds = 60
    notFoundRetryUnknownMaxSeconds = 3600

    httpReadTimeout  = 5 * time.Second
    httpWriteTimeout = 5 * time.Second
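The base-plus-fuzz scheme the comments above describe works out as below. This sketch is an illustration using the error-retry constants from this block; math/rand stands in for whatever randomness source the real code uses.

package main

import (
    "fmt"
    "math/rand"
)

const (
    errorRetryAfterSeconds = 1500
    errorRetryFuzzSeconds  = 300
)

func main() {
    // Retry-After = base + random fuzz, so clients spread their retries
    // instead of stampeding back at the same instant.
    retryAfter := errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds)
    fmt.Println(retryAfter) // uniformly in [1500, 1800)
}
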
@@ -58,164 +56,131 @@ const (
    replicationOutboxSize = 10000
)

var debug = false

type CLI struct {
    Cert                string  `group:"Listen" help:"Certificate file" default:"./cert.pem" env:"DISCOVERY_CERT_FILE"`
    Key                 string  `group:"Listen" help:"Key file" default:"./key.pem" env:"DISCOVERY_KEY_FILE"`
    HTTP                bool    `group:"Listen" help:"Listen on HTTP (behind an HTTPS proxy)" env:"DISCOVERY_HTTP"`
    Compression         bool    `group:"Listen" help:"Enable GZIP compression of responses" env:"DISCOVERY_COMPRESSION"`
    Listen              string  `group:"Listen" help:"Listen address" default:":8443" env:"DISCOVERY_LISTEN"`
    MetricsListen       string  `group:"Listen" help:"Metrics listen address" env:"DISCOVERY_METRICS_LISTEN"`
    DesiredNotFoundRate float64 `group:"Listen" help:"Desired maximum rate of not-found replies (/s)" default:"1000"`

    DBDir           string        `group:"Database" help:"Database directory" default:"." env:"DISCOVERY_DB_DIR"`
    DBFlushInterval time.Duration `group:"Database" help:"Interval between database flushes" default:"5m" env:"DISCOVERY_DB_FLUSH_INTERVAL"`

    DBS3Endpoint    string `name:"db-s3-endpoint" group:"Database (S3 backup)" hidden:"true" help:"S3 endpoint for database" env:"DISCOVERY_DB_S3_ENDPOINT"`
    DBS3Region      string `name:"db-s3-region" group:"Database (S3 backup)" hidden:"true" help:"S3 region for database" env:"DISCOVERY_DB_S3_REGION"`
    DBS3Bucket      string `name:"db-s3-bucket" group:"Database (S3 backup)" hidden:"true" help:"S3 bucket for database" env:"DISCOVERY_DB_S3_BUCKET"`
    DBS3AccessKeyID string `name:"db-s3-access-key-id" group:"Database (S3 backup)" hidden:"true" help:"S3 access key ID for database" env:"DISCOVERY_DB_S3_ACCESS_KEY_ID"`
    DBS3SecretKey   string `name:"db-s3-secret-key" group:"Database (S3 backup)" hidden:"true" help:"S3 secret key for database" env:"DISCOVERY_DB_S3_SECRET_KEY"`

    DBAzureBlobAccount   string `name:"db-azure-blob-account" env:"DISCOVERY_DB_AZUREBLOB_ACCOUNT"`
    DBAzureBlobKey       string `name:"db-azure-blob-key" env:"DISCOVERY_DB_AZUREBLOB_KEY"`
    DBAzureBlobContainer string `name:"db-azure-blob-container" env:"DISCOVERY_DB_AZUREBLOB_CONTAINER"`

    AMQPAddress string `group:"AMQP replication" hidden:"true" help:"Address to AMQP broker" env:"DISCOVERY_AMQP_ADDRESS"`

    Debug   bool `short:"d" help:"Print debug output" env:"DISCOVERY_DEBUG"`
    Version bool `short:"v" help:"Print version and exit"`
}

func main() {
    var cli CLI
    kong.Parse(&cli)

    log.SetOutput(os.Stdout)
    log.SetFlags(0)
    level := slog.LevelInfo
    if cli.Debug {
        level = slog.LevelDebug
    }
    slogutil.SetDefaultLevel(level)

    slog.Info(build.LongVersionFor("stdiscosrv"))
    if cli.Version {
        return
    }

    buildInfo.WithLabelValues(build.Version, runtime.Version(), build.User, build.Date.UTC().Format("2006-01-02T15:04:05Z")).Set(1)

    var cert tls.Certificate
    if !cli.HTTP {
        var err error
        cert, err = tls.LoadX509KeyPair(cli.Cert, cli.Key)
        if os.IsNotExist(err) {
            slog.Info("Failed to load keypair. Generating one, this might take a while...")
            cert, err = tlsutil.NewCertificate(cli.Cert, cli.Key, "stdiscosrv", 20*365, false)
            if err != nil {
                slog.Error("Failed to generate X509 key pair", "error", err)
                os.Exit(1)
            }
        } else if err != nil {
            slog.Error("Failed to load keypair", "error", err)
            os.Exit(1)
        }
        devID := protocol.NewDeviceID(cert.Certificate[0])
        slog.Info("Loaded certificate keypair", "deviceID", devID)
    }

    // Root of the service tree.
    main := suture.New("main", suture.Spec{
        PassThroughPanics: true,
        Timeout:           2 * time.Minute,
    })

    // If configured, use blob storage for database backups.
    var blobs blob.Store
    var err error
    if cli.DBS3Endpoint != "" {
        blobs, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
    } else if cli.DBAzureBlobAccount != "" {
        blobs, err = azureblob.NewBlobStore(cli.DBAzureBlobAccount, cli.DBAzureBlobKey, cli.DBAzureBlobContainer)
    }
    if err != nil {
        slog.Error("Failed to create blob store", "error", err)
        os.Exit(1)
    }

    // Start the database.
    db := newInMemoryStore(cli.DBDir, cli.DBFlushInterval, blobs)
    main.Add(db)

    // If we have an AMQP broker for replication, start that
    var repl replicator
    if cli.AMQPAddress != "" {
        clientID := rand.String(10)
        kr := newAMQPReplicator(cli.AMQPAddress, clientID, db)
        main.Add(kr)
        repl = kr
    }

    // Start the main API server.
    qs := newAPISrv(cli.Listen, cert, db, repl, cli.HTTP, cli.Compression, cli.DesiredNotFoundRate)
    main.Add(qs)

    // If we have a metrics port configured, start a metrics handler.
    if cli.MetricsListen != "" {
        go func() {
            mux := http.NewServeMux()
            mux.Handle("/metrics", promhttp.Handler())
            err := http.ListenAndServe(cli.MetricsListen, mux)
            slog.Error("Failed to serve", "error", err)
            os.Exit(1)
        }()
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Cancel on signal
    signalChan := make(chan os.Signal, 1)
    signal.Notify(signalChan, os.Interrupt)
    go func() {
        sig := <-signalChan
        slog.Info("Received signal; shutting down", "signal", sig)
        cancel()
    }()

    // Engage!
    main.Serve(ctx)
}

@@ -1,324 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "context"
    "crypto/tls"
    "encoding/binary"
    "fmt"
    io "io"
    "log"
    "net"
    "time"

    "github.com/syncthing/syncthing/lib/protocol"
)

const (
    replicationReadTimeout       = time.Minute
    replicationWriteTimeout     = 30 * time.Second
    replicationHeartbeatInterval = time.Second * 30
)

type replicator interface {
    send(key string, addrs []DatabaseAddress, seen int64)
}

// a replicationSender tries to connect to the remote address and provides
// it with a feed of replication updates.
type replicationSender struct {
    dst        string
    cert       tls.Certificate // our certificate
    allowedIDs []protocol.DeviceID
    outbox     chan ReplicationRecord
}

func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
    return &replicationSender{
        dst:        dst,
        cert:       cert,
        allowedIDs: allowedIDs,
        outbox:     make(chan ReplicationRecord, replicationOutboxSize),
    }
}

func (s *replicationSender) Serve(ctx context.Context) error {
    // Sleep a little at startup. Peers often restart at the same time, and
    // this avoids the service failing and entering backoff state
    // unnecessarily, while also reducing the reconnect rate to something
    // reasonable by default.
    time.Sleep(2 * time.Second)

    tlsCfg := &tls.Config{
        Certificates:       []tls.Certificate{s.cert},
        MinVersion:         tls.VersionTLS12,
        InsecureSkipVerify: true,
    }

    // Dial the TLS connection.
    conn, err := tls.Dial("tcp", s.dst, tlsCfg)
    if err != nil {
        log.Println("Replication connect:", err)
        return err
    }
    defer func() {
        conn.SetWriteDeadline(time.Now().Add(time.Second))
        conn.Close()
    }()

    // The replication stream is not especially latency sensitive, but it is
    // quite a lot of data in small writes. Make it more efficient.
    if tcpc, ok := conn.NetConn().(*net.TCPConn); ok {
        _ = tcpc.SetNoDelay(false)
    }

    // Get the other side device ID.
    remoteID, err := deviceID(conn)
    if err != nil {
        log.Println("Replication connect:", err)
        return err
    }

    // Verify it's in the set of allowed device IDs.
    if !deviceIDIn(remoteID, s.allowedIDs) {
        log.Println("Replication connect: unexpected device ID:", remoteID)
        return err
    }

    heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
    defer heartBeatTicker.Stop()

    // Send records.
    buf := make([]byte, 1024)
    for {
        select {
        case <-heartBeatTicker.C:
            if len(s.outbox) > 0 {
                // No need to send heartbeats if there are events/previous
                // heartbeats to send, they will keep the connection alive.
                continue
            }
            // Empty replication message is the heartbeat:
            s.outbox <- ReplicationRecord{}

        case rec := <-s.outbox:
            // Buffer must hold record plus four bytes for size
            size := rec.Size()
            if len(buf) < size+4 {
                buf = make([]byte, size+4)
            }

            // Record comes after the four bytes size
            n, err := rec.MarshalTo(buf[4:])
            if err != nil {
                // odd to get an error here, but we haven't sent anything
                // yet so it's not fatal
                replicationSendsTotal.WithLabelValues("error").Inc()
                log.Println("Replication marshal:", err)
                continue
            }
            binary.BigEndian.PutUint32(buf, uint32(n))

            // Send
            conn.SetWriteDeadline(time.Now().Add(replicationWriteTimeout))
            if _, err := conn.Write(buf[:4+n]); err != nil {
                replicationSendsTotal.WithLabelValues("error").Inc()
                log.Println("Replication write:", err)
                // Yes, we are losing the replication event here.
                return err
            }
            replicationSendsTotal.WithLabelValues("success").Inc()

        case <-ctx.Done():
            return nil
        }
    }
}
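The wire format used by this sender, and by the listener below, is a four-byte big-endian length prefix followed by the marshaled ReplicationRecord, with a zero-length frame serving as the heartbeat. A minimal sketch of just the framing (illustration only, not repository code):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// frame prefixes a marshaled record with its length. A nil/empty payload
// produces the four-zero-byte heartbeat frame the receiver skips over.
func frame(rec []byte) []byte {
    buf := make([]byte, 4+len(rec))
    binary.BigEndian.PutUint32(buf, uint32(len(rec)))
    copy(buf[4:], rec)
    return buf
}

func main() {
    heartbeat := frame(nil)
    fmt.Println(bytes.Equal(heartbeat, []byte{0, 0, 0, 0})) // true
}
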
|
||||
func (s *replicationSender) String() string {
|
||||
return fmt.Sprintf("replicationSender(%q)", s.dst)
|
||||
}
|
||||
|
||||
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
|
||||
item := ReplicationRecord{
|
||||
Key: key,
|
||||
Addresses: ps,
|
||||
}
|
||||
|
||||
// The send should never block. The inbox is suitably buffered for at
|
||||
// least a few seconds of stalls, which shouldn't happen in practice.
|
||||
select {
|
||||
case s.outbox <- item:
|
||||
default:
|
||||
replicationSendsTotal.WithLabelValues("drop").Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// a replicationMultiplexer sends to multiple replicators
|
||||
type replicationMultiplexer []replicator
|
||||
|
||||
func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
|
||||
for _, s := range m {
|
||||
// each send is nonblocking
|
||||
s.send(key, ps, seen)
|
||||
}
|
||||
}
|
||||
|
||||
// replicationListener accepts incoming connections and reads replication
|
||||
// items from them. Incoming items are applied to the KV store.
|
||||
type replicationListener struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
db database
|
||||
}
|
||||
|
||||
func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
|
||||
return &replicationListener{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) Serve(ctx context.Context) error {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{l.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
lst, err := tls.Listen("tcp", l.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication listen:", err)
|
||||
return err
|
||||
}
|
||||
defer lst.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Accept a connection
|
||||
conn, err := lst.Accept()
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Figure out the other side device ID
|
||||
remoteID, err := deviceID(conn.(*tls.Conn))
|
||||
if err != nil {
|
||||
log.Println("Replication accept:", err)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify it is in the set of allowed device IDs
|
||||
if !deviceIDIn(remoteID, l.allowedIDs) {
|
||||
log.Println("Replication accept: unexpected device ID:", remoteID)
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
go l.handle(ctx, conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *replicationListener) String() string {
|
||||
return fmt.Sprintf("replicationListener(%q)", l.addr)
|
||||
}
|
||||
|
||||
func (l *replicationListener) handle(ctx context.Context, conn net.Conn) {
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))
|
||||
|
||||
// First four bytes are the size
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
log.Println("Replication read size:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Read the rest of the record
|
||||
size := int(binary.BigEndian.Uint32(buf[:4]))
|
||||
if len(buf) < size {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
|
||||
if size == 0 {
|
||||
// Heartbeat, ignore
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:size]); err != nil {
|
||||
log.Println("Replication read record:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal
|
||||
var rec ReplicationRecord
|
||||
if err := rec.Unmarshal(buf[:size]); err != nil {
|
||||
log.Println("Replication unmarshal:", err)
|
||||
replicationRecvsTotal.WithLabelValues("error").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
// Store
|
||||
l.db.merge(rec.Key, rec.Addresses, rec.Seen)
|
||||
replicationRecvsTotal.WithLabelValues("success").Inc()
|
||||
}
|
||||
}
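
// [Editor's note] A minimal round-trip sketch using only the record methods
// seen above (Size, MarshalTo, Unmarshal); what the sender frames, this
// listener decodes back:
//
//	func roundTrip(rec ReplicationRecord) (ReplicationRecord, error) {
//		buf := make([]byte, rec.Size())
//		n, err := rec.MarshalTo(buf)
//		if err != nil {
//			return ReplicationRecord{}, err
//		}
//		var out ReplicationRecord
//		err = out.Unmarshal(buf[:n])
//		return out, err
//	}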

func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
	// The handshake may not yet be complete on the server side, and we need
	// a completed handshake to get the client certificate.
	if !conn.ConnectionState().HandshakeComplete {
		if err := conn.Handshake(); err != nil {
			return protocol.DeviceID{}, err
		}
	}

	// We expect exactly one certificate.
	certs := conn.ConnectionState().PeerCertificates
	if len(certs) != 1 {
		return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
	}

	return protocol.NewDeviceID(certs[0].Raw), nil
}

func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
	for _, candidate := range ids {
		if id == candidate {
			return true
		}
	}
	return false
}
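
// [Editor's note] On the trust model: InsecureSkipVerify in the TLS configs
// above disables certificate chain validation, so the TLS layer accepts any
// certificate. Authentication instead rests on the allowlist check, since the
// device ID is derived from a hash of the peer certificate and cannot be
// presented without holding the matching private key:
//
//	remoteID, err := deviceID(conn) // ID derived from the peer certificate
//	if err != nil || !deviceIDIn(remoteID, allowedIDs) {
//		// reject the connection
//	}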

@@ -7,10 +7,7 @@
package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

var (
@@ -99,13 +96,28 @@ var (
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	}, []string{"operation"})

	retryAfterHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "syncthing",
		Subsystem: "discovery",
		Name:      "retry_after_seconds",
		Help:      "Retry-After header value in seconds.",
		Buckets:   prometheus.ExponentialBuckets(60, 2, 7), // 60, 120, 240, 480, 960, 1920, 3840
	})
	databaseWriteSeconds = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "database_write_seconds",
			Help:      "Time spent writing the database.",
		})
	databaseLastWritten = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "database_last_written",
			Help:      "Timestamp of the last successful database write.",
		})

	retryAfterLevel = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "syncthing",
			Subsystem: "discovery",
			Name:      "retry_after_seconds",
			Help:      "Retry-After header value in seconds.",
		}, []string{"name"})
)
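
// [Editor's note] Assumed usage sketch for the two new gauges (the call
// sites are not part of this excerpt): the database write path would record
// both the duration and the wall-clock time of the last successful write.
//
//	t0 := time.Now()
//	// ... write the database ...
//	databaseWriteSeconds.Set(time.Since(t0).Seconds())
//	databaseLastWritten.Set(float64(time.Now().Unix()))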

const (

@@ -126,16 +138,6 @@ func init() {
		replicationSendsTotal, replicationRecvsTotal,
		databaseKeys, databaseStatisticsSeconds,
		databaseOperations, databaseOperationSeconds,
		retryAfterHistogram)

	processCollectorOpts := collectors.ProcessCollectorOpts{
		Namespace: "syncthing_discovery",
		PidFn: func() (int, error) {
			return os.Getpid(), nil
		},
	}

	prometheus.MustRegister(
		collectors.NewProcessCollector(processCollectorOpts),
	)
		databaseWriteSeconds, databaseLastWritten,
		retryAfterLevel)
}

@@ -12,9 +12,8 @@ import (
	"time"

	syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/tlsutil"

	"github.com/syncthing/syncthing/lib/relay/protocol"
	"github.com/syncthing/syncthing/lib/tlsutil"
)

var (

@@ -185,7 +184,7 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config, token strin
			continue
		}
		// requestedPeer is the server, id is the client
		ses := newSession(requestedPeer, id, sessionLimiter, globalLimiter)
		ses := newSession(requestedPeer, id, sessionLimitBps, globalLimiter)

		go ses.Serve()

@@ -14,11 +14,14 @@ import (
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"sync/atomic"
	"syscall"
	"time"

	"golang.org/x/time/rate"

	_ "github.com/syncthing/syncthing/lib/automaxprocs"
	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/config"
@@ -30,7 +33,6 @@ import (
	"github.com/syncthing/syncthing/lib/relay/protocol"
	"github.com/syncthing/syncthing/lib/tlsutil"
	_ "github.com/syncthing/syncthing/lib/upnp"
	"golang.org/x/time/rate"
)

var (
@@ -50,7 +52,6 @@ var (
	globalLimitBps    int
	overLimit         atomic.Bool
	descriptorLimit   int64
	sessionLimiter    *rate.Limiter
	globalLimiter     *rate.Limiter
	networkBufferSize int

@@ -157,7 +158,7 @@ func main() {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Println("Failed to load keypair. Generating one, this might take a while...")
		cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 20*365)
		cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 20*365, false)
		if err != nil {
			log.Fatalln("Failed to generate X509 key pair:", err)
		}
@@ -227,9 +228,6 @@ func main() {
		}
	}

	if sessionLimitBps > 0 {
		sessionLimiter = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
	}
	if globalLimitBps > 0 {
		globalLimiter = rate.NewLimiter(rate.Limit(globalLimitBps), 2*globalLimitBps)
	}
@@ -250,10 +248,10 @@ func main() {
	query.Set("pingInterval", pingInterval.String())
	query.Set("networkTimeout", networkTimeout.String())
	if sessionLimitBps > 0 {
		query.Set("sessionLimitBps", fmt.Sprint(sessionLimitBps))
		query.Set("sessionLimitBps", strconv.Itoa(sessionLimitBps))
	}
	if globalLimitBps > 0 {
		query.Set("globalLimitBps", fmt.Sprint(globalLimitBps))
		query.Set("globalLimitBps", strconv.Itoa(globalLimitBps))
	}
	if statusAddr != "" {
		query.Set("statusAddr", statusAddr)

@@ -27,7 +27,7 @@ var (
	bytesProxied atomic.Int64
)

func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit, globalRateLimit *rate.Limiter) *session {
func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionLimitBps int, globalRateLimit *rate.Limiter) *session {
	serverkey := make([]byte, 32)
	_, err := rand.Read(serverkey)
	if err != nil {
@@ -40,12 +40,17 @@ func newSession(serverid, clientid syncthingprotocol.DeviceID, sessionRateLimit,
		return nil
	}

	var sessionRateLimit *rate.Limiter
	if sessionLimitBps > 0 {
		sessionRateLimit = rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps)
	}
	ses := &session{
		serverkey: serverkey,
		serverid:  serverid,
		clientkey: clientkey,
		clientid:  clientid,
		rateLimit: makeRateLimitFunc(sessionRateLimit, globalRateLimit),
		limiter:   sessionRateLimit,
		connsChan: make(chan net.Conn),
		conns:     make([]net.Conn, 0, 2),
	}
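
// [Editor's note] Token-bucket semantics of the limiter construction above:
// rate.NewLimiter(rate.Limit(sessionLimitBps), 2*sessionLimitBps) refills
// sessionLimitBps tokens per second with a burst capacity of twice that, so
// a session may briefly burst to double its configured byte rate before
// waiting kicks in. A hedged usage sketch (not the actual relay call site):
//
//	lim := rate.NewLimiter(rate.Limit(1<<20), 2<<20) // ~1 MB/s, 2 MB burst
//	_ = lim.WaitN(context.Background(), len(chunk))  // pace each chunk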

@@ -68,7 +73,6 @@ func findSession(key string) *session {
	ses, ok := pendingSessions[key]
	if !ok {
		return nil

	}
	delete(pendingSessions, key)
	return ses

@@ -110,6 +114,7 @@ type session struct {
	clientid syncthingprotocol.DeviceID

	rateLimit func(bytes int)
	limiter   *rate.Limiter

	connsChan chan net.Conn
	conns     []net.Conn

@@ -122,7 +122,7 @@ func (r *rateCalculator) updateRates(interval time.Duration) {

func (r *rateCalculator) rate(periods int) int64 {
	var tot int64
	for i := 0; i < periods; i++ {
	for i := range periods {
		tot += r.rates[i]
	}
	return tot / int64(periods)

@@ -6,6 +6,7 @@ import (
	"bufio"
	"context"
	"crypto/tls"
	"errors"
	"flag"
	"log"
	"net"
@@ -133,7 +134,8 @@ func connectToStdio(stdin <-chan string, conn net.Conn) {
		conn.SetReadDeadline(time.Now().Add(time.Millisecond))
		n, err := conn.Read(buf[0:])
		if err != nil {
			nerr, ok := err.(net.Error)
			var nerr net.Error
			ok := errors.As(err, &nerr)
			if !ok || !nerr.Timeout() {
				log.Println(err)
				return

@@ -10,7 +10,7 @@ import (
func setTCPOptions(conn net.Conn) error {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return errors.New("Not a TCP connection")
		return errors.New("not a TCP connection")
	}
	if err := tcpConn.SetLinger(0); err != nil {
		return err

@@ -1,149 +0,0 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"sort"
	"strings"
	"time"

	"github.com/alecthomas/kong"
	_ "github.com/syncthing/syncthing/lib/automaxprocs"
	"github.com/syncthing/syncthing/lib/httpcache"
	"github.com/syncthing/syncthing/lib/upgrade"
)

type cli struct {
	Listen    string        `default:":8080" help:"Listen address"`
	URL       string        `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
	Forward   []string      `short:"f" help:"Forwarded pages, format: /path->https://example.com/url"`
	CacheTime time.Duration `default:"15m" help:"Cache time"`
}

func main() {
	var params cli
	kong.Parse(&params)
	if err := server(&params); err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
}

func server(params *cli) error {
	http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))

	for _, fwd := range params.Forward {
		path, url, ok := strings.Cut(fwd, "->")
		if !ok {
			return fmt.Errorf("invalid forward: %q", fwd)
		}
		http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
	}

	return http.ListenAndServe(params.Listen, nil)
}

type githubReleases struct {
	url string
}

func (p *githubReleases) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
	log.Println("Fetching", p.url)
	rels := upgrade.FetchLatestReleases(p.url, "")
	if rels == nil {
		http.Error(w, "no releases", http.StatusInternalServerError)
		return
	}

	sort.Sort(upgrade.SortByRelease(rels))
	rels = filterForLatest(rels)

	// Move the URL used for browser downloads to the URL field, and remove
	// the browser URL field. This avoids going via the GitHub API for
	// downloads, since Syncthing uses the URL field.
	for _, rel := range rels {
		for j, asset := range rel.Assets {
			rel.Assets[j].URL = asset.BrowserURL
			rel.Assets[j].BrowserURL = ""
		}
	}

	buf := new(bytes.Buffer)
	_ = json.NewEncoder(buf).Encode(rels)

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET")
	w.Write(buf.Bytes())
}

type proxy struct {
	url string
}

func (p *proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	log.Println("Fetching", p.url)
	req, err := http.NewRequestWithContext(req.Context(), http.MethodGet, p.url, nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()

	ct := resp.Header.Get("Content-Type")
	w.Header().Set("Content-Type", ct)
	if resp.StatusCode == http.StatusOK {
		w.Header().Set("Cache-Control", "public, max-age=900")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET")
	}
	w.WriteHeader(resp.StatusCode)
	if strings.HasPrefix(ct, "application/json") {
		// Special JSON handling; clean it up a bit.
		var v interface{}
		if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		_ = json.NewEncoder(w).Encode(v)
	} else {
		_, _ = io.Copy(w, resp.Body)
	}
}

// filterForLatest returns the latest stable and prerelease only. If the
// stable version is newer (comes first in the list) there is no need to go
// looking for a prerelease at all.
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
	var filtered []upgrade.Release
	var havePre bool
	for _, rel := range rels {
		if !rel.Prerelease {
			// We found a stable version, we're good now.
			filtered = append(filtered, rel)
			break
		}
		if rel.Prerelease && !havePre {
			// We remember the first prerelease we find.
			filtered = append(filtered, rel)
			havePre = true
		}
	}
	return filtered
}
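
// [Editor's note] Worked example with hypothetical versions, releases sorted
// newest first (Tag/Prerelease field names per lib/upgrade.Release as assumed
// here):
//
//	rels := []upgrade.Release{
//		{Tag: "v2.0.0-rc.1", Prerelease: true},
//		{Tag: "v1.28.0"},
//		{Tag: "v1.27.9"},
//	}
//	latest := filterForLatest(rels) // keeps v2.0.0-rc.1 and v1.28.0
//
// If the newest release is stable, the loop breaks immediately and only that
// release is returned.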

@@ -8,11 +8,14 @@ package main

import (
	"fmt"
	"log/slog"
	"os"
	"runtime"
	"runtime/pprof"
	"syscall"
	"time"

	"github.com/syncthing/syncthing/internal/slogutil"
)

func startBlockProfiler() {
@@ -20,10 +23,10 @@ func startBlockProfiler() {
	if profiler == nil {
		panic("Couldn't find block profiler")
	}
	l.Debugln("Starting block profiling")
	slog.Debug("Starting block profiling")
	go func() {
		err := saveBlockingProfiles(profiler) // Only returns on error
		l.Warnln("Block profiler failed:", err)
		slog.Error("Block profiler failed", slogutil.Error(err))
		panic("Block profiler failed")
	}()
}

@@ -32,6 +32,7 @@ type APIClient interface {

type apiClient struct {
	http.Client

	cfg    config.GUIConfiguration
	apikey string
}
@@ -91,11 +92,11 @@ func loadGUIConfig() (config.GUIConfiguration, error) {
	guiCfg := cfg.GUI()

	if guiCfg.Address() == "" {
		return config.GUIConfiguration{}, errors.New("Could not find GUI Address")
		return config.GUIConfiguration{}, errors.New("could not find GUI Address")
	}

	if guiCfg.APIKey == "" {
		return config.GUIConfiguration{}, errors.New("Could not find GUI API key")
		return config.GUIConfiguration{}, errors.New("could not find GUI API key")
	}

	return guiCfg, nil
@@ -113,7 +114,7 @@ func (c *apiClient) Endpoint() string {
}

func (c *apiClient) Do(req *http.Request) (*http.Response, error) {
	req.Header.Set("X-API-Key", c.apikey)
	req.Header.Set("X-Api-Key", c.apikey)
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err

@@ -10,6 +10,7 @@ import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"reflect"

	"github.com/AudriusButkevicius/recli"
@@ -18,6 +19,53 @@ import (
	"github.com/urfave/cli"
)

// Try to mimic the kong output format through custom help templates
var customAppHelpTemplate = `Usage: {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .Commands}} <command> [flags]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}

{{.Description}}{{if .VisibleFlags}}

Flags:
{{range $index, $option := .VisibleFlags}}{{if $index}}
{{end}}{{$option}}{{end}}{{end}}{{if .VisibleCommands}}

Commands:{{range .VisibleCategories}}{{if .Name}}

{{.Name}}:{{range .VisibleCommands}}
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}}
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}
`

var customCommandHelpTemplate = `Usage: {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [flags]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}

{{.Usage}}{{if .VisibleFlags}}

Flags:
{{range $index, $option := .VisibleFlags}}{{if $index}}
{{end}}{{$option}}{{end}}{{end}}{{if .Category}}

Category:
{{.Category}}{{end}}{{if .Description}}

{{.Description}}{{end}}
`

var customSubcommandHelpTemplate = `Usage: {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} <command>{{if .VisibleFlags}} [flags]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}

{{.Description}}{{else}}{{if .Usage}}

{{.Usage}}{{end}}{{end}}{{if .VisibleFlags}}

Flags:
{{range $index, $option := .VisibleFlags}}{{if $index}}
{{end}}{{$option}}{{end}}{{end}}{{if .VisibleCommands}}

Commands:{{range .VisibleCategories}}{{if .Name}}

{{.Name}}:{{range .VisibleCommands}}
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}}
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}
`

type configHandler struct {
	original, cfg config.Configuration
	client        APIClient
@@ -28,13 +76,18 @@ type configCommand struct {
	Args []string `arg:"" default:"-h"`
}

func (c *configCommand) Run(ctx Context, _ *kong.Context) error {
func (c *configCommand) Run(ctx Context, outerCtx *kong.Context) error {
	app := cli.NewApp()
	app.Name = "syncthing"
	app.Author = "The Syncthing Authors"
	app.Name = "syncthing cli config"
	app.HelpName = "syncthing cli config"
	app.Description = outerCtx.Selected().Help
	app.Metadata = map[string]interface{}{
		"clientFactory": ctx.clientFactory,
	}
	app.CustomAppHelpTemplate = customAppHelpTemplate
	// Override global templates, as this is our only usage of the package
	cli.CommandHelpTemplate = customCommandHelpTemplate
	cli.SubcommandHelpTemplate = customSubcommandHelpTemplate

	h := new(configHandler)
	h.client, h.err = ctx.clientFactory.getClient()
@@ -55,6 +108,8 @@ func (c *configCommand) Run(ctx Context, _ *kong.Context) error {

	app.Commands = commands
	app.HideHelp = true
	// Explicitly re-add help only as flags, not as commands
	app.Flags = []cli.Flag{cli.HelpFlag}
	app.Before = h.configBefore
	app.After = h.configAfter

@@ -86,7 +141,7 @@ func (h *configHandler) configAfter(_ *cli.Context) error {
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
	if resp.StatusCode != http.StatusOK {
		body, err := responseToBArray(resp)
		if err != nil {
			return err

@@ -41,5 +41,4 @@ func (p *profileCommand) Run(ctx Context) error {
type debugCommand struct {
	File    fileCommand    `cmd:"" help:"Show information about a file (or directory/symlink)"`
	Profile profileCommand `cmd:"" help:"Save a profile to help figuring out what Syncthing does"`
	Index   indexCommand   `cmd:"" help:"Show information about the index (database)"`
}

@@ -9,6 +9,7 @@ package cli
import (
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/alecthomas/kong"
@@ -34,7 +35,7 @@ func (e *errorsPushCommand) Run(ctx Context) error {
	if err != nil {
		return err
	}
	if response.StatusCode != 200 {
	if response.StatusCode != http.StatusOK {
		errStr = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
		bytes, err := responseToBArray(response)
		if err != nil {

@@ -1,32 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"github.com/alecthomas/kong"
)

type indexCommand struct {
	Dump     struct{} `cmd:"" help:"Print the entire db"`
	DumpSize struct{} `cmd:"" help:"Print the db size of different categories of information"`
	Check    struct{} `cmd:"" help:"Check the database for inconsistencies"`
	Account  struct{} `cmd:"" help:"Print key and value size statistics per key type"`
}

func (*indexCommand) Run(kongCtx *kong.Context) error {
	switch kongCtx.Selected().Name {
	case "dump":
		return indexDump()
	case "dump-size":
		return indexDumpSize()
	case "check":
		return indexCheck()
	case "account":
		return indexAccount()
	}
	return nil
}

@@ -1,62 +0,0 @@
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"fmt"
	"os"
	"text/tabwriter"
)

// indexAccount prints key and data size statistics per class
func indexAccount() error {
	ldb, err := getDB()
	if err != nil {
		return err
	}

	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}

	var ksizes [256]int
	var dsizes [256]int
	var counts [256]int
	var max [256]int

	for it.Next() {
		key := it.Key()
		t := key[0]
		ds := len(it.Value())
		ks := len(key)
		s := ks + ds

		counts[t]++
		ksizes[t] += ks
		dsizes[t] += ds
		if s > max[t] {
			max[t] = s
		}
	}

	tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', tabwriter.AlignRight)
	toti, totds, totks := 0, 0, 0
	for t := range ksizes {
		if ksizes[t] > 0 {
			// yes metric kilobytes 🤘
			fmt.Fprintf(tw, "0x%02x:\t%d items,\t%d KB keys +\t%d KB data,\t%d B +\t%d B avg,\t%d B max\t\n", t, counts[t], ksizes[t]/1000, dsizes[t]/1000, ksizes[t]/counts[t], dsizes[t]/counts[t], max[t])
			toti += counts[t]
			totds += dsizes[t]
			totks += ksizes[t]
		}
	}
	fmt.Fprintf(tw, "Total\t%d items,\t%d KB keys +\t%d KB data.\t\n", toti, totks/1000, totds/1000)
	tw.Flush()

	return nil
}

@@ -1,158 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"encoding/binary"
	"fmt"
	"time"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/protocol"
)

func indexDump() error {
	ldb, err := getDB()
	if err != nil {
		return err
	}
	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])
			fmt.Printf("[device] F:%d D:%d N:%q", folder, device, name)

			var f protocol.FileInfo
			err := f.Unmarshal(it.Value())
			if err != nil {
				return err
			}
			fmt.Printf(" V:%v\n", f)

		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			var flv db.VersionList
			flv.Unmarshal(it.Value())
			fmt.Printf("[global] F:%d N:%q V:%s\n", folder, name, flv)

		case db.KeyTypeBlock:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[1+4 : 1+4+32]
			name := nulString(key[1+4+32:])
			fmt.Printf("[block] F:%d H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))

		case db.KeyTypeDeviceStatistic:
			fmt.Printf("[dstat] K:%x V:%x\n", key, it.Value())

		case db.KeyTypeFolderStatistic:
			fmt.Printf("[fstat] K:%x V:%x\n", key, it.Value())

		case db.KeyTypeVirtualMtime:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			val := it.Value()
			var realTime, virtualTime time.Time
			realTime.UnmarshalBinary(val[:len(val)/2])
			virtualTime.UnmarshalBinary(val[len(val)/2:])
			fmt.Printf("[mtime] F:%d N:%q R:%v V:%v\n", folder, name, realTime, virtualTime)

		case db.KeyTypeFolderIdx:
			key := binary.BigEndian.Uint32(key[1:])
			fmt.Printf("[folderidx] K:%d V:%q\n", key, it.Value())

		case db.KeyTypeDeviceIdx:
			key := binary.BigEndian.Uint32(key[1:])
			val := it.Value()
			device := "<nil>"
			if len(val) > 0 {
				dev, err := protocol.DeviceIDFromBytes(val)
				if err != nil {
					device = fmt.Sprintf("<invalid %d bytes>", len(val))
				} else {
					device = dev.String()
				}
			}
			fmt.Printf("[deviceidx] K:%d V:%s\n", key, device)

		case db.KeyTypeIndexID:
			device := binary.BigEndian.Uint32(key[1:])
			folder := binary.BigEndian.Uint32(key[5:])
			fmt.Printf("[indexid] D:%d F:%d I:%x\n", device, folder, it.Value())

		case db.KeyTypeFolderMeta:
			folder := binary.BigEndian.Uint32(key[1:])
			fmt.Printf("[foldermeta] F:%d", folder)
			var cs db.CountsSet
			if err := cs.Unmarshal(it.Value()); err != nil {
				fmt.Printf(" (invalid)\n")
			} else {
				fmt.Printf(" V:%v\n", cs)
			}

		case db.KeyTypeMiscData:
			fmt.Printf("[miscdata] K:%q V:%q\n", key[1:], it.Value())

		case db.KeyTypeSequence:
			folder := binary.BigEndian.Uint32(key[1:])
			seq := binary.BigEndian.Uint64(key[5:])
			fmt.Printf("[sequence] F:%d S:%d V:%q\n", folder, seq, it.Value())

		case db.KeyTypeNeed:
			folder := binary.BigEndian.Uint32(key[1:])
			file := string(key[5:])
			fmt.Printf("[need] F:%d V:%q\n", folder, file)

		case db.KeyTypeBlockList:
			fmt.Printf("[blocklist] H:%x\n", key[1:])

		case db.KeyTypeBlockListMap:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[5:37]
			fileName := string(key[37:])
			fmt.Printf("[blocklistmap] F:%d H:%x N:%s\n", folder, hash, fileName)

		case db.KeyTypeVersion:
			fmt.Printf("[version] H:%x", key[1:])
			var v protocol.Vector
			err := v.Unmarshal(it.Value())
			if err != nil {
				fmt.Printf(" (invalid)\n")
			} else {
				fmt.Printf(" V:%v\n", v)
			}

		case db.KeyTypePendingFolder:
			device := binary.BigEndian.Uint32(key[1:])
			folder := string(key[5:])
			var of db.ObservedFolder
			of.Unmarshal(it.Value())
			fmt.Printf("[pendingFolder] D:%d F:%s V:%v\n", device, folder, of)

		case db.KeyTypePendingDevice:
			device := "<invalid>"
			dev, err := protocol.DeviceIDFromBytes(key[1:])
			if err == nil {
				device = dev.String()
			}
			var od db.ObservedDevice
			od.Unmarshal(it.Value())
			fmt.Printf("[pendingDevice] D:%v V:%v\n", device, od)

		default:
			fmt.Printf("[??? %d]\n %x\n %x\n", key[0], key, it.Value())
		}
	}
	return nil
}

@@ -1,88 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"encoding/binary"
	"fmt"
	"sort"

	"github.com/syncthing/syncthing/lib/db"
)

func indexDumpSize() error {
	type sizedElement struct {
		key  string
		size int
	}

	ldb, err := getDB()
	if err != nil {
		return err
	}

	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}

	var elems []sizedElement
	for it.Next() {
		var ele sizedElement

		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])
			ele.key = fmt.Sprintf("DEVICE:%d:%d:%s", folder, device, name)

		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			ele.key = fmt.Sprintf("GLOBAL:%d:%s", folder, name)

		case db.KeyTypeBlock:
			folder := binary.BigEndian.Uint32(key[1:])
			hash := key[1+4 : 1+4+32]
			name := nulString(key[1+4+32:])
			ele.key = fmt.Sprintf("BLOCK:%d:%x:%s", folder, hash, name)

		case db.KeyTypeDeviceStatistic:
			ele.key = fmt.Sprintf("DEVICESTATS:%s", key[1:])

		case db.KeyTypeFolderStatistic:
			ele.key = fmt.Sprintf("FOLDERSTATS:%s", key[1:])

		case db.KeyTypeVirtualMtime:
			ele.key = fmt.Sprintf("MTIME:%s", key[1:])

		case db.KeyTypeFolderIdx:
			id := binary.BigEndian.Uint32(key[1:])
			ele.key = fmt.Sprintf("FOLDERIDX:%d", id)

		case db.KeyTypeDeviceIdx:
			id := binary.BigEndian.Uint32(key[1:])
			ele.key = fmt.Sprintf("DEVICEIDX:%d", id)

		default:
			ele.key = fmt.Sprintf("UNKNOWN:%x", key)
		}
		ele.size = len(it.Value())
		elems = append(elems, ele)
	}

	sort.Slice(elems, func(i, j int) bool {
		return elems[i].size > elems[j].size
	})
	for _, ele := range elems {
		fmt.Println(ele.key, ele.size)
	}

	return nil
}

@@ -1,355 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cli

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"sort"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/protocol"
)

type fileInfoKey struct {
	folder uint32
	device uint32
	name   string
}

type globalKey struct {
	folder uint32
	name   string
}

type sequenceKey struct {
	folder   uint32
	sequence uint64
}

func indexCheck() (err error) {
	ldb, err := getDB()
	if err != nil {
		return err
	}

	folders := make(map[uint32]string)
	devices := make(map[uint32]string)
	deviceToIDs := make(map[string]uint32)
	fileInfos := make(map[fileInfoKey]protocol.FileInfo)
	globals := make(map[globalKey]db.VersionList)
	sequences := make(map[sequenceKey]string)
	needs := make(map[globalKey]struct{})
	blocklists := make(map[string]struct{})
	versions := make(map[string]protocol.Vector)
	usedBlocklists := make(map[string]struct{})
	usedVersions := make(map[string]struct{})
	var localDeviceKey uint32
	success := true
	defer func() {
		if err == nil {
			if success {
				fmt.Println("Index check completed successfully.")
			} else {
				err = errors.New("Inconsistencies found in the index")
			}
		}
	}()

	it, err := ldb.NewPrefixIterator(nil)
	if err != nil {
		return err
	}
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := binary.BigEndian.Uint32(key[1:])
			device := binary.BigEndian.Uint32(key[1+4:])
			name := nulString(key[1+4+4:])

			var f protocol.FileInfo
			err := f.Unmarshal(it.Value())
			if err != nil {
				fmt.Println("Unable to unmarshal FileInfo:", err)
				success = false
				continue
			}

			fileInfos[fileInfoKey{folder, device, name}] = f

		case db.KeyTypeGlobal:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			var flv db.VersionList
			if err := flv.Unmarshal(it.Value()); err != nil {
				fmt.Println("Unable to unmarshal VersionList:", err)
				success = false
				continue
			}
			globals[globalKey{folder, name}] = flv

		case db.KeyTypeFolderIdx:
			key := binary.BigEndian.Uint32(it.Key()[1:])
			folders[key] = string(it.Value())

		case db.KeyTypeDeviceIdx:
			key := binary.BigEndian.Uint32(it.Key()[1:])
			devices[key] = string(it.Value())
			deviceToIDs[string(it.Value())] = key
			if bytes.Equal(it.Value(), protocol.LocalDeviceID[:]) {
				localDeviceKey = key
			}

		case db.KeyTypeSequence:
			folder := binary.BigEndian.Uint32(key[1:])
			seq := binary.BigEndian.Uint64(key[5:])
			val := it.Value()
			sequences[sequenceKey{folder, seq}] = string(val[9:])

		case db.KeyTypeNeed:
			folder := binary.BigEndian.Uint32(key[1:])
			name := nulString(key[1+4:])
			needs[globalKey{folder, name}] = struct{}{}

		case db.KeyTypeBlockList:
			hash := string(key[1:])
			blocklists[hash] = struct{}{}

		case db.KeyTypeVersion:
			hash := string(key[1:])
			var v protocol.Vector
			if err := v.Unmarshal(it.Value()); err != nil {
				fmt.Println("Unable to unmarshal Vector:", err)
				success = false
				continue
			}
			versions[hash] = v
		}
	}

	if localDeviceKey == 0 {
		fmt.Println("Missing key for local device in device index (bailing out)")
		success = false
		return
	}

	var missingSeq []sequenceKey
	for fk, fi := range fileInfos {
		if fk.name != fi.Name {
			fmt.Printf("Mismatching FileInfo name, %q (key) != %q (actual)\n", fk.name, fi.Name)
			success = false
		}

		folder := folders[fk.folder]
		if folder == "" {
			fmt.Printf("Unknown folder ID %d for FileInfo %q\n", fk.folder, fk.name)
			success = false
			continue
		}
		if devices[fk.device] == "" {
			fmt.Printf("Unknown device ID %d for FileInfo %q, folder %q\n", fk.folder, fk.name, folder)
			success = false
		}

		if fk.device == localDeviceKey {
			sk := sequenceKey{fk.folder, uint64(fi.Sequence)}
			name, ok := sequences[sk]
			if !ok {
				fmt.Printf("Sequence entry missing for FileInfo %q, folder %q, seq %d\n", fi.Name, folder, fi.Sequence)
				missingSeq = append(missingSeq, sk)
				success = false
				continue
			}
			if name != fi.Name {
				fmt.Printf("Sequence entry refers to wrong name, %q (seq) != %q (FileInfo), folder %q, seq %d\n", name, fi.Name, folder, fi.Sequence)
				success = false
			}
		}

		if len(fi.Blocks) == 0 && len(fi.BlocksHash) != 0 {
			key := string(fi.BlocksHash)
			if _, ok := blocklists[key]; !ok {
				fmt.Printf("Missing block list for file %q, block list hash %x\n", fi.Name, fi.BlocksHash)
				success = false
			} else {
				usedBlocklists[key] = struct{}{}
			}
		}

		if fi.VersionHash != nil {
			key := string(fi.VersionHash)
			if _, ok := versions[key]; !ok {
				fmt.Printf("Missing version vector for file %q, version hash %x\n", fi.Name, fi.VersionHash)
				success = false
			} else {
				usedVersions[key] = struct{}{}
			}
		}

		_, ok := globals[globalKey{fk.folder, fk.name}]
		if !ok {
			fmt.Printf("Missing global for file %q\n", fi.Name)
			success = false
			continue
		}
	}

	// Aggregate the ranges of missing sequence entries, print them

	sort.Slice(missingSeq, func(a, b int) bool {
		if missingSeq[a].folder != missingSeq[b].folder {
			return missingSeq[a].folder < missingSeq[b].folder
		}
		return missingSeq[a].sequence < missingSeq[b].sequence
	})

	var folder uint32
	var startSeq, prevSeq uint64
	for _, sk := range missingSeq {
		if folder != sk.folder || sk.sequence != prevSeq+1 {
			if folder != 0 {
				fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
			}
			startSeq = sk.sequence
			folder = sk.folder
		}
		prevSeq = sk.sequence
	}
	if folder != 0 {
		fmt.Printf("Folder %d missing %d sequence entries: #%d - #%d\n", folder, prevSeq-startSeq+1, startSeq, prevSeq)
	}

	for gk, vl := range globals {
		folder := folders[gk.folder]
		if folder == "" {
			fmt.Printf("Unknown folder ID %d for VersionList %q\n", gk.folder, gk.name)
			success = false
		}
		checkGlobal := func(i int, device []byte, version protocol.Vector, invalid, deleted bool) {
			dev, ok := deviceToIDs[string(device)]
			if !ok {
				fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, device)
				success = false
			}
			fi, ok := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
			if !ok {
				fmt.Printf("VersionList %q, folder %q, entry %d refers to unknown FileInfo\n", gk.name, folder, i)
				success = false
			}

			fiv := fi.Version
			if fi.VersionHash != nil {
				fiv = versions[string(fi.VersionHash)]
			}
			if !fiv.Equal(version) {
				fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, version, fi.Version)
				success = false
			}
			if fi.IsInvalid() != invalid {
				fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, invalid, fi.IsInvalid())
				success = false
			}
			if fi.IsDeleted() != deleted {
				fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo deleted mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, deleted, fi.IsDeleted())
				success = false
			}
		}
		for i, fv := range vl.RawVersions {
			for _, device := range fv.Devices {
				checkGlobal(i, device, fv.Version, false, fv.Deleted)
			}
			for _, device := range fv.InvalidDevices {
				checkGlobal(i, device, fv.Version, true, fv.Deleted)
			}
		}

		// If we need this file we should have a need entry for it. False
		// positives from needsLocally for deleted files, where we might
		// legitimately lack an entry if we never had it, and ignored files.
		if needsLocally(vl) {
			_, ok := needs[gk]
			if !ok {
				fv, _ := vl.GetGlobal()
				devB, _ := fv.FirstDevice()
				dev := deviceToIDs[string(devB)]
				fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
				if !fi.IsDeleted() && !fi.IsIgnored() {
					fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
				}
			}
		}
	}

	seenSeq := make(map[fileInfoKey]uint64)
	for sk, name := range sequences {
		folder := folders[sk.folder]
		if folder == "" {
			fmt.Printf("Unknown folder ID %d for sequence entry %d, %q\n", sk.folder, sk.sequence, name)
			success = false
			continue
		}

		if prev, ok := seenSeq[fileInfoKey{folder: sk.folder, name: name}]; ok {
			fmt.Printf("Duplicate sequence entry for %q, folder %q, seq %d (prev %d)\n", name, folder, sk.sequence, prev)
			success = false
		}
		seenSeq[fileInfoKey{folder: sk.folder, name: name}] = sk.sequence

		fi, ok := fileInfos[fileInfoKey{sk.folder, localDeviceKey, name}]
		if !ok {
			fmt.Printf("Missing FileInfo for sequence entry %d, folder %q, %q\n", sk.sequence, folder, name)
			success = false
			continue
		}
		if fi.Sequence != int64(sk.sequence) {
			fmt.Printf("Sequence mismatch for %q, folder %q, %d (key) != %d (FileInfo)\n", name, folder, sk.sequence, fi.Sequence)
			success = false
		}
	}

	for nk := range needs {
		folder := folders[nk.folder]
		if folder == "" {
			fmt.Printf("Unknown folder ID %d for need entry %q\n", nk.folder, nk.name)
			success = false
			continue
		}

		vl, ok := globals[nk]
		if !ok {
			fmt.Printf("Missing global for need entry %q, folder %q\n", nk.name, folder)
			success = false
			continue
		}

		if !needsLocally(vl) {
			fmt.Printf("Need entry for file we don't need, %q, folder %q\n", nk.name, folder)
			success = false
		}
	}

	if d := len(blocklists) - len(usedBlocklists); d > 0 {
		fmt.Printf("%d block list entries out of %d needs GC\n", d, len(blocklists))
	}
	if d := len(versions) - len(usedVersions); d > 0 {
		fmt.Printf("%d version entries out of %d needs GC\n", d, len(versions))
	}

	return nil
}

func needsLocally(vl db.VersionList) bool {
	gfv, gok := vl.GetGlobal()
	if !gok { // That's weird, but we hardly need something non-existent
		return false
	}
	fv, ok := vl.Get(protocol.LocalDeviceID[:])
	return db.Need(gfv, ok, fv.Version)
}

@@ -12,17 +12,14 @@ import (
	"os"

	"github.com/alecthomas/kong"
	"github.com/flynn-archive/go-shlex"
	"github.com/kballard/go-shellquote"

	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
	"github.com/syncthing/syncthing/lib/config"
)

type CLI struct {
	cmdutil.CommonOptions
	DataDir    string `name:"data" placeholder:"PATH" env:"STDATADIR" help:"Set data directory (database and logs)"`
	GUIAddress string `name:"gui-address"`
	GUIAPIKey  string `name:"gui-apikey"`
	GUIAddress string `name:"gui-address" env:"STGUIADDRESS"`
	GUIAPIKey  string `name:"gui-apikey" env:"STGUIAPIKEY"`

	Show  showCommand  `cmd:"" help:"Show command group"`
	Debug debugCommand `cmd:"" help:"Debug command group"`
@@ -37,11 +34,6 @@ type Context struct {
}

func (cli CLI) AfterApply(kongCtx *kong.Context) error {
	err := cmdutil.SetConfigDataLocationsFromFlags(cli.HomeDir, cli.ConfDir, cli.DataDir)
	if err != nil {
		return fmt.Errorf("command line options: %w", err)
	}

	clientFactory := &apiClientFactory{
		cfg: config.GUIConfiguration{
			RawAddress: cli.GUIAddress,
@@ -67,7 +59,7 @@ func (*stdinCommand) Run() error {
	fmt.Println("Reading commands from stdin...", args)
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		input, err := shlex.Split(scanner.Text())
		input, err := shellquote.Split(scanner.Text())
		if err != nil {
			return fmt.Errorf("parsing input: %w", err)
		}

@@ -10,6 +10,7 @@ import (
	"bufio"
	"errors"
	"fmt"
	"net/http"
	"path/filepath"

	"github.com/alecthomas/kong"
@@ -63,7 +64,7 @@ func (f *folderOverrideCommand) Run(ctx Context) error {
	if err != nil {
		return err
	}
	if response.StatusCode != 200 {
	if response.StatusCode != http.StatusOK {
		errStr := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
		bytes, err := responseToBArray(response)
		if err != nil {

@@ -17,8 +17,6 @@ import (
	"path/filepath"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/locations"
)

func responseToBArray(response *http.Response) ([]byte, error) {
@@ -133,19 +131,6 @@ func prettyPrintResponse(response *http.Response) error {
	return prettyPrintJSON(data)
}

func getDB() (backend.Backend, error) {
	return backend.OpenLevelDBRO(locations.Get(locations.Database))
}

func nulString(bs []byte) string {
	for i := range bs {
		if bs[i] == 0 {
			return string(bs[:i])
		}
	}
	return string(bs)
}

func normalizePath(path string) string {
	return filepath.ToSlash(filepath.Clean(path))
}

@@ -1,16 +0,0 @@
// Copyright (C) 2021 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

// CommonOptions are reused among several subcommands
type CommonOptions struct {
	buildCommonOptions
	ConfDir         string `name:"config" placeholder:"PATH" env:"STCONFDIR" help:"Set configuration directory (config and keys)"`
	HomeDir         string `name:"home" placeholder:"PATH" env:"STHOMEDIR" help:"Set configuration and data directory"`
	NoDefaultFolder bool   `env:"STNODEFAULTFOLDER" help:"Don't create the \"default\" folder on first startup"`
	SkipPortProbing bool   `help:"Don't try to find free ports for GUI and listen addresses on first startup"`
}

@@ -1,35 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package cmdutil

import (
	"errors"

	"github.com/syncthing/syncthing/lib/locations"
)

func SetConfigDataLocationsFromFlags(homeDir, confDir, dataDir string) error {
	homeSet := homeDir != ""
	confSet := confDir != ""
	dataSet := dataDir != ""
	switch {
	case dataSet != confSet:
		return errors.New("either both or none of --config and --data must be given, use --home to set both at once")
	case homeSet && dataSet:
		return errors.New("--home must not be used together with --config and --data")
	case homeSet:
		confDir = homeDir
		dataDir = homeDir
		fallthrough
	case dataSet:
		if err := locations.SetBaseDir(locations.ConfigBaseDir, confDir); err != nil {
			return err
		}
		return locations.SetBaseDir(locations.DataBaseDir, dataDir)
	}
	return nil
}
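
// [Editor's note] Worked example for the flag handling above (hypothetical
// invocations): --home sets both locations at once, while --config and
// --data must be given together.
//
//	syncthing --home=/var/lib/syncthing                      // ok: both under one root
//	syncthing --config=/etc/syncthing --data=/var/syncthing  // ok: split locations
//	syncthing --config=/etc/syncthing                        // error: --data missing
//	syncthing --home=/a --config=/b --data=/c                // error: --home conflicts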
|
||||
@@ -9,15 +9,17 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
"github.com/syncthing/syncthing/internal/slogutil"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -29,16 +31,18 @@ const (
|
||||
// directory to the crash reporting server as urlBase. Uploads are attempted
|
||||
// with the newest log first.
|
||||
//
|
||||
// This can can block for a long time. The context can set a final deadline
|
||||
// This can block for a long time. The context can set a final deadline
|
||||
// for this.
|
||||
func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
files, err := filepath.Glob(filepath.Join(dir, "panic-*.log"))
|
||||
if err != nil {
|
||||
l.Warnln("Failed to list panic logs:", err)
|
||||
slog.ErrorContext(ctx, "Failed to list panic logs", slogutil.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(files)))
|
||||
slices.SortFunc(files, func(a, b string) int {
|
||||
return strings.Compare(b, a)
|
||||
})
|
||||
for _, file := range files {
|
||||
if strings.Contains(file, ".reported.") {
|
||||
// We've already sent this file. It'll be cleaned out at some
|
||||
@@ -47,7 +51,7 @@ func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
}
|
||||
|
||||
if err := uploadPanicLog(ctx, urlBase, file); err != nil {
|
||||
l.Warnln("Reporting crash:", err)
|
||||
slog.ErrorContext(ctx, "Reporting crash", slogutil.Error(err))
|
||||
} else {
|
||||
// Rename the log so we don't have to try to report it again. This
|
||||
// succeeds, or it does not. There is no point complaining about it.
|
||||
@@ -70,7 +74,7 @@ func uploadPanicLog(ctx context.Context, urlBase, file string) error {
|
||||
data = filterLogLines(data)
|
||||
|
||||
hash := fmt.Sprintf("%x", sha256.Sum256(data))
|
||||
l.Infof("Reporting crash found in %s (report ID %s) ...\n", filepath.Base(file), hash[:8])
|
||||
slog.InfoContext(ctx, "Reporting crash", slogutil.FilePath(filepath.Base(file)), slog.String("id", hash[:8]))
|
||||
|
||||
url := fmt.Sprintf("%s/%s", urlBase, hash)
|
||||
headReq, err := http.NewRequest(http.MethodHead, url, nil)
|
||||
|
||||
@@ -6,10 +6,6 @@
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
import "github.com/syncthing/syncthing/internal/slogutil"
|
||||
|
||||
var (
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
)
|
||||
func init() { slogutil.RegisterPackage("Main package") }
|
||||
|
||||
@@ -17,6 +17,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/syncthing/syncthing/internal/gen/bep"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
@@ -235,7 +238,7 @@ func (c *CLI) decryptFile(encFi *protocol.FileInfo, plainFi *protocol.FileInfo,
|
||||
}
|
||||
|
||||
// Verify the hash against the plaintext block info
|
||||
if !scanner.Validate(dec, plainBlock.Hash, 0) {
|
||||
if !scanner.Validate(dec, plainBlock.Hash) {
|
||||
// The block decrypted correctly but fails the hash check. This
|
||||
// is odd and unexpected, but it it's still a valid block from
|
||||
// the source. The file might have changed while we pulled it?
|
||||
@@ -280,10 +283,11 @@ func loadEncryptedFileInfo(fd fs.File) (*protocol.FileInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encFi protocol.FileInfo
|
||||
if err := encFi.Unmarshal(trailer); err != nil {
|
||||
var encFi bep.FileInfo
|
||||
if err := proto.Unmarshal(trailer, &encFi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi := protocol.FileInfoFromWire(&encFi)
|
||||
|
||||
return &encFi, nil
|
||||
return &fi, nil
|
||||
}
|
||||
|
||||
@@ -11,42 +11,25 @@ import (
 	"bufio"
 	"context"
 	"crypto/tls"
-	"errors"
 	"fmt"
+	"log/slog"
 	"os"
 
-	"github.com/syncthing/syncthing/cmd/syncthing/cmdutil"
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/locations"
-	"github.com/syncthing/syncthing/lib/logger"
-	"github.com/syncthing/syncthing/lib/osutil"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/syncthing"
 )
 
 type CLI struct {
-	cmdutil.CommonOptions
-	GUIUser     string `placeholder:"STRING" help:"Specify new GUI authentication user name"`
-	GUIPassword string `placeholder:"STRING" help:"Specify new GUI authentication password (use - to read from standard input)"`
+	GUIUser       string `placeholder:"STRING" help:"Specify new GUI authentication user name"`
+	GUIPassword   string `placeholder:"STRING" help:"Specify new GUI authentication password (use - to read from standard input)"`
+	NoPortProbing bool   `help:"Don't try to find free ports for GUI and listen addresses on first startup" env:"STNOPORTPROBING"`
 }
 
-func (c *CLI) Run(l logger.Logger) error {
-	if c.HideConsole {
-		osutil.HideConsole()
-	}
-
-	if c.HomeDir != "" {
-		if c.ConfDir != "" {
-			return errors.New("--home must not be used together with --config")
-		}
-		c.ConfDir = c.HomeDir
-	}
-	if c.ConfDir == "" {
-		c.ConfDir = locations.GetBaseDir(locations.ConfigBaseDir)
-	}
-
+func (c *CLI) Run() error {
 	// Support reading the password from a pipe or similar
 	if c.GUIPassword == "-" {
 		reader := bufio.NewReader(os.Stdin)
@@ -57,13 +40,13 @@ func (c *CLI) Run(l logger.Logger) error {
 		c.GUIPassword = string(password)
 	}
 
-	if err := Generate(l, c.ConfDir, c.GUIUser, c.GUIPassword, c.NoDefaultFolder, c.SkipPortProbing); err != nil {
+	if err := Generate(locations.GetBaseDir(locations.ConfigBaseDir), c.GUIUser, c.GUIPassword, c.NoPortProbing); err != nil {
 		return fmt.Errorf("failed to generate config and keys: %w", err)
 	}
 	return nil
 }
 
-func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFolder, skipPortProbing bool) error {
+func Generate(confDir, guiUser, guiPassword string, skipPortProbing bool) error {
 	dir, err := fs.ExpandTilde(confDir)
 	if err != nil {
 		return err
@@ -78,20 +61,21 @@ func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFo
 	certFile, keyFile := locations.Get(locations.CertFile), locations.Get(locations.KeyFile)
 	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
 	if err == nil {
-		l.Warnln("Key exists; will not overwrite.")
+		slog.Warn("Key exists; will not overwrite")
 	} else {
 		cert, err = syncthing.GenerateCertificate(certFile, keyFile)
 		if err != nil {
 			return fmt.Errorf("create certificate: %w", err)
 		}
 	}
 
 	myID = protocol.NewDeviceID(cert.Certificate[0])
-	l.Infoln("Device ID:", myID)
+	slog.Info("Calculated device ID", slog.String("device", myID.String()))
 
 	cfgFile := locations.Get(locations.ConfigFile)
 	cfg, _, err := config.Load(cfgFile, myID, events.NoopLogger)
 	if fs.IsNotExist(err) {
-		if cfg, err = syncthing.DefaultConfig(cfgFile, myID, events.NoopLogger, noDefaultFolder, skipPortProbing); err != nil {
+		if cfg, err = syncthing.DefaultConfig(cfgFile, myID, events.NoopLogger, skipPortProbing); err != nil {
 			return fmt.Errorf("create config: %w", err)
 		}
 	} else if err != nil {
@@ -104,7 +88,7 @@ func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFo
 
 	var updateErr error
 	waiter, err := cfg.Modify(func(cfg *config.Configuration) {
-		updateErr = updateGUIAuthentication(l, &cfg.GUI, guiUser, guiPassword)
+		updateErr = updateGUIAuthentication(&cfg.GUI, guiUser, guiPassword)
 	})
 	if err != nil {
 		return fmt.Errorf("modify config: %w", err)
@@ -120,17 +104,17 @@ func Generate(l logger.Logger, confDir, guiUser, guiPassword string, noDefaultFo
 	return nil
 }
 
-func updateGUIAuthentication(l logger.Logger, guiCfg *config.GUIConfiguration, guiUser, guiPassword string) error {
+func updateGUIAuthentication(guiCfg *config.GUIConfiguration, guiUser, guiPassword string) error {
 	if guiUser != "" && guiCfg.User != guiUser {
 		guiCfg.User = guiUser
-		l.Infoln("Updated GUI authentication user name:", guiUser)
+		slog.Info("Updated GUI authentication user", "name", guiUser)
 	}
 
 	if guiPassword != "" && guiCfg.Password != guiPassword {
 		if err := guiCfg.SetPassword(guiPassword); err != nil {
 			return fmt.Errorf("failed to set GUI authentication password: %w", err)
 		}
-		l.Infoln("Updated GUI authentication password.")
+		slog.Info("Updated GUI authentication password")
 	}
 	return nil
 }
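Run keeps the "-" convention for --gui-password: a lone dash means the password is read from standard input rather than passed on the command line, so it never lands in shell history or the process list. A minimal sketch of that idiom under the assumption of a plain line-based read (the helper name is illustrative, not syncthing's):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// readPasswordArg resolves a --gui-password style value: a literal "-"
// means "read one line from stdin", anything else is used verbatim.
func readPasswordArg(arg string) (string, error) {
	if arg != "-" {
		return arg, nil
	}
	reader := bufio.NewReader(os.Stdin)
	line, err := reader.ReadString('\n')
	if err != nil {
		return "", fmt.Errorf("read password from stdin: %w", err)
	}
	return strings.TrimRight(line, "\r\n"), nil
}

func main() {
	pw, err := readPasswordArg("-") // e.g. echo secret | ./prog
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("password length:", len(pw))
}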
@@ -8,18 +8,21 @@ package main
 
 import (
 	"fmt"
+	"log/slog"
 	"os"
 	"runtime"
 	"runtime/pprof"
 	"syscall"
 	"time"
+
+	"github.com/syncthing/syncthing/internal/slogutil"
 )
 
 func startHeapProfiler() {
-	l.Debugln("Starting heap profiling")
+	slog.Debug("Starting heap profiling")
 	go func() {
 		err := saveHeapProfiles(1) // Only returns on error
-		l.Warnln("Heap profiler failed:", err)
+		slog.Error("Heap profiler failed", slogutil.Error(err))
 		panic("Heap profiler failed")
 	}()
 }
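saveHeapProfiles only returns on error, so the goroutine treats any return as fatal and panics. Its body is not shown in this hunk; a plausible minimal loop honoring that contract writes a heap profile on an interval via runtime/pprof, roughly like this (details assumed, not syncthing's actual implementation):

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

// saveHeapProfiles writes a heap profile every interval and only
// returns on error, matching the contract referenced in the diff above.
func saveHeapProfiles(intervalMinutes int) error {
	for range time.Tick(time.Duration(intervalMinutes) * time.Minute) {
		runtime.GC() // fold recent allocations into the profile

		f, err := os.Create(fmt.Sprintf("heap-%d.pprof", time.Now().Unix()))
		if err != nil {
			return err
		}
		if err := pprof.Lookup("heap").WriteTo(f, 0); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	return nil // unreachable: time.Tick never closes its channel
}

func main() {
	err := saveHeapProfiles(1) // only returns on error
	fmt.Fprintln(os.Stderr, "heap profiler failed:", err)
}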
Some files were not shown because too many files have changed in this diff.