Mirror of https://github.com/syncthing/syncthing.git (synced 2025-12-24 06:28:10 -05:00)
Compare commits
1125 Commits
(Commit list not reproduced: the compare view lists 1125 commits, beginning with 45a711570e and ending with 1589942fc2 as shown, but only the SHA column survived extraction; per-commit author, date, and message details are not recoverable.)
.codecov.yml: 5 changed lines (new file)
@@ -0,0 +1,5 @@
coverage:
  range: "40...100"

ignore:
  - "**.pb.go"

.github/CODEOWNERS: 2 changed lines (vendored, new file)
@@ -0,0 +1,2 @@
/AUTHORS @calmh
/*.md @calmh

.github/ISSUE_TEMPLATE.md: 42 changed lines (vendored, new file)
@@ -0,0 +1,42 @@
### DO NOT REPORT SECURITY ISSUES IN THIS ISSUE TRACKER

Instead, contact security@syncthing.net directly - see
https://syncthing.net/security.html for more information.

### DO NOT POST SUPPORT REQUESTS OR GENERAL QUESTIONS IN THIS ISSUE TRACKER

Please use the forum at https://forum.syncthing.net/ where a large number of
helpful people hang out. This issue tracker is for reporting bugs or feature
requests directly to the developers. Worst case you might get a short
"that's a bug, please report it on GitHub" response on the forum, in which
case we thank you for your patience and following our advice. :)

### Please use the correct issue tracker

If your problem relates to a Syncthing wrapper or [sub-project](https://github.com/syncthing) such as [Syncthing for Android](https://github.com/syncthing/syncthing-android/issues), [SyncTrayzor](https://github.com/canton7/synctrayzor) or the [documentation](https://github.com/syncthing/docs/issues), please use their respective issue trackers.

### Does your log mention database corruption?

If your Syncthing log reports panics because of database corruption it is most likely a fault with your system's storage or memory. Affected log entries will contain lines starting with `panic: leveldb`. You will need to delete the index database to clear this, by running `syncthing -reset-database`.

### Please do post actual bug reports and feature requests.

If your issue is a bug report, replace this boilerplate with a description
of the problem, being sure to include at least:

- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.

Also fill out the version information below and add log output or
screenshots as appropriate.

If your issue is a feature request, simply replace this template text in
its entirety.

### Version Information

Syncthing Version: v0.x.y
OS Version: Windows 7 / Ubuntu 14.04 / ...
Browser Version: (if applicable, for GUI issues)

(The following hunk belongs to another modified file; its file header did not survive extraction.)
@@ -20,13 +20,8 @@ If this is a user visible change (including API and protocol changes), add a lin
to the corresponding pull request on https://github.com/syncthing/docs or describe
the documentation changes necessary.

### Authorship
## Authorship

Your name and email will be added automatically to the AUTHORS file
based on the commit metadata.

Every author of a code contribution (Go, Javascript, HTML, CSS etc, with the
possible exception of minor typo corrections and similar) is recorded in the
AUTHORS and NICKS files and the in-GUI credits. If this is your first
contribution, a maintainer will add you properly before accepting the
contribution. You need not do so yourself or worry about the fact that the
"authors" automated test fails. However, if your name (such as you want it
presented in the credits) is not visible on your Github profile or in your
commit messages, please assist by providing it here.

.github/SECURITY.md: 48 changed lines (vendored, new file)
@@ -0,0 +1,48 @@
## Reporting a Vulnerability

If you believe that you've found a Syncthing-related security vulnerability, please report it by sending email to the address security@syncthing.net. The PGP key for security@syncthing.net (B683AD7B76CAB013) below can be used to send encrypted mail or to verify responses received from that address.

```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1

mQENBFShFlIBCACsW346HYskhKhxrdZMjyU5Ntsjvg6/ogqINDPoL10/oaIP0G+t
7zzC0K5Cq29ix43kNNLKTJNyPkdTeaJEcqslMUt6tovjHwoKJ073GP0W3KsNvBRg
ffCZOAexGfOsBSL9KHaYGK67Py3TFgtN1H/EmboU1arrLfAMrmqOip++EGqOxjse
gH0qk7Mk1TqEC5Xh3NGE7r1UobAlqdUv5E3v7U11NhAdP1zu/XZ/zvP5mwVQJMLv
iZyeWGliNI8nEeRjYw+S85f4gq7H2mgoeNBN4WwwK1hhz9qpvCsgPW3XqlExTPI4
1vM4PxpiFIuF0zuy2OwsmjrpTCZeBscr4Tj5ABEBAAG0K1N5bmN0aGluZyBTZWN1
cml0eSA8c2VjdXJpdHlAc3luY3RoaW5nLm5ldD6JATgEEwECACIFAlShFlICGwMG
CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJELaDrXt2yrATB1UH/jnnIa6DekCA
2V36N/2+pFSNLVWeoZQxZ9ne9S7WSubaoK4oCWiuChPSAy5hagnKnNA3a9wrz5iN
0hgCA88NnTU18biZW6HX8xWEd3dnY3fX1sG0XdHuKlFria8ByfcrbShf/CttXkEd
Y5qPHH9aLIMtksBS1MIsRYrOCHiLNYFCKlbjDDCGT3tuk/yaU7aBAFVPIag54afC
zZAIvTIgRruLWkNT/sSEJx9ekhJzO/+pXNbSSHwhoj5OJh7sQjZWwPzNazelk48R
+ku8VpB5Wxgk2MnJPf1RxF+7saBAaeAVZsJcPN+Jp7u9LCFelIRn1ISsHbhLhyqL
cPXqllltLCKJAhwEEAECAAYFAlShFlsACgkQSfWuwLzlJMcEpA//VvZYHGZgZMM0
bBkYnjManYnJkHSQ2pHsjquSFH+fbfq2FxxTk/nQ/IAvBzt8NDTT3ylPOmsl4A8q
r8BxsRNENhU7zDQrKC5NtYUrzXhPGo8qfDwkeLyqd2msUvHn7EH3PNgiN2QaFWxE
21FqoXIpERKJRgRtv1SYZoMyNGjmT2hta1ZbwEfrPJnzjYhneoUGsRApG7p+uPqe
4LRpbG3Fa2vBm/UWUrOe+6jPzvMokjIJbdK0IjXamFAzwYW5fSZaFa94mR6L5f5m
X8Dhp66mBRTx7qk6ldEqptvaYGqihaWP1xbDaApQsGHQujtcBYGp+edlkabuW5h7
stl/7QTFkEPqHT4ybxEX0rLoFUGq56WUlKp27z40keStaXMgfrsxJtkz66Xs8vU4
7qZcLAcPsin+y0toqavtwtM/L7uCMu1yhbFRGJ+JL6saJAqzwS8l7r6E2R7OnVj1
BdASgxx1TgzW6ZFW5p1Iy6mtpkBypsnp8s3UcP3GFRnQe9gi1EXHjzuZAUGafe4Q
juvJ8t8xIcQMFuAylNIVyXvIWJoehqsVY9EBgVtE4y1SRh8XTT3Tddn8ab5fl7uh
HWAY8cRlv6WIOhK8w8oroiYx0SID8jMeEwJBS4DL7qWtJMkDo8ZEJiB+Pkd963MX
05QXt33AvJJ9PmbGCHkcH7198tCmA+e5AQ0EVKEWUgEIAPGczHpa6NdxY0pm+tIp
btiA6gdPE70pJYgJTKX7siCQ2w770H3CBSKmqEXadr7WnfIgUYIDaSxadeGzB/Mn
3SHCYRCqbA7mwu2k4wNNvCEM0xZqFAvaDJ4avlZ5oiMT8IFHKsjC77nkhmfXaIGt
hn10H2MFADjvJYul7vR+Ghg1wGhTGWo2u7VVj9BI+AfvnWaouFI0cx2KNWEI/Ocj
z6jk8nmC3yOEFQECM/hF4lkAOv9CQUa8UcvAr31trzawmV1iSsKjmVZgqd0N4T8f
hUikqUPZGNCRcqEUffTzggIyGPbedFnZw9Di7o1xByxyTrZycemAVqaVGF+9nFLG
pccAEQEAAYkBHwQYAQIACQUCVKEWUgIbDAAKCRC2g617dsqwEzrjB/9q0T8A4XUE
p0g6xq86jhmh4jlEedxrfXUL3R6ejFtuKMThulxEP0xiQ7xLzBhOnvyxLCsVbjp8
0JtJTVCq44UzUiIBuVNRoYG29uXuTUL2UtI27VhjFxMzLDwZL97tbGR6lzdM/+U5
9PC+PIvS0yz13z2t3/x3KUOnBgxZnpy9h4AdKwjrNVtnQdGDXSKlJBLb57TFcS94
f3roZ5Gpw3AWYSmSSiZWbhks2UNzYSQ84LAKlV1NkktO+qRc/pUqr6IxMjOKc7XP
e8u1Nst6fN3GNqZOV+jUYs/fqrJgp1TUWjNTuf22Rl0Idz7XLPJKYFh9W7T/4MbU
M7Q8GZuww1rk
=No/v
-----END PGP PUBLIC KEY BLOCK-----
```

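As a side note (not part of the diff), the key block above is a standard ASCII-armored public key, so it can be used with ordinary GnuPG commands. A minimal sketch, assuming the key has been saved locally under the illustrative name syncthing-security.asc:

```sh
# Sketch only: import the published key and encrypt a report to it.
# The file names (syncthing-security.asc, report.txt) are illustrative.
gpg --import syncthing-security.asc
gpg --encrypt --armor --recipient security@syncthing.net report.txt
# Produces report.txt.asc, which can then be mailed to security@syncthing.net.
```
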
.gitignore: 7 changed lines (vendored)
@@ -17,3 +17,10 @@ RELEASE
deb
lib/auto/gui.files.go
snapcraft.yaml
prime/
snap/
parts/
stage/
*.snap
*.bz2
/repos

.golangci.yml: 24 changed lines (new file)
@@ -0,0 +1,24 @@
linters-settings:
  maligned:
    suggest-new: true

linters:
  enable-all: true
  disable:
    - goimports
    - depguard
    - lll
    - gochecknoinits
    - gochecknoglobals
    - gofmt
    - scopelint
    - gocyclo
    - funlen
    - wsl

service:
  golangci-lint-version: 1.21.x
  prepare:
    - rm -f go.sum # 1.12 -> 1.13 issues with QUIC-go
    - GO111MODULE=on go mod vendor
    - go run build.go assets

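For context (not part of the diff), the service.prepare list above is what the hosted lint service runs before analysis. A rough local equivalent, assuming golangci-lint 1.21.x is installed and the commands are run from the repository root, might be:

```sh
# Sketch only: approximate local equivalent of the service.prepare steps,
# followed by a lint run using the configuration in .golangci.yml.
rm -f go.sum                    # work around Go 1.12 -> 1.13 issues with QUIC-go
GO111MODULE=on go mod vendor    # vendor dependencies
go run build.go assets          # generate embedded GUI assets
golangci-lint run               # run the configured linters
```
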
AUTHORS: 104 changed lines
@@ -1,39 +1,61 @@
# This is the official list of Syncthing authors for copyright purposes.
# The format is:
#
# THIS FILE IS MOSTLY AUTO GENERATED. IF YOU'VE MADE A COMMIT TO THE
# REPOSITORY YOU WILL BE ADDED HERE AUTOMATICALLY WITHOUT THE NEED FOR
# ANY MANUAL ACTION.
#
# That said, you are welcome to correct your name or add a nickname / GitHub
# user name as appropriate. The format is:
#
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The NICKS list is auto generated from this file.
# The in-GUI authors list is periodically automatically updated from the
# contents of this file.
#

Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
andresvia <andres.via@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrey D (scienmind) <scintertech@cryptolab.net>
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
André Colomb (acolomb) <src@andre.colomb.de> <github.com@andre.colomb.de>
andyleap <andyleap@gmail.com>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com>
Aranjedeath <Aranjedeath@users.noreply.github.com>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com> <github@audrius.rocks>
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benedikt Heine (bebehei) <bebe@bebehei.de>
Benedikt Morbach <benedikt.morbach@googlemail.com>
Benno Fünfstück <benno.fuenfstueck@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
Boris Rybalkin <ribalkin@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (Moter8) <moter8@gmail.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Carsten Hagemann (carstenhag) <moter8@gmail.com> <carsten@chagemann.de>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Chris Tonkinson <chris@masterbran.ch>
chucic <chucic@seznam.cz>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Cromefire_ <tim.l@nghorst.net>
Dale Visser <dale.visser@live.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
@@ -41,35 +63,62 @@ Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
dependabot-preview[bot] <dependabot-preview[bot]@users.noreply.github.com>
dependabot[bot] <dependabot[bot]@users.noreply.github.com>
derekriemer <derek.riemer@colorado.edu>
desbma <desbma@users.noreply.github.com>
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Elliot Huffman <thelich2@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Evgeny Kuznetsov <evgeny@kuznetsov.md>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
georgespatton <georgespatton@users.noreply.github.com>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
Han Boetes <han@boetes.org>
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Hugo Locurcio <hugo.locurcio@hugo.pro>
Iain Barnett <iainspeed@gmail.com>
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
Iskander Sharipov (Alex) <quasilyte@gmail.com>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
janost <janost@tuta.io>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
jaseg <githubaccount@jaseg.net>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Andersson <j@i19.se>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
Jonas Thelemann <e-mail@jonas-thelemann.de>
Jonathan Cross <jcross@gmail.com>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
Jörg Thalheim <Mic92@users.noreply.github.com>
Kalle Laine <pahakalle@protonmail.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Keith Turner <kturner@apache.org>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
klemens <ka7@github.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Laurent Arnoud <laurent@spkdev.net>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
@@ -79,42 +128,81 @@ Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
marco-m <marco.molteni@laposte.net>
Mark Pulford (mpx) <mark@kyne.com.au>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Matic Potočnik <hairyfotr@gmail.com>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Matt Robenolt <matt@ydekproductions.com>
Matteo Ruina <matteo.ruina@gmail.com>
Maurizio Tomasi <ziotom78@gmail.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
MaximAL <almaximal@ya.ru>
Maxime Thirouin <m@moox.io>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
Mike Boone <mike@boonedocks.net>
MikeLund <MikeLund@users.noreply.github.com>
Mingxuan Lin <gdlmx@users.noreply.github.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
Nitroretro <43112364+Nitroretro@users.noreply.github.com>
NoLooseEnds <jon.koslung@gmail.com>
otbutz <tbutz@optitool.de>
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
Paweł Rozlach <vespian@users.noreply.github.com>
perewa <cavalcante.ten@gmail.com>
Peter Badida <KeyWeeUsr@users.noreply.github.com>
Peter Dave Hello <hsu@peterdavehello.org>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Peter Marquardt (wwwutz) <wwwutz@gmail.com> <wwwutz@googlemail.com>
Phil Davis <phil.davis@inf.org>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Pier Paolo Ramon <ramonpierre@gmail.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
Richard Hartmann <RichiH@users.noreply.github.com>
Robert Carosi (nov1n) <robert@carosi.nl>
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
rubenbe <github-com-00ff86@vandamme.email>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Simon Frei (imsodin) <freisim93@gmail.com>
Sly_tom_cat <slytomcat@mail.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org> <stefan@rumpelsepp.org>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Thomas Hipp <thomashipp@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tom Jakubowski <tom@crystae.net>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tommy Thorn <tommy-github-email@thorn.ws>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
Vladimir Rusinov <vrusinov@google.com>
wangguoliang <liangcszzu@163.com>
William A. Kennington III (wkennington) <william@wkennington.com>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
Xavier O. (damajor) <damajor@gmail.com>
xjtdy888 (xjtdy888) <xjtdy888@163.com>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
佛跳墙 <daoquan@qq.com>

CONDUCT.md: 129 changed lines
@@ -1,92 +1,73 @@
## Conduct
# Contributor Covenant Code of Conduct

* We are committed to providing a friendly, safe and welcoming
environment for all, regardless of gender, sexual orientation,
disability, ethnicity, religion, or similar personal characteristic.
## Our Pledge

* On IRC, please avoid using overtly sexual nicknames or other nicknames
that might detract from a friendly, safe and welcoming environment for
all.
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.

* Please be kind and courteous. There's no need to be mean or rude.
## Our Standards

* Respect that people have differences of opinion and that every design
or implementation choice carries a trade-off and numerous costs. There
is seldom a right answer.
Examples of behavior that contributes to creating a positive environment
include:

* Please keep unstructured critique to a minimum. If you have solid
ideas you want to experiment with, make a fork and see how it works.
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

* We will exclude you from interaction if you insult, demean or harass
anyone. That is not welcome behaviour. We interpret the term
"harassment" as including the definition in the <a
href="http://citizencodeofconduct.org/">Citizen Code of Conduct</a>;
if you have any lack of clarity about what might be included in that
concept, please read their definition. In particular, we don't
tolerate behavior that excludes people in socially marginalized
groups.
Examples of unacceptable behavior by participants include:

* Private harassment is also unacceptable. No matter who you are, if you
feel you have been or are being harassed or made uncomfortable by a
community member, please contact one of the channel ops or any of the
Syncthing core team immediately. Whether you're a regular contributor
or a newcomer, we care about making this community a safe place for
you and we've got your back.
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

* Likewise any spamming, trolling, flaming, baiting or other
attention-stealing behaviour is not welcome.
## Our Responsibilities

## Moderation
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

These are the policies for upholding our community's standards of
conduct in our communication channels, most notably in Syncthing-related
IRC channels and on the web forum.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

1. Remarks that violate the Syncthing standards of conduct, including
hateful, hurtful, oppressive, or exclusionary remarks, are not
allowed. (Cursing is allowed, but never targeting another user, and
never in a hateful manner.)
## Scope

2. Remarks that moderators find inappropriate, whether listed in the
code of conduct or not, are also not allowed.
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

3. Moderators will first respond to such remarks with a warning.
## Enforcement

4. If the warning is unheeded, the user will be "kicked," i.e., kicked
out of the communication channel to cool off.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at security@syncthing.net. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

5. If the user comes back and continues to make trouble, they will be
banned, i.e., indefinitely excluded.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

6. Moderators may choose at their discretion to un-ban the user if it
was a first offense and they offer the offended party a genuine
apology.
## Attribution

7. If a moderator bans someone and you think it was unjustified, please
take it up with that moderator, or with a different moderator, **in
private**. Complaints about bans in-channel are not allowed.
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

8. Moderators are held to a higher standard than other community
members. If a moderator creates an inappropriate situation, they
should expect less leeway than others.

In the Syncthing community we strive to go the extra step to look out
for each other. Don't just aim to be technically unimpeachable, try to
be your best self. In particular, avoid flirting with offensive or
sensitive issues, particularly if they're off-topic; this all too
often leads to unnecessary fights, hurt feelings, and damaged trust;
worse, it can drive people away from the community entirely.

And if someone takes issue with something you said or did, resist the
urge to be defensive. Just stop doing what it was they complained about
and apologize. Even if you feel you were misinterpreted or unfairly
accused, chances are good there was something you could've communicated
better — remember that it's your responsibility to make your fellow
community members comfortable. Everyone wants to get along and we are
all here first and foremost because we want to talk about cool
technology. You will find that people will be eager to assume good
intent and forgive as long as you earn their trust.

*Adapted from the [Rust Code of Conduct](https://github.com/rust-lang/rust/wiki/Note-development-policy#conduct)*

*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling)*
[homepage]: https://www.contributor-covenant.org

Dockerfile: 28 changed lines (new file)
@@ -0,0 +1,28 @@
FROM golang:1.13 AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f syncthing && go run build.go -no-upgrade build syncthing

FROM alpine

EXPOSE 8384 22000 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /src/syncthing /bin/syncthing
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/syncthing

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 8384 || exit 1

ENV STGUIADDRESS=0.0.0.0:8384
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/syncthing", "-home", "/var/syncthing/config"]

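For orientation (not part of the diff), an image built from this Dockerfile would typically be run with the declared ports and volume mapped to the host. A minimal sketch; the image tag syncthing-local and the host directory ~/syncthing-data are illustrative, not taken from the diff:

```sh
# Sketch only: build and run the Syncthing image defined above.
# Ports follow the EXPOSE line; the volume follows the VOLUME line;
# PUID/PGID match the environment variables set in the Dockerfile.
docker build -t syncthing-local .
docker run -d \
  -p 8384:8384 -p 22000:22000 -p 21027:21027/udp \
  -v ~/syncthing-data:/var/syncthing \
  -e PUID=1000 -e PGID=1000 \
  syncthing-local
```
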
Dockerfile.stdiscosrv: 28 changed lines (new file)
@@ -0,0 +1,28 @@
FROM golang:1.13 AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f stdiscosrv && go run build.go -no-upgrade build stdiscosrv

FROM alpine

EXPOSE 19200 8443

VOLUME ["/var/stdiscosrv"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /src/stdiscosrv /bin/stdiscosrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/stdiscosrv

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 8443 || exit 1

WORKDIR /var/stdiscosrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/stdiscosrv"]

Dockerfile.strelaysrv: 28 changed lines (new file)
@@ -0,0 +1,28 @@
FROM golang:1.13 AS builder

WORKDIR /src
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f strelaysrv && go run build.go -no-upgrade build strelaysrv

FROM alpine

EXPOSE 22067 22070

VOLUME ["/var/strelaysrv"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /src/strelaysrv /bin/strelaysrv
COPY --from=builder /src/script/docker-entrypoint.sh /bin/entrypoint.sh

ENV PUID=1000 PGID=1000 HOME=/var/strelaysrv

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 22067 || exit 1

WORKDIR /var/strelaysrv
ENTRYPOINT ["/bin/entrypoint.sh", "/bin/strelaysrv"]

GOALS.md: 2 changed lines
@@ -36,7 +36,7 @@ or modification by unauthorized parties.

Syncthing should be approachable, understandable and inclusive.

> Complex concepts and maths form the base of Synchting's functionality.
> Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where
> Syncthing is usable by the general public.

(The following hunk removes a file whose header did not survive extraction.)
@@ -1,28 +0,0 @@
Do not report security issues in this bug tracker. Instead, contact
security@syncthing.net directly - see https://syncthing.net/security.html
for more information.

If your issue is a support request ("How do I get my devices to connect?"
or similar), please use the support forum at https://forum.syncthing.net/
where a large number of helpful people hang out. This issue tracker is for
reporting bugs or feature requests directly to the developers.

If your issue is a bug report, replace this boilerplate with a description
of the problem, being sure to include at least:

- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.

Also fill out the version information below and add log output or
screenshots as appropriate.

If your issue is a feature request, simply replace this template text in
its entirety.

### Version Information

Syncthing Version: v0.x.y
OS Version: Windows 7 / Ubuntu 14.04 / ...
Browser Version: (if applicable, for GUI issues)

149  NICKS
@@ -1,149 +0,0 @@

# This file maps email addresses used in commits to nicks used the changelog.
# It is auto generated from the AUTHORS file by script/authors.go.

0x010C <antoine.lamielle@0x010c.fr>
0x010C <gh@0x010c.fr>
acogdev <jake@acogdev.com>
adelq <aqalieh95@gmail.com>
adelq <adelq@users.noreply.github.com>
alessandro.g89 <alessandro.g89@gmail.com>
alex2108 <register-github@alex-graf.de>
andersonvom <andersonvom@gmail.com>
andrew-d <andrew@du.nham.ca>
asdil12 <dominik@heidler.eu>
AudriusButkevicius <audrius.butkevicius@gmail.com>
aviau <alexandre@alexandreviau.net>
aviau <aviau@debian.org>
bencurthoys <ben@bencurthoys.com>
benshep <bjashepherd@gmail.com>
bigbear2nd <bigbear2nd@gmail.com>
brbecker <brbecker@gmail.com>
brendanlong <self@brendanlong.com>
brgmnn <dan.arne.bergmann@gmail.com>
brgmnn <brgmnn@users.noreply.github.com>
bsidhom <bsidhom@gmail.com>
buinsky <vix_booja@tut.by>
burkemw3 <mburke@amplify.com>
burkemw3 <burkemw3@gmail.com>
calmh <jakob@nym.se>
calmh <jakob@kastelo.net>
canton7 <antony.male@gmail.com>
Cathryne <cathryne.linenweaver@gmail.com>
Cathryne <Cathryne@users.noreply.github.com>
cdata <chris@scriptolo.gy>
cdhowie <me@chrishowie.com>
ceh <emil@hessman.se>
cqcallaw <enlightened.despot@gmail.com>
damajor <damajor@gmail.com>
dinosore <dinosore@dbrsoftware.co.uk>
dtchanpura <dtchanpura@gmail.com>
dtchanpura <dcprime314@gmail.com>
dva <denisva@gmail.com>
dzarda <dzardacz@gmail.com>
eipiminus1 <eipiminusone+github@gmail.com>
eipiminus1 <eipiminus1@users.noreply.github.com>
elopio <yo@elopio.net>
facastagnini <federico.castagnini@gmail.com>
filoozoom <philippe@schommers.be>
frioux <frew@afoolishmanifesto.com>
frioux <frioux@gmail.com>
fti7 <frank@isemann.name>
gillisig <gilli@vx.is>
hadogenes <szafar@linux.pl>
imsodin <freisim93@gmail.com>
ironmig <kma1660@gmail.com>
jarlebring <jarlebring@gmail.com>
jayachithra <s.k.jayachithra@gmail.com>
jedie <github.com@jensdiemer.de>
jedie <git@jensdiemer.de>
jgke <jgke@jgke.fi>
jmdaweb <jmdaweb@hotmail.com>
jmdaweb <jmdaweb@users.noreply.github.com>
jpjp <jamespatterson@operamail.com>
jpjp <jpjp@users.noreply.github.com>
kamadak <kamada@nanohz.org>
KayoticSully <kayoticsully@gmail.com>
kc1212 <kc04bc@gmx.com>
kc1212 <kc1212@users.noreply.github.com>
kilburn <kilburn@la3.org>
kluppy <kluppy@going2blue.com>
kozec <kozec@kozec.com>
kralo <max.schulze@online.de>
kralo <kralo@users.noreply.github.com>
krozycki <rozycki.karol@gmail.com>
Kudalufi <kurt@va1der.ca>
Kudalufi <kurt.fitzner@gmail.com>
kwhite17 <kevinwhite1710@gmail.com>
letiemble <laurent.etiemble@gmail.com>
letiemble <laurent.etiemble@monobjc.net>
liusy182 <liusy182@gmail.com>
liusy182 <liusy182@hotmail.com>
lkwg82 <lkwg82@gmx.de>
LordLandon <lordlandon@gmail.com>
majedev <majed.alhajry@gmail.com>
marcindziadus <dziadus.marcin@gmail.com>
marclaporte <marc@marclaporte.com>
marclaporte <marc@laporte.name>
mateon1 <matin1111@wp.pl>
mogwa1 <devriesb@gmail.com>
moshen <moshen.colin@gmail.com>
Moter8 <moter8@gmail.com>
mpx <mark@kyne.com.au>
mvdan <mvdan@mvdan.cc>
Niller303 <nielsproest@hotmail.com>
Niller303 <seje.niels@hotmail.com>
norgeous <daniel@harte.me>
norgeous <daniel@danielharte.co.uk>
norgeous <norgeous@users.noreply.github.com>
nov1n <robert@carosi.nl>
nrm21 <natemorrison@gmail.com>
Nutomic <me@nutomic.com>
pascalj <github@pascalj.com>
pascalj <mail@pascal-jungblut.com>
peterhoeg <peter@speartail.com>
philips <brandon@ifup.org>
piobpl <piotrb10@gmail.com>
plouj <ploujj@gmail.com>
pluby <phill.luby@newredo.com>
ProactiveServices <aD@simplypeachy.co.uk>
ProactiveServices <simplypeachy@users.noreply.github.com>
ProactiveServices <ProactiveServices@users.noreply.github.com>
pyfisch <pyfisch@gmail.com>
qbit <qbit@deftly.net>
ralder <ralder@yandex.ru>
rasa <ross@smithii.com>
Rewt0r <rewt0r@gmx.com>
Rewt0r <Rewt0r@users.noreply.github.com>
rumpelsepp <stefan@sevenbyte.org>
rumpelsepp <rumpelsepp@sevenbyte.org>
sacheendra <sacheendra.t@gmail.com>
scienmind <scintertech@cryptolab.net>
sciurius <jvromans@squirrel.nl>
seehuhn <voss@seehuhn.de>
Smiley73 <heiko@zuerker.org>
snnd <dw@risu.io>
snugghash <suhas.gundimeda@gmail.com>
snugghash <snugghash@gmail.com>
Stefan-Code <stefan.github@gmail.com>
Stefan-Code <Stefan.github@gmail.com>
timabell <tim@timwise.co.uk>
timhowes <timhowes@berkeley.edu>
tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
tpng <benny.tpng@gmail.com>
tylerbrazier <tyler@tylerbrazier.com>
Unrud <unrud@openaliasbox.org>
Unrud <Unrud@users.noreply.github.com>
uok <ueomkail@gmail.com>
uok <uok@users.noreply.github.com>
veeti <veeti.paananen@rojekti.fi>
Vilbrekin <vilbrekin@gmail.com>
wkennington <william@wkennington.com>
WSGCSysadmin <e.meitner@willystreet.coop>
wweich <wweich@users.noreply.github.com>
wweich <wweich@gmx.de>
wweich <wulf@weich-kr.de>
xduugu <cedric@gmx.ca>
zaynetro <romanznet@gmail.com>
Zillode <zillode@zillode.be>
zukoo <fxgsell@gmail.com>
59  README-Docker.md  Normal file
@@ -0,0 +1,59 @@

# Docker Container for Syncthing

Use the Dockerfile in this repo, or pull the `syncthing/syncthing` image
from Docker Hub.

Use the `/var/syncthing` volume to have the synchronized files available on the
host. You can add more folders and map them as you prefer.

Note that Syncthing runs as UID 1000 and GID 1000 by default. These may be
altered with the ``PUID`` and ``PGID`` environment variables.

## Example Usage

```
$ docker pull syncthing/syncthing
$ docker run -p 8384:8384 -p 22000:22000 \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

## Discovery

Note that local device discovery will not work with the above command,
resulting in poor local transfer rates if local device addresses are not
manually configured.

To allow local discovery, the docker host network can be used instead:

```
$ docker pull syncthing/syncthing
$ docker run --network=host \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

Be aware that syncthing alone is now in control of what interfaces and ports it
listens on. You can edit the syncthing configuration to change the defaults if
there are conflicts.

## GUI Security

By default Syncthing inside the Docker image listens on 0.0.0.0:8384 to
allow GUI connections via the Docker proxy. This is set by the
`STGUIADDRESS` environment variable in the Dockerfile, as it differs from
what Syncthing would otherwise use by default. This means you should set up
authentication in the GUI, like for any other externally reachable Syncthing
instance. If you do not require the GUI, or you use host networking, you can
unset the `STGUIADDRESS` variable to have Syncthing fall back to listening
on 127.0.0.1:

```
$ docker pull syncthing/syncthing
$ docker run -e STGUIADDRESS= \
    -v /wherever/st-sync:/var/syncthing \
    syncthing/syncthing:latest
```

With the environment variable unset Syncthing will follow what is set in the
configuration file / GUI settings dialog.
34  README.md
@@ -2,11 +2,9 @@

---

[](https://build.syncthing.net/job/syncthing/lastBuild/)
[](https://build.syncthing.net/job/syncthing/lastBuild/)
[](https://build.syncthing.net/job/syncthing/lastBuild/)
[](https://build.syncthing.net/job/syncthing/lastBuild/)
[](https://godoc.org/github.com/syncthing/syncthing)
[](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildLinuxCross&guest=1)
[](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildWindows&guest=1)
[](https://build.syncthing.net/viewType.html?buildTypeId=Syncthing_BuildMac&guest=1)
[](https://www.mozilla.org/MPL/2.0/)
[](https://bestpractices.coreinfrastructure.org/projects/88)
[](https://goreportcard.com/report/github.com/syncthing/syncthing)

@@ -21,36 +19,36 @@ commentary, see the full [Goals document][13].

Syncthing should be:

1. Safe From Data Loss
1. **Safe From Data Loss**

   Protecting the user's data is paramount. We take every reasonable
   precaution to avoid corrupting the user's files.

2. Secure Against Attackers
2. **Secure Against Attackers**

   Again, protecting the user's data is paramount. Regardless of our other
   goals we must never allow the user's data to be susceptible to
   eavesdropping or modification by unauthorized parties.

3. Easy to Use
3. **Easy to Use**

   Syncthing should be approachable, understandable and inclusive.

4. Automatic
4. **Automatic**

   User interaction should be required only when absolutely necessary.

5. Universally Available
5. **Universally Available**

   Syncthing should run on every common computer. We are mindful that the
   latest technology is not always available to any given individual.

6. For Individuals
6. **For Individuals**

   Syncthing is primarily about empowering the individual user with safe,
   secure and easy to use file synchronization.

7. Everything Else
7. **Everything Else**

   There are many things we care about that don't make it on to the list. It
   is fine to optimize for these values, as long as they are not in conflict

@@ -64,6 +62,10 @@ There are a few examples for keeping Syncthing running in the background
on your system in [the etc directory][3]. There are also several [GUI
implementations][11] for Windows, Mac and Linux.

## Docker

To run Syncthing in Docker, see [the Docker README][16].

## Vote on features/bugs

We'd like to encourage you to [vote][12] on issues that matter to you.

@@ -88,8 +90,8 @@ D26E6ED000654A3E, available from https://syncthing.net/security.html and
most key servers.

There is also a built in automatic upgrade mechanism (disabled in some
distribution channels) which uses a compiled in ECDSA signature. Mac OS
X binaries are also properly code signed.
distribution channels) which uses a compiled in ECDSA signature. macOS
binaries are also properly code signed.

## Documentation

@@ -111,4 +113,6 @@ All code is licensed under the [MPLv2 License][7].
[12]: https://www.bountysource.com/teams/syncthing/issues
[13]: https://github.com/syncthing/syncthing/blob/master/GOALS.md
[14]: assets/logo-text-128.png
[15]: https://syncthing.net/
[15]: https://syncthing.net/
[16]: https://github.com/syncthing/syncthing/blob/master/README-Docker.md
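The upgrade mechanism mentioned in the README relies on a compiled-in ECDSA public key to check release signatures. Purely to illustrate the primitive involved (this is not Syncthing's upgrade code; the payload and key are generated on the spot), a self-contained sketch of signing and verifying a digest with ECDSA:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// In a real release pipeline the private key stays with the build
	// infrastructure and only the public key ships in the binary.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	payload := []byte("release-archive-bytes")
	digest := sha256.Sum256(payload)

	sig, err := ecdsa.SignASN1(rand.Reader, key, digest[:])
	if err != nil {
		panic(err)
	}

	ok := ecdsa.VerifyASN1(&key.PublicKey, digest[:], sig)
	fmt.Println("signature valid:", ok)
}
```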
BIN  assets/logo.ico  Normal file
Binary file not shown.
After Width: | Height: | Size: 160 KiB

519  build.go
@@ -24,6 +24,7 @@ import (
    "os"
    "os/exec"
    "os/user"
    "path"
    "path/filepath"
    "regexp"
    "runtime"

@@ -39,23 +40,30 @@ var (
    goos string
    noupgrade bool
    version string
    goCmd string
    goVersion float64
    race bool
    debug = os.Getenv("BUILDDEBUG") != ""
    noBuildGopath bool
    extraTags string
    installSuffix string
    pkgdir string
    cc string
    debugBinary bool
    coverage bool
    timeout = "120s"
)

type target struct {
    name string
    debname string
    debdeps []string
    debpre string
    debpost string
    description string
    buildPkg string
    buildPkgs []string
    binaryName string
    archiveFiles []archiveFile
    systemdServices []string
    installationFiles []archiveFile
    tags []string
}

@@ -69,9 +77,8 @@ type archiveFile struct {
var targets = map[string]target{
    "all": {
        // Only valid for the "build" and "install" commands as it lacks all
        // the archive creation stuff.
        buildPkg: "github.com/syncthing/syncthing/cmd/...",
        tags: []string{"purego"},
        // the archive creation stuff. buildPkgs gets filled out in init()
        tags: []string{"purego"},
    },
    "syncthing": {
        // The default target for "build", "install", "tar", "zip", "deb", etc.

@@ -80,7 +87,7 @@ var targets = map[string]target{
        debdeps: []string{"libc6", "procps"},
        debpost: "script/post-upgrade",
        description: "Open Source Continuous File Synchronization",
        buildPkg: "github.com/syncthing/syncthing/cmd/syncthing",
        buildPkgs: []string{"github.com/syncthing/syncthing/cmd/syncthing"},
        binaryName: "syncthing", // .exe will be added automatically for Windows builds
        archiveFiles: []archiveFile{
            {src: "{{binary}}", dst: "{{binary}}", perm: 0755},

@@ -108,27 +115,41 @@ var targets = map[string]target{
            {src: "etc/linux-systemd/system/syncthing-resume.service", dst: "deb/lib/systemd/system/syncthing-resume.service", perm: 0644},
            {src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
            {src: "etc/firewall-ufw/syncthing", dst: "deb/etc/ufw/applications.d/syncthing", perm: 0644},
            {src: "etc/linux-desktop/syncthing-start.desktop", dst: "deb/usr/share/applications/syncthing-start.desktop", perm: 0644},
            {src: "etc/linux-desktop/syncthing-ui.desktop", dst: "deb/usr/share/applications/syncthing-ui.desktop", perm: 0644},
            {src: "assets/logo-32.png", dst: "deb/usr/share/icons/hicolor/32x32/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-64.png", dst: "deb/usr/share/icons/hicolor/64x64/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-128.png", dst: "deb/usr/share/icons/hicolor/128x128/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-256.png", dst: "deb/usr/share/icons/hicolor/256x256/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-512.png", dst: "deb/usr/share/icons/hicolor/512x512/apps/syncthing.png", perm: 0644},
            {src: "assets/logo-only.svg", dst: "deb/usr/share/icons/hicolor/scalable/apps/syncthing.svg", perm: 0644},
        },
    },
    "stdiscosrv": {
        name: "stdiscosrv",
        debname: "syncthing-discosrv",
        debdeps: []string{"libc6"},
        debpre: "cmd/stdiscosrv/scripts/preinst",
        description: "Syncthing Discovery Server",
        buildPkg: "github.com/syncthing/syncthing/cmd/stdiscosrv",
        buildPkgs: []string{"github.com/syncthing/syncthing/cmd/stdiscosrv"},
        binaryName: "stdiscosrv", // .exe will be added automatically for Windows builds
        archiveFiles: []archiveFile{
            {src: "{{binary}}", dst: "{{binary}}", perm: 0755},
            {src: "cmd/stdiscosrv/README.md", dst: "README.txt", perm: 0644},
            {src: "cmd/stdiscosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
            {src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
        },
        systemdServices: []string{
            "cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service",
        },
        installationFiles: []archiveFile{
            {src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
            {src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/syncthing-discosrv/README.txt", perm: 0644},
            {src: "cmd/stdiscosrv/LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
            {src: "LICENSE", dst: "deb/usr/share/doc/syncthing-discosrv/LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-discosrv/AUTHORS.txt", perm: 0644},
            {src: "man/stdiscosrv.1", dst: "deb/usr/share/man/man1/stdiscosrv.1", perm: 0644},
            {src: "cmd/stdiscosrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-discosrv", perm: 0644},
            {src: "cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv", dst: "deb/etc/ufw/applications.d/stdiscosrv", perm: 0644},
        },
        tags: []string{"purego"},
    },

@@ -136,21 +157,29 @@ var targets = map[string]target{
        name: "strelaysrv",
        debname: "syncthing-relaysrv",
        debdeps: []string{"libc6"},
        debpre: "cmd/strelaysrv/scripts/preinst",
        description: "Syncthing Relay Server",
        buildPkg: "github.com/syncthing/syncthing/cmd/strelaysrv",
        buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaysrv"},
        binaryName: "strelaysrv", // .exe will be added automatically for Windows builds
        archiveFiles: []archiveFile{
            {src: "{{binary}}", dst: "{{binary}}", perm: 0755},
            {src: "cmd/strelaysrv/README.md", dst: "README.txt", perm: 0644},
            {src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
            {src: "LICENSE", dst: "LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
        },
        systemdServices: []string{
            "cmd/strelaysrv/etc/linux-systemd/strelaysrv.service",
        },
        installationFiles: []archiveFile{
            {src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
            {src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/syncthing-relaysrv/README.txt", perm: 0644},
            {src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
            {src: "LICENSE", dst: "deb/usr/share/doc/syncthing-relaysrv/LICENSE.txt", perm: 0644},
            {src: "AUTHORS", dst: "deb/usr/share/doc/syncthing-relaysrv/AUTHORS.txt", perm: 0644},
            {src: "man/strelaysrv.1", dst: "deb/usr/share/man/man1/strelaysrv.1", perm: 0644},
            {src: "cmd/strelaysrv/etc/linux-systemd/default", dst: "deb/etc/default/syncthing-relaysrv", perm: 0644},
            {src: "cmd/strelaysrv/etc/firewall-ufw/strelaysrv", dst: "deb/etc/ufw/applications.d/strelaysrv", perm: 0644},
        },
    },
    "strelaypoolsrv": {

@@ -158,7 +187,7 @@ var targets = map[string]target{
        debname: "syncthing-relaypoolsrv",
        debdeps: []string{"libc6"},
        description: "Syncthing Relay Pool Server",
        buildPkg: "github.com/syncthing/syncthing/cmd/strelaypoolsrv",
        buildPkgs: []string{"github.com/syncthing/syncthing/cmd/strelaypoolsrv"},
        binaryName: "strelaypoolsrv", // .exe will be added automatically for Windows builds
        archiveFiles: []archiveFile{
            {src: "{{binary}}", dst: "{{binary}}", perm: 0755},

@@ -175,7 +204,31 @@ var targets = map[string]target{
    },
}

// These are repos we need to clone to run "go generate"

type dependencyRepo struct {
    path string
    repo string
    commit string
}

var dependencyRepos = []dependencyRepo{
    {path: "xdr", repo: "https://github.com/calmh/xdr.git", commit: "08e072f9cb16"},
}

func init() {
    all := targets["all"]
    pkgs, _ := filepath.Glob("cmd/*")
    for _, pkg := range pkgs {
        pkg = filepath.Base(pkg)
        if strings.HasPrefix(pkg, ".") {
            // ignore dotfiles
            continue
        }
        all.buildPkgs = append(all.buildPkgs, fmt.Sprintf("github.com/syncthing/syncthing/cmd/%s", pkg))
    }
    targets["all"] = all

    // The "syncthing" target includes a few more files found in the "etc"
    // and "extra" dirs.
    syncthingPkg := targets["syncthing"]

@@ -203,27 +256,6 @@ func main() {
        }()
    }

    if gopath() == "" {
        gopath, err := temporaryBuildDir()
        if err != nil {
            log.Fatal(err)
        }
        if !noBuildGopath {
            lazyRebuildAssets()
            if err := buildGOPATH(gopath); err != nil {
                log.Fatal(err)
            }
        }
        os.Setenv("GOPATH", gopath)
        log.Println("GOPATH is", gopath)
    }

    // Set path to $GOPATH/bin:$PATH so that we can for sure find tools we
    // might have installed during "build.go setup".
    os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))

    checkArchitecture()

    // Invoking build.go with no parameters at all builds everything (incrementally),
    // which is what you want for maximum error checking during development.
    if flag.NArg() == 0 {

@@ -245,20 +277,8 @@ func main() {
    }
}

func checkArchitecture() {
    switch goarch {
    case "386", "amd64", "arm", "arm64", "ppc64", "ppc64le", "mips", "mipsle":
        break
    default:
        log.Printf("Unknown goarch %q; proceed with caution!", goarch)
    }
}

func runCommand(cmd string, target target) {
    switch cmd {
    case "setup":
        setup()

    case "install":
        var tags []string
        if noupgrade {

@@ -306,9 +326,6 @@ func runCommand(cmd string, target target) {
    case "snap":
        buildSnap(target)

    case "clean":
        clean()

    case "vet":
        metalintShort()

@@ -321,13 +338,6 @@ func runCommand(cmd string, target target) {
    case "version":
        fmt.Println(getVersion())

    case "gopath":
        gopath, err := temporaryBuildDir()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(gopath)

    default:
        log.Fatalf("Unknown command %q", cmd)
    }

@@ -336,61 +346,41 @@ func runCommand(cmd string, target target) {
func parseFlags() {
    flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
    flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
    flag.StringVar(&goCmd, "gocmd", "go", "Specify `go` command")
    flag.BoolVar(&noupgrade, "no-upgrade", noupgrade, "Disable upgrade functionality")
    flag.StringVar(&version, "version", getVersion(), "Set compiled in version string")
    flag.BoolVar(&race, "race", race, "Use race detector")
    flag.BoolVar(&noBuildGopath, "no-build-gopath", noBuildGopath, "Don't build GOPATH, assume it's OK")
    flag.StringVar(&extraTags, "tags", extraTags, "Extra tags, space separated")
    flag.StringVar(&installSuffix, "installsuffix", installSuffix, "Install suffix, optional")
    flag.StringVar(&pkgdir, "pkgdir", "", "Set -pkgdir parameter for `go build`")
    flag.StringVar(&cc, "cc", os.Getenv("CC"), "Set CC environment variable for `go build`")
    flag.BoolVar(&debugBinary, "debug-binary", debugBinary, "Create unoptimized binary to use with delve, set -gcflags='-N -l' and omit -ldflags")
    flag.BoolVar(&coverage, "coverage", coverage, "Write coverage profile of tests to coverage.txt")
    flag.Parse()
}

func setup() {
    packages := []string{
        "github.com/alecthomas/gometalinter",
        "github.com/AlekSi/gocov-xml",
        "github.com/axw/gocov/gocov",
        "github.com/FiloSottile/gvt",
        "github.com/golang/lint/golint",
        "github.com/gordonklaus/ineffassign",
        "github.com/mdempsky/unconvert",
        "github.com/mitchellh/go-wordwrap",
        "github.com/opennota/check/cmd/...",
        "github.com/tsenart/deadcode",
        "golang.org/x/net/html",
        "golang.org/x/tools/cmd/cover",
        "honnef.co/go/simple/cmd/gosimple",
        "honnef.co/go/staticcheck/cmd/staticcheck",
        "honnef.co/go/unused/cmd/unused",
    }
    for _, pkg := range packages {
        fmt.Println(pkg)
        runPrint("go", "get", "-u", pkg)
    }

    runPrint("go", "install", "-v", "github.com/syncthing/syncthing/vendor/github.com/gogo/protobuf/protoc-gen-gogofast")
}

func test(pkgs ...string) {
    lazyRebuildAssets()

    useRace := runtime.GOARCH == "amd64"
    switch runtime.GOOS {
    case "darwin", "linux", "freebsd", "windows":
    default:
        useRace = false
    args := []string{"test", "-short", "-timeout", timeout, "-tags", "purego"}

    if runtime.GOARCH == "amd64" {
        switch runtime.GOOS {
        case "darwin", "linux", "freebsd": // , "windows": # See https://github.com/golang/go/issues/27089
            args = append(args, "-race")
        }
    }

    if useRace {
        runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s", "-tags", "purego"}, pkgs...)...)
    } else {
        runPrint("go", append([]string{"test", "-short", "-timeout", "60s", "-tags", "purego"}, pkgs...)...)
    if coverage {
        args = append(args, "-covermode", "atomic", "-coverprofile", "coverage.txt", "-coverpkg", strings.Join(pkgs, ","))
    }

    runPrint(goCmd, append(args, pkgs...)...)
}

func bench(pkgs ...string) {
    lazyRebuildAssets()
    runPrint("go", append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
    runPrint(goCmd, append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
}

func install(target target, tags []string) {

@@ -403,30 +393,67 @@ func install(target target, tags []string) {
        log.Fatal(err)
    }
    os.Setenv("GOBIN", filepath.Join(cwd, "bin"))
    args := []string{"install", "-v", "-ldflags", ldflags()}
    if len(tags) > 0 {
        args = append(args, "-tags", strings.Join(tags, " "))
    }
    if installSuffix != "" {
        args = append(args, "-installsuffix", installSuffix)
    }
    if race {
        args = append(args, "-race")
    }
    args = append(args, target.buildPkg)

    os.Setenv("GOOS", goos)
    os.Setenv("GOARCH", goarch)
    runPrint("go", args...)
    os.Setenv("CC", cc)

    // On Windows generate a special file which the Go compiler will
    // automatically use when generating Windows binaries to set things like
    // the file icon, version, etc.
    if goos == "windows" {
        sysoPath, err := shouldBuildSyso(cwd)
        if err != nil {
            log.Printf("Warning: Windows binaries will not have file information encoded: %v", err)
        }
        defer shouldCleanupSyso(sysoPath)
    }

    for _, pkg := range target.buildPkgs {
        args := []string{"install", "-v"}
        args = appendParameters(args, tags, pkg)

        runPrint(goCmd, args...)
    }
}

func build(target target, tags []string) {
    lazyRebuildAssets()

    tags = append(target.tags, tags...)

    rmr(target.BinaryName())
    args := []string{"build", "-i", "-v", "-ldflags", ldflags()}

    os.Setenv("GOOS", goos)
    os.Setenv("GOARCH", goarch)
    os.Setenv("CC", cc)

    // On Windows generate a special file which the Go compiler will
    // automatically use when generating Windows binaries to set things like
    // the file icon, version, etc.
    if goos == "windows" {
        cwd, err := os.Getwd()
        if err != nil {
            log.Fatal(err)
        }
        sysoPath, err := shouldBuildSyso(cwd)
        if err != nil {
            log.Printf("Warning: Windows binaries will not have file information encoded: %v", err)
        }
        defer shouldCleanupSyso(sysoPath)
    }

    for _, pkg := range target.buildPkgs {
        args := []string{"build", "-v"}
        args = appendParameters(args, tags, pkg)

        runPrint(goCmd, args...)
    }
}

func appendParameters(args []string, tags []string, pkg string) []string {
    if pkgdir != "" {
        args = append(args, "-pkgdir", pkgdir)
    }
    if len(tags) > 0 {
        args = append(args, "-tags", strings.Join(tags, " "))
    }

@@ -436,11 +463,19 @@ func build(target target, tags []string) {
    if race {
        args = append(args, "-race")
    }
    args = append(args, target.buildPkg)

    os.Setenv("GOOS", goos)
    os.Setenv("GOARCH", goarch)
    runPrint("go", args...)
    if !debugBinary {
        // Regular binaries get version tagged and skip some debug symbols
        args = append(args, "-ldflags", ldflags(path.Base(pkg)))
    } else {
        // -gcflags to disable optimizations and inlining. Skip -ldflags
        // because `Could not launch program: decoding dwarf section info at
        // offset 0x0: too short` on 'dlv exec ...' see
        // https://github.com/derekparker/delve/issues/79
        args = append(args, "-gcflags", "-N -l")
    }

    return append(args, pkg)
}

func buildTar(target target) {

@@ -481,6 +516,10 @@ func buildZip(target target) {

    build(target, tags)

    if goos == "windows" {
        windowsCodesign(target.BinaryName())
    }

    for i := range target.archiveFiles {
        target.archiveFiles[i].src = strings.Replace(target.archiveFiles[i].src, "{{binary}}", target.BinaryName(), 1)
        target.archiveFiles[i].dst = strings.Replace(target.archiveFiles[i].dst, "{{binary}}", target.BinaryName(), 1)

@@ -543,9 +582,15 @@ func buildDeb(target target) {
    for _, dep := range target.debdeps {
        args = append(args, "-d", dep)
    }
    for _, service := range target.systemdServices {
        args = append(args, "--deb-systemd", service)
    }
    if target.debpost != "" {
        args = append(args, "--after-upgrade", target.debpost)
    }
    if target.debpre != "" {
        args = append(args, "--before-install", target.debpre)
    }
    runPrint("fpm", args...)
}

@@ -565,6 +610,8 @@ func buildSnap(target target) {
    snaparch := goarch
    if snaparch == "armhf" {
        goarch = "arm"
    } else if snaparch == "i386" {
        goarch = "386"
    }
    snapver := version
    if strings.HasPrefix(snapver, "v") {

@@ -575,9 +622,10 @@ func buildSnap(target target) {
        snapgrade = "stable"
    }
    err = tmpl.Execute(f, map[string]string{
        "Version": snapver,
        "Architecture": snaparch,
        "Grade": snapgrade,
        "Version": snapver,
        "HostArchitecture": runtime.GOARCH,
        "TargetArchitecture": snaparch,
        "Grade": snapgrade,
    })
    if err != nil {
        log.Fatal(err)

@@ -587,6 +635,56 @@ func buildSnap(target target) {
    runPrint("snapcraft")
}

func shouldBuildSyso(dir string) (string, error) {
    jsonPath := filepath.Join(dir, "versioninfo.json")
    file, err := os.Create(filepath.Join(dir, "versioninfo.json"))
    if err != nil {
        return "", errors.New("failed to create " + jsonPath + ": " + err.Error())
    }

    major, minor, patch, build := semanticVersion()
    fmt.Fprintf(file, `{
    "FixedFileInfo": {
        "FileVersion": {
            "Major": %s,
            "Minor": %s,
            "Patch": %s,
            "Build": %s
        }
    },
    "StringFileInfo": {
        "FileDescription": "Open Source Continuous File Synchronization",
        "LegalCopyright": "The Syncthing Authors",
        "ProductVersion": "%s",
        "ProductName": "Syncthing"
    },
    "IconPath": "assets/logo.ico"
}`, major, minor, patch, build, getVersion())
    file.Close()
    defer func() {
        if err := os.Remove(jsonPath); err != nil {
            log.Printf("Warning: unable to remove generated %s: %v. Please remove it manually.", jsonPath, err)
        }
    }()

    sysoPath := filepath.Join(dir, "cmd", "syncthing", "resource.syso")

    if _, err := runError("goversioninfo", "-o", sysoPath); err != nil {
        return "", errors.New("failed to create " + sysoPath + ": " + err.Error())
    }

    return sysoPath, nil
}

func shouldCleanupSyso(sysoFilePath string) {
    if sysoFilePath == "" {
        return
    }
    if err := os.Remove(sysoFilePath); err != nil {
        log.Printf("Warning: unable to remove generated %s: %v. Please remove it manually.", sysoFilePath, err)
    }
}

// copyFile copies a file from src to dst, ensuring the containing directory
// exists. The permission bits are copied as well. If dst already exists and
// the contents are identical to src the modification time is not updated.

@@ -625,6 +723,7 @@ func listFiles(dir string) []string {
        if err != nil {
            return err
        }

        if fi.Mode().IsRegular() {
            res = append(res, path)
        }

@@ -634,12 +733,12 @@ func listFiles(dir string) []string {
}

func rebuildAssets() {
    runPipe("lib/auto/gui.files.go", "go", "run", "script/genassets.go", "gui")
    runPipe("cmd/strelaypoolsrv/auto/gui.go", "go", "run", "script/genassets.go", "cmd/strelaypoolsrv/gui")
    os.Setenv("SOURCE_DATE_EPOCH", fmt.Sprint(buildStamp()))
    runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/auto", "github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto")
}

func lazyRebuildAssets() {
    if shouldRebuildAssets("lib/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.go", "cmd/strelaypoolsrv/auto/gui") {
    if shouldRebuildAssets("lib/auto/gui.files.go", "gui") || shouldRebuildAssets("cmd/strelaypoolsrv/auto/gui.files.go", "cmd/strelaypoolsrv/auto/gui") {
        rebuildAssets()
    }
}

@@ -671,12 +770,28 @@ func shouldRebuildAssets(target, srcdir string) bool {
}

func proto() {
    runPrint("go", "generate", "github.com/syncthing/syncthing/lib/...")
    pv := protobufVersion()
    dependencyRepos = append(dependencyRepos,
        dependencyRepo{path: "protobuf", repo: "https://github.com/gogo/protobuf.git", commit: pv},
    )

    runPrint(goCmd, "get", fmt.Sprintf("github.com/gogo/protobuf/protoc-gen-gogofast@%v", pv))
    os.MkdirAll("repos", 0755)
    for _, dep := range dependencyRepos {
        path := filepath.Join("repos", dep.path)
        if _, err := os.Stat(path); err != nil {
            runPrintInDir("repos", "git", "clone", dep.repo, dep.path)
        } else {
            runPrintInDir(path, "git", "fetch")
        }
        runPrintInDir(path, "git", "checkout", dep.commit)
    }
    runPrint(goCmd, "generate", "github.com/syncthing/syncthing/lib/...", "github.com/syncthing/syncthing/cmd/stdiscosrv")
}

func translate() {
    os.Chdir("gui/default/assets/lang")
    runPipe("lang-en-new.json", "go", "run", "../../../../script/translate.go", "lang-en.json", "../../../")
    runPipe("lang-en-new.json", goCmd, "run", "../../../../script/translate.go", "lang-en.json", "../../../")
    os.Remove("lang-en.json")
    err := os.Rename("lang-en-new.json", "lang-en.json")
    if err != nil {

@@ -687,15 +802,10 @@ func translate() {

func transifex() {
    os.Chdir("gui/default/assets/lang")
    runPrint("go", "run", "../../../../script/transifexdl.go")
    runPrint(goCmd, "run", "../../../../script/transifexdl.go")
}

func clean() {
    rmr("bin")
    rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}

func ldflags() string {
func ldflags(program string) string {
    sep := '='
    if goVersion > 0 && goVersion < 1.5 {
        sep = ' '

@@ -703,10 +813,14 @@ func ldflags() string {

    b := new(bytes.Buffer)
    b.WriteString("-w")
    fmt.Fprintf(b, " -X main.Version%c%s", sep, version)
    fmt.Fprintf(b, " -X main.BuildStamp%c%d", sep, buildStamp())
    fmt.Fprintf(b, " -X main.BuildUser%c%s", sep, buildUser())
    fmt.Fprintf(b, " -X main.BuildHost%c%s", sep, buildHost())
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Version%c%s", sep, version)
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Stamp%c%d", sep, buildStamp())
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.User%c%s", sep, buildUser())
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Host%c%s", sep, buildHost())
    fmt.Fprintf(b, " -X github.com/syncthing/syncthing/lib/build.Program%c%s", sep, program)
    if v := os.Getenv("EXTRA_LDFLAGS"); v != "" {
        fmt.Fprintf(b, " %s", v)
    }
    return b.String()
}
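The ldflags change above stops injecting values into package `main` and instead targets variables in `github.com/syncthing/syncthing/lib/build` (Version, Stamp, User, Host, plus the new Program). The mechanism is Go's standard `-X` linker flag, which overrides a package-level string variable at link time. A minimal standalone sketch of that mechanism, with a made-up package and variable names purely for illustration:

```go
// version.go — build with, for example:
//   go build -ldflags "-X main.Version=v1.3.0 -X main.Program=example" version.go
package main

import "fmt"

// These defaults are replaced at link time by the -X flags shown above.
var (
	Version = "unknown-dev"
	Program = "example"
)

func main() {
	fmt.Printf("%s %s\n", Program, Version)
}
```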
@@ -763,6 +877,15 @@ func getVersion() string {
    return "unknown-dev"
}

func semanticVersion() (major, minor, patch, build string) {
    r := regexp.MustCompile(`v(?P<Major>\d+)\.(?P<Minor>\d+).(?P<Patch>\d+).*\+(?P<CommitsAhead>\d+)`)
    matches := r.FindStringSubmatch(getVersion())
    if len(matches) != 5 {
        return "0", "0", "0", "0"
    }
    return matches[1], matches[2], matches[3], matches[4]
}

func getBranchSuffix() string {
    bs, err := runError("git", "branch", "-a", "--contains")
    if err != nil {

@@ -857,7 +980,7 @@ func buildHost() string {
func buildArch() string {
    os := goos
    if os == "darwin" {
        os = "macosx"
        os = "macos"
    }
    return fmt.Sprintf("%s-%s", os, goarch)
}

@@ -880,6 +1003,10 @@ func runError(cmd string, args ...string) ([]byte, error) {
}

func runPrint(cmd string, args ...string) {
    runPrintInDir(".", cmd, args...)
}

func runPrintInDir(dir string, cmd string, args ...string) {
    if debug {
        t0 := time.Now()
        log.Println("runPrint:", cmd, strings.Join(args, " "))

@@ -890,6 +1017,7 @@ func runPrint(cmd string, args ...string) {
    ecmd := exec.Command(cmd, args...)
    ecmd.Stdout = os.Stdout
    ecmd.Stderr = os.Stderr
    ecmd.Dir = dir
    err := ecmd.Run()
    if err != nil {
        log.Fatal(err)

@@ -1070,14 +1198,51 @@ func macosCodesign(file string) {
    }
}

func windowsCodesign(file string) {
    st := "signtool.exe"

    if path := os.Getenv("CODESIGN_SIGNTOOL"); path != "" {
        st = path
    }

    for i, algo := range []string{"sha1", "sha256"} {
        args := []string{"sign", "/fd", algo}
        if f := os.Getenv("CODESIGN_CERTIFICATE_FILE"); f != "" {
            args = append(args, "/f", f)
        }
        if p := os.Getenv("CODESIGN_CERTIFICATE_PASSWORD"); p != "" {
            args = append(args, "/p", p)
        }
        if tr := os.Getenv("CODESIGN_TIMESTAMP_SERVER"); tr != "" {
            switch algo {
            case "sha256":
                args = append(args, "/tr", tr, "/td", algo)
            default:
                args = append(args, "/t", tr)
            }
        }
        if i > 0 {
            args = append(args, "/as")
        }
        args = append(args, file)

        bs, err := runError(st, args...)
        if err != nil {
            log.Println("Codesign: signing failed:", string(bs))
            return
        }
        log.Println("Codesign: successfully signed", file, "using", algo)
    }
}

func metalint() {
    lazyRebuildAssets()
    runPrint("go", "test", "-run", "Metalint", "./meta")
    runPrint(goCmd, "test", "-run", "Metalint", "./meta")
}

func metalintShort() {
    lazyRebuildAssets()
    runPrint("go", "test", "-short", "-run", "Metalint", "./meta")
    runPrint(goCmd, "test", "-short", "-run", "Metalint", "./meta")
}

func temporaryBuildDir() (string, error) {

@@ -1106,93 +1271,17 @@ func temporaryBuildDir() (string, error) {
    return filepath.Join(tmpDir, base), nil
}

func buildGOPATH(gopath string) error {
    pkg := filepath.Join(gopath, "src/github.com/syncthing/syncthing")
    dirs := []string{"cmd", "lib", "meta", "script", "test", "vendor"}

    if debug {
        t0 := time.Now()
        log.Println("build temporary GOPATH in", gopath)
        defer func() {
            log.Println("... in", time.Since(t0))
        }()
    }

    // Walk the sources and copy the files into the temporary GOPATH.
    // Remember which files are supposed to be present so we can clean
    // out everything else in the next step. The copyFile() step will
    // only actually copy the file if it doesn't exist or the contents
    // differ.

    exists := map[string]struct{}{}
    for _, dir := range dirs {
        err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if info.IsDir() {
                return nil
            }

            dst := filepath.Join(pkg, path)
            exists[dst] = struct{}{}

            if err := copyFile(path, dst, info.Mode()); err != nil {
                return err
            }

            return nil
        })
        if err != nil {
            return err
        }
    }

    // Walk the temporary GOPATH and remove any files that we wouldn't
    // have copied there in the previous step.

    filepath.Walk(pkg, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if info.IsDir() {
            return nil
        }
        if _, ok := exists[path]; !ok {
            os.Remove(path)
        }
        return nil
    })

    return nil
}

func gopath() string {
    if gopath := os.Getenv("GOPATH"); gopath != "" {
        // The env var is set, use that.
        return gopath
    }

    // Ask Go what it thinks.
    bs, err := runError("go", "env", "GOPATH")
    if err != nil {
        return ""
    }

    // We got something. Check if we are in fact available in that location.
    gopath := string(bs)
    if _, err := os.Stat(filepath.Join(gopath, "src/github.com/syncthing/syncthing/build.go")); err == nil {
        // That seems to be the gopath.
        return gopath
    }

    // The gopath is not valid.
    return ""
}

func (t target) BinaryName() string {
    if goos == "windows" {
        return t.binaryName + ".exe"
    }
    return t.binaryName
}

func protobufVersion() string {
    bs, err := runError(goCmd, "list", "-f", "{{.Version}}", "-m", "github.com/gogo/protobuf")
    if err != nil {
        log.Fatal("Getting protobuf version:", err)
    }
    return string(bs)
}
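The `semanticVersion` helper added in this build.go revision pulls the numeric components out of the `git describe`-style version string using a named-group regular expression. A small self-contained sketch of the same parsing, using the identical pattern and a made-up sample version string:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as semanticVersion() in build.go.
	r := regexp.MustCompile(`v(?P<Major>\d+)\.(?P<Minor>\d+).(?P<Patch>\d+).*\+(?P<CommitsAhead>\d+)`)

	// Hypothetical output of getVersion() on an untagged commit.
	version := "v1.2.1-rc.1+30-g6aaae618"

	m := r.FindStringSubmatch(version)
	if len(m) != 5 {
		fmt.Println("no match; the caller falls back to 0.0.0.0")
		return
	}
	fmt.Printf("major=%s minor=%s patch=%s build=%s\n", m[1], m[2], m[3], m[4])
}
```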
10  build.sh
@@ -48,9 +48,6 @@ case "${1:-default}" in
    ;;

    test)
        ulimit -t 600 &>/dev/null || true
        ulimit -d 512000 &>/dev/null || true
        ulimit -m 512000 &>/dev/null || true
        LOGGER_DISCARD=1 build test
        ;;

@@ -62,8 +59,8 @@ case "${1:-default}" in
        go run script/authors.go
        build transifex
        pushd man ; ./refresh.sh ; popd
        git add -A gui man
        git commit -m 'gui, man: Update docs & translations'
        git add -A gui man AUTHORS
        git commit -m 'gui, man, authors: Update docs, translations, and contributors'
        ;;

    noupgrade)

@@ -94,9 +91,6 @@ case "${1:-default}" in
        ;;

    test-xunit)
        ulimit -t 600 &>/dev/null || true
        ulimit -d 512000 &>/dev/null || true
        ulimit -m 512000 &>/dev/null || true

        (GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -v -race ./lib/... ./cmd/... || true) > tests.out
        go2xunit -output tests.xml -fail < tests.out
@@ -1,143 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// This doesn't build on Windows due to the Rusage stuff.

// +build !windows

package main

import (
    "flag"
    "fmt"
    "log"
    "runtime"
    "syscall"
    "time"

    "github.com/syncthing/syncthing/lib/rc"
)

var homeDir = "h1"
var syncthingBin = "./bin/syncthing"
var test = "scan"

func main() {
    flag.StringVar(&homeDir, "home", homeDir, "Home directory location")
    flag.StringVar(&syncthingBin, "bin", syncthingBin, "Binary location")
    flag.StringVar(&test, "test", test, "Test to run")
    flag.Parse()

    switch test {
    case "scan":
        // scan measures the resource usage required to perform the initial
        // scan, without cleaning away the database first.
        testScan()
    }
}

// testScan starts a process and reports on the resource usage required to
// perform the initial scan.
func testScan() {
    log.Println("Starting...")
    p := rc.NewProcess("127.0.0.1:8081")
    if err := p.Start(syncthingBin, "-home", homeDir, "-no-browser"); err != nil {
        log.Println(err)
        return
    }
    defer p.Stop()

    wallTime := awaitScanComplete(p)

    report(p, wallTime)
}

// awaitScanComplete waits for a folder to transition idle->scanning and
// then scanning->idle and returns the time taken for the scan.
func awaitScanComplete(p *rc.Process) time.Duration {
    log.Println("Awaiting scan completion...")
    var t0, t1 time.Time
    lastEvent := 0
loop:
    for {
        evs, err := p.Events(lastEvent)
        if err != nil {
            continue
        }

        for _, ev := range evs {
            if ev.Type == "StateChanged" {
                data := ev.Data.(map[string]interface{})
                log.Println(ev)

                if data["to"].(string) == "scanning" {
                    t0 = ev.Time
                    continue
                }

                if !t0.IsZero() && data["to"].(string) == "idle" {
                    t1 = ev.Time
                    break loop
                }
            }
            lastEvent = ev.ID
        }

        time.Sleep(250 * time.Millisecond)
    }

    return t1.Sub(t0)
}

// report stops the given process and reports on it's resource usage in two
// ways: human readable to stderr, and CSV to stdout.
func report(p *rc.Process, wallTime time.Duration) {
    sv, err := p.SystemVersion()
    if err != nil {
        log.Println(err)
        return
    }

    ss, err := p.SystemStatus()
    if err != nil {
        log.Println(err)
        return
    }

    proc, err := p.Stop()
    if err != nil {
        return
    }

    rusage, ok := proc.SysUsage().(*syscall.Rusage)
    if !ok {
        return
    }

    log.Println("Version:", sv.Version)
    log.Println("Alloc:", ss.Alloc/1024, "KiB")
    log.Println("Sys:", ss.Sys/1024, "KiB")
    log.Println("Goroutines:", ss.Goroutines)
    log.Println("Wall time:", wallTime)
    log.Println("Utime:", time.Duration(rusage.Utime.Nano()))
    log.Println("Stime:", time.Duration(rusage.Stime.Nano()))
    if runtime.GOOS == "darwin" {
        // Darwin reports in bytes, Linux seems to report in KiB even
        // though the manpage says otherwise.
        rusage.Maxrss /= 1024
    }
    log.Println("MaxRSS:", rusage.Maxrss, "KiB")

    fmt.Printf("%s,%d,%d,%d,%.02f,%.02f,%.02f,%d\n",
        sv.Version,
        ss.Alloc/1024,
        ss.Sys/1024,
        ss.Goroutines,
        wallTime.Seconds(),
        time.Duration(rusage.Utime.Nano()).Seconds(),
        time.Duration(rusage.Stime.Nano()).Seconds(),
        rusage.Maxrss)
}
@@ -1,19 +0,0 @@
Copyright (C) 2014 Audrius Butkevičius

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

- The above copyright notice and this permission notice shall be included in
  all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,115 +1,95 @@
// Copyright (C) 2014 Audrius Butkevičius
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
    "bytes"
    "context"
    "crypto/tls"
    "fmt"
    "net"
    "net/http"
    "strings"

    "github.com/AudriusButkevicius/cli"
    "github.com/syncthing/syncthing/lib/config"
)

type APIClient struct {
    httpClient http.Client
    endpoint string
    apikey string
    username string
    password string
    id string
    csrf string
    http.Client
    cfg config.GUIConfiguration
    apikey string
}

var instance *APIClient

func getClient(c *cli.Context) *APIClient {
    if instance != nil {
        return instance
    }
    endpoint := c.GlobalString("endpoint")
    if !strings.HasPrefix(endpoint, "http") {
        endpoint = "http://" + endpoint
    }
func getClient(cfg config.GUIConfiguration) *APIClient {
    httpClient := http.Client{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{
                InsecureSkipVerify: c.GlobalBool("insecure"),
                InsecureSkipVerify: true,
            },
            DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
                return net.Dial(cfg.Network(), cfg.Address())
            },
        },
    }
    client := APIClient{
        httpClient: httpClient,
        endpoint: endpoint,
        apikey: c.GlobalString("apikey"),
        username: c.GlobalString("username"),
        password: c.GlobalString("password"),
    return &APIClient{
        Client: httpClient,
        cfg: cfg,
        apikey: cfg.APIKey,
    }

    if client.apikey == "" {
        request, err := http.NewRequest("GET", client.endpoint, nil)
        die(err)
        response := client.handleRequest(request)
        client.id = response.Header.Get("X-Syncthing-ID")
        if client.id == "" {
            die("Failed to get device ID")
        }
        for _, item := range response.Cookies() {
            if item.Name == "CSRF-Token-"+client.id[:5] {
                client.csrf = item.Value
                goto csrffound
            }
        }
        die("Failed to get CSRF token")
    csrffound:
    }
    instance = &client
    return &client
}

func (client *APIClient) handleRequest(request *http.Request) *http.Response {
    if client.apikey != "" {
        request.Header.Set("X-API-Key", client.apikey)
func (c *APIClient) Endpoint() string {
    if c.cfg.Network() == "unix" {
        return "http://unix/"
    }
    if client.username != "" || client.password != "" {
        request.SetBasicAuth(client.username, client.password)
    }
    if client.csrf != "" {
        request.Header.Set("X-CSRF-Token-"+client.id[:5], client.csrf)
    url := c.cfg.URL()
    if !strings.HasSuffix(url, "/") {
        url += "/"
    }
    return url
}

    response, err := client.httpClient.Do(request)
    die(err)
func (c *APIClient) Do(req *http.Request) (*http.Response, error) {
    req.Header.Set("X-API-Key", c.apikey)
    resp, err := c.Client.Do(req)
    if err != nil {
        return nil, err
    }
    return resp, checkResponse(resp)
}

func (c *APIClient) Get(url string) (*http.Response, error) {
    request, err := http.NewRequest("GET", c.Endpoint()+"rest/"+url, nil)
    if err != nil {
        return nil, err
    }
    return c.Do(request)
}

func (c *APIClient) Post(url, body string) (*http.Response, error) {
    request, err := http.NewRequest("POST", c.Endpoint()+"rest/"+url, bytes.NewBufferString(body))
    if err != nil {
        return nil, err
    }
    return c.Do(request)
}

func checkResponse(response *http.Response) error {
    if response.StatusCode == 404 {
        die("Invalid endpoint or API call")
    } else if response.StatusCode == 401 {
        die("Invalid username or password")
        return fmt.Errorf("Invalid endpoint or API call")
    } else if response.StatusCode == 403 {
        if client.apikey == "" {
            die("Invalid CSRF token")
        }
        die("Invalid API key")
        return fmt.Errorf("Invalid API key")
    } else if response.StatusCode != 200 {
        body := strings.TrimSpace(string(responseToBArray(response)))
        if body != "" {
            die(body)
        data, err := responseToBArray(response)
        if err != nil {
            return err
        }
        die("Unknown HTTP status returned: " + response.Status)
        body := strings.TrimSpace(string(data))
        return fmt.Errorf("Unexpected HTTP status returned: %s\n%s", response.Status, body)
    }
    return response
}

func httpGet(c *cli.Context, url string) *http.Response {
    client := getClient(c)
    request, err := http.NewRequest("GET", client.endpoint+"/rest/"+url, nil)
    die(err)
    return client.handleRequest(request)
}

func httpPost(c *cli.Context, url string, body string) *http.Response {
    client := getClient(c)
    request, err := http.NewRequest("POST", client.endpoint+"/rest/"+url, bytes.NewBufferString(body))
    die(err)
    return client.handleRequest(request)
    return nil
}
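The rewritten client above talks to the GUI/REST API through whatever the GUI configuration specifies — a TCP address or a unix socket — by overriding the transport's `DialContext`, and it authenticates with the `X-API-Key` header. A stripped-down sketch of the same pattern outside the CLI (the socket path, request path, and API key here are placeholders, not values taken from Syncthing):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Ignore the URL's host and always dial the configured endpoint,
			// in this case a hypothetical unix socket.
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/example/gui.sock")
			},
		},
	}

	req, err := http.NewRequest("GET", "http://unix/rest/system/status", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", "placeholder-api-key")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```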
@@ -1,188 +0,0 @@
// Copyright (C) 2014 Audrius Butkevičius

package main

import (
    "fmt"
    "strings"

    "github.com/AudriusButkevicius/cli"
    "github.com/syncthing/syncthing/lib/config"
)

func init() {
    cliCommands = append(cliCommands, cli.Command{
        Name: "devices",
        HideHelp: true,
        Usage: "Device command group",
        Subcommands: []cli.Command{
            {
                Name: "list",
                Usage: "List registered devices",
                Requires: &cli.Requires{},
                Action: devicesList,
            },
            {
                Name: "add",
                Usage: "Add a new device",
                Requires: &cli.Requires{"device id", "device name?"},
                Action: devicesAdd,
            },
            {
                Name: "remove",
                Usage: "Remove an existing device",
                Requires: &cli.Requires{"device id"},
                Action: devicesRemove,
            },
            {
                Name: "get",
                Usage: "Get a property of a device",
                Requires: &cli.Requires{"device id", "property"},
                Action: devicesGet,
            },
            {
                Name: "set",
                Usage: "Set a property of a device",
                Requires: &cli.Requires{"device id", "property", "value..."},
                Action: devicesSet,
            },
        },
    })
}

func devicesList(c *cli.Context) {
    cfg := getConfig(c)
    first := true
    writer := newTableWriter()
    for _, device := range cfg.Devices {
        if !first {
            fmt.Fprintln(writer)
        }
        fmt.Fprintln(writer, "ID:\t", device.DeviceID, "\t")
        fmt.Fprintln(writer, "Name:\t", device.Name, "\t(name)")
        fmt.Fprintln(writer, "Address:\t", strings.Join(device.Addresses, " "), "\t(address)")
        fmt.Fprintln(writer, "Compression:\t", device.Compression, "\t(compression)")
        fmt.Fprintln(writer, "Certificate name:\t", device.CertName, "\t(certname)")
        fmt.Fprintln(writer, "Introducer:\t", device.Introducer, "\t(introducer)")
        first = false
    }
    writer.Flush()
}

func devicesAdd(c *cli.Context) {
    nid := c.Args()[0]
    id := parseDeviceID(nid)

    newDevice := config.DeviceConfiguration{
        DeviceID: id,
        Name: nid,
        Addresses: []string{"dynamic"},
    }

    if len(c.Args()) > 1 {
        newDevice.Name = c.Args()[1]
    }

    if len(c.Args()) > 2 {
        addresses := c.Args()[2:]
        for _, item := range addresses {
            if item == "dynamic" {
                continue
            }
            validAddress(item)
        }
        newDevice.Addresses = addresses
    }

    cfg := getConfig(c)
    for _, device := range cfg.Devices {
        if device.DeviceID == id {
            die("Device " + nid + " already exists")
        }
    }
    cfg.Devices = append(cfg.Devices, newDevice)
    setConfig(c, cfg)
}

func devicesRemove(c *cli.Context) {
    nid := c.Args()[0]
    id := parseDeviceID(nid)
    if nid == getMyID(c) {
        die("Cannot remove yourself")
    }
    cfg := getConfig(c)
    for i, device := range cfg.Devices {
        if device.DeviceID == id {
            last := len(cfg.Devices) - 1
            cfg.Devices[i] = cfg.Devices[last]
            cfg.Devices = cfg.Devices[:last]
            setConfig(c, cfg)
            return
        }
    }
    die("Device " + nid + " not found")
}

func devicesGet(c *cli.Context) {
    nid := c.Args()[0]
    id := parseDeviceID(nid)
    arg := c.Args()[1]
    cfg := getConfig(c)
    for _, device := range cfg.Devices {
        if device.DeviceID != id {
            continue
        }
        switch strings.ToLower(arg) {
        case "name":
            fmt.Println(device.Name)
        case "address":
            fmt.Println(strings.Join(device.Addresses, "\n"))
        case "compression":
            fmt.Println(device.Compression.String())
        case "certname":
            fmt.Println(device.CertName)
        case "introducer":
            fmt.Println(device.Introducer)
        default:
            die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
        }
        return
    }
    die("Device " + nid + " not found")
}

func devicesSet(c *cli.Context) {
    nid := c.Args()[0]
    id := parseDeviceID(nid)
    arg := c.Args()[1]
    config := getConfig(c)
    for i, device := range config.Devices {
        if device.DeviceID != id {
            continue
        }
        switch strings.ToLower(arg) {
        case "name":
            config.Devices[i].Name = strings.Join(c.Args()[2:], " ")
        case "address":
            for _, item := range c.Args()[2:] {
                if item == "dynamic" {
                    continue
                }
                validAddress(item)
            }
            config.Devices[i].Addresses = c.Args()[2:]
        case "compression":
            err := config.Devices[i].Compression.UnmarshalText([]byte(c.Args()[2]))
            die(err)
        case "certname":
            config.Devices[i].CertName = strings.Join(c.Args()[2:], " ")
        case "introducer":
            config.Devices[i].Introducer = parseBool(c.Args()[2])
        default:
            die("Invalid property: " + arg + "\nAvailable properties: name, address, compression, certname, introducer")
        }
        setConfig(c, config)
        return
    }
    die("Device " + nid + " not found")
}
@@ -1,67 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "errors",
|
||||
HideHelp: true,
|
||||
Usage: "Error command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: errorsShow,
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "Push an error to active clients",
|
||||
Requires: &cli.Requires{"error message..."},
|
||||
Action: errorsPush,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Clear pending errors",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/error/clear"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func errorsShow(c *cli.Context) {
|
||||
response := httpGet(c, "system/error")
|
||||
var data map[string][]map[string]interface{}
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
writer := newTableWriter()
|
||||
for _, item := range data["errors"] {
|
||||
time := item["time"].(string)[:19]
|
||||
time = strings.Replace(time, "T", " ", 1)
|
||||
err := item["error"].(string)
|
||||
err = strings.TrimSpace(err)
|
||||
fmt.Fprintln(writer, time+":\t"+err)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) {
|
||||
err := strings.Join(c.Args(), " ")
|
||||
response := httpPost(c, "system/error", strings.TrimSpace(err))
|
||||
if response.StatusCode != 200 {
|
||||
err = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
}
|
||||
@@ -1,351 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "folders",
|
||||
HideHelp: true,
|
||||
Usage: "Folder command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List available folders",
|
||||
Requires: &cli.Requires{},
|
||||
Action: foldersList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Add a new folder",
|
||||
Requires: &cli.Requires{"folder id", "directory"},
|
||||
Action: foldersAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Remove an existing folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersRemove,
|
||||
},
|
||||
{
|
||||
Name: "override",
|
||||
Usage: "Override changes from other nodes for a master folder",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersOverride,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property", "value..."},
|
||||
Action: foldersSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a property of a folder",
|
||||
Requires: &cli.Requires{"folder id", "property"},
|
||||
Action: foldersUnset,
|
||||
},
|
||||
{
|
||||
Name: "devices",
|
||||
Usage: "Folder devices command group",
|
||||
HideHelp: true,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "List of devices which the folder is shared with",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesList,
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "Share a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesAdd,
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "Unshare a folder with a device",
|
||||
Requires: &cli.Requires{"folder id", "device id"},
|
||||
Action: foldersDevicesRemove,
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Unshare a folder with all devices",
|
||||
Requires: &cli.Requires{"folder id"},
|
||||
Action: foldersDevicesClear,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func foldersList(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
first := true
|
||||
writer := newTableWriter()
|
||||
for _, folder := range cfg.Folders {
|
||||
if !first {
|
||||
fmt.Fprintln(writer)
|
||||
}
|
||||
fmt.Fprintln(writer, "ID:\t", folder.ID, "\t")
|
||||
fmt.Fprintln(writer, "Path:\t", folder.RawPath, "\t(directory)")
|
||||
fmt.Fprintln(writer, "Folder type:\t", folder.Type, "\t(type)")
|
||||
fmt.Fprintln(writer, "Ignore permissions:\t", folder.IgnorePerms, "\t(permissions)")
|
||||
fmt.Fprintln(writer, "Rescan interval in seconds:\t", folder.RescanIntervalS, "\t(rescan)")
|
||||
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Fprintln(writer, "Versioning:\t", folder.Versioning.Type, "\t(versioning)")
|
||||
for key, value := range folder.Versioning.Params {
|
||||
fmt.Fprintf(writer, "Versioning %s:\t %s \t(versioning-%s)\n", key, value, key)
|
||||
}
|
||||
}
|
||||
first = false
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func foldersAdd(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
abs, err := filepath.Abs(c.Args()[1])
|
||||
die(err)
|
||||
folder := config.FolderConfiguration{
|
||||
ID: c.Args()[0],
|
||||
RawPath: filepath.Clean(abs),
|
||||
}
|
||||
cfg.Folders = append(cfg.Folders, folder)
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func foldersRemove(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID == rid {
|
||||
last := len(cfg.Folders) - 1
|
||||
cfg.Folders[i] = cfg.Folders[last]
|
||||
cfg.Folders = cfg.Folders[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID == rid && folder.Type == config.FolderTypeSendOnly {
|
||||
response := httpPost(c, "db/override", "")
|
||||
if response.StatusCode != 200 {
|
||||
err := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
|
||||
body := string(responseToBArray(response))
|
||||
if body != "" {
|
||||
err += "\nBody: " + body
|
||||
}
|
||||
die(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Folder " + rid + " not found or folder not master")
|
||||
}
|
||||
|
||||
func foldersGet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
value, ok := folder.Versioning.Params[arg]
|
||||
if ok {
|
||||
fmt.Println(value)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
fmt.Println(folder.RawPath)
|
||||
case "type":
|
||||
fmt.Println(folder.Type)
|
||||
case "permissions":
|
||||
fmt.Println(folder.IgnorePerms)
|
||||
case "rescan":
|
||||
fmt.Println(folder.RescanIntervalS)
|
||||
case "versioning":
|
||||
if folder.Versioning.Type != "" {
|
||||
fmt.Println(folder.Versioning.Type)
|
||||
}
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, type, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersSet(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
val := strings.Join(c.Args()[2:], " ")
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
cfg.Folders[i].Versioning.Params[arg[11:]] = val
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
switch arg {
|
||||
case "directory":
|
||||
cfg.Folders[i].RawPath = val
|
||||
case "type":
|
||||
var t config.FolderType
|
||||
if err := t.UnmarshalText([]byte(val)); err != nil {
|
||||
die("Invalid folder type: " + err.Error())
|
||||
}
|
||||
cfg.Folders[i].Type = t
|
||||
case "permissions":
|
||||
cfg.Folders[i].IgnorePerms = parseBool(val)
|
||||
case "rescan":
|
||||
cfg.Folders[i].RescanIntervalS = parseInt(val)
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = val
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: directory, master, permissions, versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersUnset(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
arg := strings.ToLower(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(arg, "versioning-") {
|
||||
arg = arg[11:]
|
||||
if _, ok := folder.Versioning.Params[arg]; ok {
|
||||
delete(cfg.Folders[i].Versioning.Params, arg)
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Versioning property " + c.Args()[1][11:] + " not found")
|
||||
}
|
||||
switch arg {
|
||||
case "versioning":
|
||||
cfg.Folders[i].Versioning.Type = ""
|
||||
cfg.Folders[i].Versioning.Params = make(map[string]string)
|
||||
default:
|
||||
die("Invalid property: " + c.Args()[1] + "\nAvailable properties: versioning, versioning-<key>")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesList(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
fmt.Println(device.DeviceID)
|
||||
}
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesAdd(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for _, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
die("Device " + c.Args()[1] + " is already part of this folder")
|
||||
}
|
||||
}
|
||||
for _, device := range cfg.Devices {
|
||||
if device.DeviceID == nid {
|
||||
cfg.Folders[i].Devices = append(folder.Devices, config.FolderDeviceConfiguration{
|
||||
DeviceID: device.DeviceID,
|
||||
})
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found in device list")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesRemove(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
nid := parseDeviceID(c.Args()[1])
|
||||
cfg := getConfig(c)
|
||||
for ri, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
for ni, device := range folder.Devices {
|
||||
if device.DeviceID == nid {
|
||||
last := len(folder.Devices) - 1
|
||||
cfg.Folders[ri].Devices[ni] = folder.Devices[last]
|
||||
cfg.Folders[ri].Devices = cfg.Folders[ri].Devices[:last]
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
}
|
||||
die("Device " + c.Args()[1] + " not found")
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
|
||||
func foldersDevicesClear(c *cli.Context) {
|
||||
rid := c.Args()[0]
|
||||
cfg := getConfig(c)
|
||||
for i, folder := range cfg.Folders {
|
||||
if folder.ID != rid {
|
||||
continue
|
||||
}
|
||||
cfg.Folders[i].Devices = []config.FolderDeviceConfiguration{}
|
||||
setConfig(c, cfg)
|
||||
return
|
||||
}
|
||||
die("Folder " + rid + " not found")
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, []cli.Command{
|
||||
{
|
||||
Name: "id",
|
||||
Usage: "Get ID of the Syncthing client",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalID,
|
||||
},
|
||||
{
|
||||
Name: "status",
|
||||
Usage: "Configuration status, whether or not a restart is required for changes to take effect",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalStatus,
|
||||
},
|
||||
{
|
||||
Name: "restart",
|
||||
Usage: "Restart syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/restart"),
|
||||
},
|
||||
{
|
||||
Name: "shutdown",
|
||||
Usage: "Shutdown syncthing",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/shutdown"),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/reset"),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
Requires: &cli.Requires{},
|
||||
Action: wrappedHTTPPost("system/upgrade"),
|
||||
},
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Syncthing client version",
|
||||
Requires: &cli.Requires{},
|
||||
Action: generalVersion,
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
func generalID(c *cli.Context) {
|
||||
fmt.Println(getMyID(c))
|
||||
}
|
||||
|
||||
func generalStatus(c *cli.Context) {
|
||||
response := httpGet(c, "system/config/insync")
|
||||
var status struct{ ConfigInSync bool }
|
||||
json.Unmarshal(responseToBArray(response), &status)
|
||||
if !status.ConfigInSync {
|
||||
die("Config out of sync")
|
||||
}
|
||||
fmt.Println("Config in sync")
|
||||
}
|
||||
|
||||
func generalVersion(c *cli.Context) {
|
||||
response := httpGet(c, "system/version")
|
||||
version := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &version)
|
||||
prettyPrintJSON(version)
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "gui",
|
||||
HideHelp: true,
|
||||
Usage: "GUI command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all GUI configuration settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: guiDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting", "value"},
|
||||
Action: guiSet,
|
||||
},
|
||||
{
|
||||
Name: "unset",
|
||||
Usage: "Unset a GUI configuration setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: guiUnset,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func guiDump(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
writer := newTableWriter()
|
||||
fmt.Fprintln(writer, "Enabled:\t", cfg.Enabled, "\t(enabled)")
|
||||
fmt.Fprintln(writer, "Use HTTPS:\t", cfg.UseTLS(), "\t(tls)")
|
||||
fmt.Fprintln(writer, "Listen Addresses:\t", cfg.Address(), "\t(address)")
|
||||
if cfg.User != "" {
|
||||
fmt.Fprintln(writer, "Authentication User:\t", cfg.User, "\t(username)")
|
||||
fmt.Fprintln(writer, "Authentication Password:\t", cfg.Password, "\t(password)")
|
||||
}
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Fprintln(writer, "API Key:\t", cfg.APIKey, "\t(apikey)")
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func guiGet(c *cli.Context) {
|
||||
cfg := getConfig(c).GUI
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
fmt.Println(cfg.Enabled)
|
||||
case "tls":
|
||||
fmt.Println(cfg.UseTLS())
|
||||
case "address":
|
||||
fmt.Println(cfg.Address())
|
||||
case "user":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.User)
|
||||
}
|
||||
case "password":
|
||||
if cfg.User != "" {
|
||||
fmt.Println(cfg.Password)
|
||||
}
|
||||
case "apikey":
|
||||
if cfg.APIKey != "" {
|
||||
fmt.Println(cfg.APIKey)
|
||||
}
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
}
|
||||
|
||||
func guiSet(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "enabled":
|
||||
cfg.GUI.Enabled = parseBool(val)
|
||||
case "tls":
|
||||
cfg.GUI.RawUseTLS = parseBool(val)
|
||||
case "address":
|
||||
validAddress(val)
|
||||
cfg.GUI.RawAddress = val
|
||||
case "user":
|
||||
cfg.GUI.User = val
|
||||
case "password":
|
||||
cfg.GUI.Password = val
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = val
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: enabled, tls, address, user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
|
||||
func guiUnset(c *cli.Context) {
|
||||
cfg := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "user":
|
||||
cfg.GUI.User = ""
|
||||
case "password":
|
||||
cfg.GUI.Password = ""
|
||||
case "apikey":
|
||||
cfg.GUI.APIKey = ""
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: user, password, apikey")
|
||||
}
|
||||
setConfig(c, cfg)
|
||||
}
|
||||
@@ -1,173 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "options",
|
||||
HideHelp: true,
|
||||
Usage: "Options command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "dump",
|
||||
Usage: "Show all Syncthing option settings",
|
||||
Requires: &cli.Requires{},
|
||||
Action: optionsDump,
|
||||
},
|
||||
{
|
||||
Name: "get",
|
||||
Usage: "Get a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting"},
|
||||
Action: optionsGet,
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Usage: "Set a Syncthing option setting",
|
||||
Requires: &cli.Requires{"setting", "value..."},
|
||||
Action: optionsSet,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func optionsDump(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
writer := newTableWriter()
|
||||
|
||||
fmt.Fprintln(writer, "Sync protocol listen addresses:\t", strings.Join(cfg.ListenAddresses, " "), "\t(addresses)")
|
||||
fmt.Fprintln(writer, "Global discovery enabled:\t", cfg.GlobalAnnEnabled, "\t(globalannenabled)")
|
||||
fmt.Fprintln(writer, "Global discovery servers:\t", strings.Join(cfg.GlobalAnnServers, " "), "\t(globalannserver)")
|
||||
|
||||
fmt.Fprintln(writer, "Local discovery enabled:\t", cfg.LocalAnnEnabled, "\t(localannenabled)")
|
||||
fmt.Fprintln(writer, "Local discovery port:\t", cfg.LocalAnnPort, "\t(localannport)")
|
||||
|
||||
fmt.Fprintln(writer, "Outgoing rate limit in KiB/s:\t", cfg.MaxSendKbps, "\t(maxsend)")
|
||||
fmt.Fprintln(writer, "Incoming rate limit in KiB/s:\t", cfg.MaxRecvKbps, "\t(maxrecv)")
|
||||
fmt.Fprintln(writer, "Reconnect interval in seconds:\t", cfg.ReconnectIntervalS, "\t(reconnect)")
|
||||
fmt.Fprintln(writer, "Start browser:\t", cfg.StartBrowser, "\t(browser)")
|
||||
fmt.Fprintln(writer, "Enable UPnP:\t", cfg.NATEnabled, "\t(nat)")
|
||||
fmt.Fprintln(writer, "UPnP Lease in minutes:\t", cfg.NATLeaseM, "\t(natlease)")
|
||||
fmt.Fprintln(writer, "UPnP Renewal period in minutes:\t", cfg.NATRenewalM, "\t(natrenew)")
|
||||
fmt.Fprintln(writer, "Restart on Wake Up:\t", cfg.RestartOnWakeup, "\t(wake)")
|
||||
|
||||
reporting := "unrecognized value"
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
reporting = "false"
|
||||
case 0:
|
||||
reporting = "undecided/false"
|
||||
case 1:
|
||||
reporting = "true"
|
||||
}
|
||||
fmt.Fprintln(writer, "Anonymous usage reporting:\t", reporting, "\t(reporting)")
|
||||
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func optionsGet(c *cli.Context) {
|
||||
cfg := getConfig(c).Options
|
||||
arg := c.Args()[0]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
fmt.Println(strings.Join(cfg.ListenAddresses, "\n"))
|
||||
case "globalannenabled":
|
||||
fmt.Println(cfg.GlobalAnnEnabled)
|
||||
case "globalannservers":
|
||||
fmt.Println(strings.Join(cfg.GlobalAnnServers, "\n"))
|
||||
case "localannenabled":
|
||||
fmt.Println(cfg.LocalAnnEnabled)
|
||||
case "localannport":
|
||||
fmt.Println(cfg.LocalAnnPort)
|
||||
case "maxsend":
|
||||
fmt.Println(cfg.MaxSendKbps)
|
||||
case "maxrecv":
|
||||
fmt.Println(cfg.MaxRecvKbps)
|
||||
case "reconnect":
|
||||
fmt.Println(cfg.ReconnectIntervalS)
|
||||
case "browser":
|
||||
fmt.Println(cfg.StartBrowser)
|
||||
case "nat":
|
||||
fmt.Println(cfg.NATEnabled)
|
||||
case "natlease":
|
||||
fmt.Println(cfg.NATLeaseM)
|
||||
case "natrenew":
|
||||
fmt.Println(cfg.NATRenewalM)
|
||||
case "reporting":
|
||||
switch cfg.URAccepted {
|
||||
case -1:
|
||||
fmt.Println("false")
|
||||
case 0:
|
||||
fmt.Println("undecided/false")
|
||||
case 1:
|
||||
fmt.Println("true")
|
||||
default:
|
||||
fmt.Println("unknown")
|
||||
}
|
||||
case "wake":
|
||||
fmt.Println(cfg.RestartOnWakeup)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
}
|
||||
|
||||
func optionsSet(c *cli.Context) {
|
||||
config := getConfig(c)
|
||||
arg := c.Args()[0]
|
||||
val := c.Args()[1]
|
||||
switch strings.ToLower(arg) {
|
||||
case "address":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.ListenAddresses = c.Args().Tail()
|
||||
case "globalannenabled":
|
||||
config.Options.GlobalAnnEnabled = parseBool(val)
|
||||
case "globalannserver":
|
||||
for _, item := range c.Args().Tail() {
|
||||
validAddress(item)
|
||||
}
|
||||
config.Options.GlobalAnnServers = c.Args().Tail()
|
||||
case "localannenabled":
|
||||
config.Options.LocalAnnEnabled = parseBool(val)
|
||||
case "localannport":
|
||||
config.Options.LocalAnnPort = parsePort(val)
|
||||
case "maxsend":
|
||||
config.Options.MaxSendKbps = parseUint(val)
|
||||
case "maxrecv":
|
||||
config.Options.MaxRecvKbps = parseUint(val)
|
||||
case "reconnect":
|
||||
config.Options.ReconnectIntervalS = parseUint(val)
|
||||
case "browser":
|
||||
config.Options.StartBrowser = parseBool(val)
|
||||
case "nat":
|
||||
config.Options.NATEnabled = parseBool(val)
|
||||
case "natlease":
|
||||
config.Options.NATLeaseM = parseUint(val)
|
||||
case "natrenew":
|
||||
config.Options.NATRenewalM = parseUint(val)
|
||||
case "reporting":
|
||||
switch strings.ToLower(val) {
|
||||
case "u", "undecided", "unset":
|
||||
config.Options.URAccepted = 0
|
||||
default:
|
||||
boolvalue := parseBool(val)
|
||||
if boolvalue {
|
||||
config.Options.URAccepted = 1
|
||||
} else {
|
||||
config.Options.URAccepted = -1
|
||||
}
|
||||
}
|
||||
case "wake":
|
||||
config.Options.RestartOnWakeup = parseBool(val)
|
||||
default:
|
||||
die("Invalid setting: " + arg + "\nAvailable settings: address, globalannenabled, globalannserver, localannenabled, localannport, maxsend, maxrecv, reconnect, browser, upnp, upnplease, upnprenew, reporting, wake")
|
||||
}
|
||||
setConfig(c, config)
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cliCommands = append(cliCommands, cli.Command{
|
||||
Name: "report",
|
||||
HideHelp: true,
|
||||
Usage: "Reporting command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Report system state",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportSystem,
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportConnections,
|
||||
},
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Usage report",
|
||||
Requires: &cli.Requires{},
|
||||
Action: reportUsage,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func reportSystem(c *cli.Context) {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
func reportConnections(c *cli.Context) {
|
||||
response := httpGet(c, "system/connections")
|
||||
data := make(map[string]map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
var overall map[string]interface{}
|
||||
for key, value := range data {
|
||||
if key == "total" {
|
||||
overall = value
|
||||
continue
|
||||
}
|
||||
value["Device ID"] = key
|
||||
prettyPrintJSON(value)
|
||||
fmt.Println()
|
||||
}
|
||||
if overall != nil {
|
||||
fmt.Println("=== Overall statistics ===")
|
||||
prettyPrintJSON(overall)
|
||||
}
|
||||
}
|
||||
|
||||
func reportUsage(c *cli.Context) {
|
||||
response := httpGet(c, "svc/report")
|
||||
report := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &report)
|
||||
prettyPrintJSON(report)
|
||||
}
|
||||
60
cmd/stcli/errors.go
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var errorsCommand = cli.Command{
|
||||
Name: "errors",
|
||||
HideHelp: true,
|
||||
Usage: "Error command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "show",
|
||||
Usage: "Show pending errors",
|
||||
Action: expects(0, dumpOutput("system/error")),
|
||||
},
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "Push an error to active clients",
|
||||
ArgsUsage: "[error message]",
|
||||
Action: expects(1, errorsPush),
|
||||
},
|
||||
{
|
||||
Name: "clear",
|
||||
Usage: "Clear pending errors",
|
||||
Action: expects(0, emptyPost("system/error/clear")),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func errorsPush(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
errStr := strings.Join(c.Args(), " ")
|
||||
response, err := client.Post("system/error", strings.TrimSpace(errStr))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if response.StatusCode != 200 {
|
||||
errStr = fmt.Sprint("Failed to push error\nStatus code: ", response.StatusCode)
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := string(bytes)
|
||||
if body != "" {
|
||||
errStr += "\nBody: " + body
|
||||
}
|
||||
return fmt.Errorf(errStr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
|
||||
package main
|
||||
|
||||
var jsonAttributeLabels = map[string]string{
|
||||
"folderMaxMiB": "Largest folder size in MiB",
|
||||
"folderMaxFiles": "Largest folder file count",
|
||||
"longVersion": "Long version",
|
||||
"totMiB": "Total size in MiB",
|
||||
"totFiles": "Total files",
|
||||
"uniqueID": "Unique ID",
|
||||
"numFolders": "Folder count",
|
||||
"numDevices": "Device count",
|
||||
"memoryUsageMiB": "Memory usage in MiB",
|
||||
"memorySize": "Total memory in MiB",
|
||||
"sha256Perf": "SHA256 Benchmark",
|
||||
"At": "Last contacted",
|
||||
"Completion": "Percent complete",
|
||||
"InBytesTotal": "Total bytes received",
|
||||
"OutBytesTotal": "Total bytes sent",
|
||||
"ClientVersion": "Client version",
|
||||
"alloc": "Memory allocated in bytes",
|
||||
"sys": "Memory using in bytes",
|
||||
"cpuPercent": "CPU load in percent",
|
||||
"extAnnounceOK": "External announcments working",
|
||||
"goroutines": "Number of Go routines",
|
||||
"myID": "Client ID",
|
||||
"tilde": "Tilde expands to",
|
||||
"arch": "Architecture",
|
||||
"os": "OS",
|
||||
}
|
||||
@@ -1,63 +1,192 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/AudriusButkevicius/recli"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/locations"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
type ByAlphabet []cli.Command
|
||||
|
||||
func (a ByAlphabet) Len() int { return len(a) }
|
||||
func (a ByAlphabet) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByAlphabet) Less(i, j int) bool { return a[i].Name < a[j].Name }
|
||||
|
||||
var cliCommands []cli.Command
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "syncthing-cli"
|
||||
app.Author = "Audrius Butkevičius"
|
||||
app.Email = "audrius.butkevicius@gmail.com"
|
||||
app.Usage = "Syncthing command line interface"
|
||||
app.Version = "0.1"
|
||||
app.HideHelp = true
|
||||
// This is somewhat of a hack around a chicken-and-egg problem. We need to
// set the home directory and potentially other flags to know where the
// syncthing instance is running in order to get its config, which we then
// use to construct the actual CLI, at which point it's too late to add
// flags there.
|
||||
homeBaseDir := locations.GetBaseDir(locations.ConfigBaseDir)
|
||||
guiCfg := config.GUIConfiguration{}
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
flags := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
flags.StringVar(&guiCfg.RawAddress, "gui-address", guiCfg.RawAddress, "Override GUI address (e.g. \"http://192.0.2.42:8443\")")
|
||||
flags.StringVar(&guiCfg.APIKey, "gui-apikey", guiCfg.APIKey, "Override GUI API key")
|
||||
flags.StringVar(&homeBaseDir, "home", homeBaseDir, "Set configuration directory")
|
||||
|
||||
// Implement the same flags at the lower CLI, with the same default values (pre-parse), but do nothing with them.
|
||||
// This is so that we could reuse os.Args
|
||||
fakeFlags := []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "endpoint, e",
|
||||
Value: "http://127.0.0.1:8384",
|
||||
Usage: "End point to connect to",
|
||||
EnvVar: "STENDPOINT",
|
||||
Name: "gui-address",
|
||||
Value: guiCfg.RawAddress,
|
||||
Usage: "Override GUI address (e.g. \"http://192.0.2.42:8443\")",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "apikey, k",
|
||||
Value: "",
|
||||
Usage: "API Key",
|
||||
EnvVar: "STAPIKEY",
|
||||
Name: "gui-apikey",
|
||||
Value: guiCfg.APIKey,
|
||||
Usage: "Override GUI API key",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "username, u",
|
||||
Value: "",
|
||||
Usage: "Username",
|
||||
EnvVar: "STUSERNAME",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "password, p",
|
||||
Value: "",
|
||||
Usage: "Password",
|
||||
EnvVar: "STPASSWORD",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "insecure, i",
|
||||
Usage: "Do not verify SSL certificate",
|
||||
EnvVar: "STINSECURE",
|
||||
Name: "home",
|
||||
Value: homeBaseDir,
|
||||
Usage: "Set configuration directory",
|
||||
},
|
||||
}
|
||||
|
||||
sort.Sort(ByAlphabet(cliCommands))
|
||||
app.Commands = cliCommands
|
||||
app.RunAndExitOnError()
|
||||
// Do not print usage for these flags, and ignore parse errors, since this flag set won't understand many of the other arguments.
|
||||
flags.Usage = func() {}
|
||||
_ = flags.Parse(os.Args[1:])
|
||||
|
||||
// Now, if the API key and address are not provided (we are not connecting to a remote instance),
// try to rip them out of the config.
|
||||
if guiCfg.RawAddress == "" && guiCfg.APIKey == "" {
|
||||
// Update the base directory
|
||||
err := locations.SetBaseDir(locations.ConfigBaseDir, homeBaseDir)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "setting home"))
|
||||
}
|
||||
|
||||
// Load the certs and get the ID
|
||||
cert, err := tls.LoadX509KeyPair(
|
||||
locations.Get(locations.CertFile),
|
||||
locations.Get(locations.KeyFile),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal(errors.Wrap(err, "reading device ID"))
|
||||
}
|
||||
|
||||
myID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
|
||||
// Load the config
|
||||
cfg, err := config.Load(locations.Get(locations.ConfigFile), myID, events.NoopLogger)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "loading config"))
|
||||
}
|
||||
|
||||
guiCfg = cfg.GUI()
|
||||
} else if guiCfg.Address() == "" || guiCfg.APIKey == "" {
|
||||
log.Fatalln("Both -gui-address and -gui-apikey should be specified")
|
||||
}
|
||||
|
||||
if guiCfg.Address() == "" {
|
||||
log.Fatalln("Could not find GUI Address")
|
||||
}
|
||||
|
||||
if guiCfg.APIKey == "" {
|
||||
log.Fatalln("Could not find GUI API key")
|
||||
}
|
||||
|
||||
client := getClient(guiCfg)
|
||||
|
||||
cfg, err := getConfig(client)
|
||||
original := cfg.Copy()
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "getting config"))
|
||||
}
|
||||
|
||||
// Copy the config and set the default flags
|
||||
recliCfg := recli.DefaultConfig
|
||||
recliCfg.IDTag.Name = "xml"
|
||||
recliCfg.SkipTag.Name = "json"
|
||||
|
||||
commands, err := recli.New(recliCfg).Construct(&cfg)
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "config reflect"))
|
||||
}
|
||||
|
||||
// Construct the actual CLI
|
||||
app := cli.NewApp()
|
||||
app.Name = "stcli"
|
||||
app.HelpName = app.Name
|
||||
app.Author = "The Syncthing Authors"
|
||||
app.Usage = "Syncthing command line interface"
|
||||
app.Version = build.Version
|
||||
app.Flags = fakeFlags
|
||||
app.Metadata = map[string]interface{}{
|
||||
"client": client,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "config",
|
||||
HideHelp: true,
|
||||
Usage: "Configuration modification command group",
|
||||
Subcommands: commands,
|
||||
},
|
||||
showCommand,
|
||||
operationCommand,
|
||||
errorsCommand,
|
||||
}
|
||||
|
||||
tty := isatty.IsTerminal(os.Stdin.Fd()) || isatty.IsCygwinTerminal(os.Stdin.Fd())
|
||||
if !tty {
|
||||
// Not a TTY, consume from stdin
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
input, err := shlex.Split(scanner.Text())
|
||||
if err != nil {
|
||||
log.Fatalln(errors.Wrap(err, "parsing input"))
|
||||
}
|
||||
if len(input) == 0 {
|
||||
continue
|
||||
}
|
||||
err = app.Run(append(os.Args, input...))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
err = scanner.Err()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
err = app.Run(os.Args)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg, original) {
|
||||
body, err := json.MarshalIndent(cfg, "", " ")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
resp, err := client.Post("system/config", string(body))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
body, err := responseToBArray(resp)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
log.Fatalln(string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
78
cmd/stcli/operations.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var operationCommand = cli.Command{
|
||||
Name: "operations",
|
||||
HideHelp: true,
|
||||
Usage: "Operation command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "restart",
|
||||
Usage: "Restart syncthing",
|
||||
Action: expects(0, emptyPost("system/restart")),
|
||||
},
|
||||
{
|
||||
Name: "shutdown",
|
||||
Usage: "Shutdown syncthing",
|
||||
Action: expects(0, emptyPost("system/shutdown")),
|
||||
},
|
||||
{
|
||||
Name: "reset",
|
||||
Usage: "Reset syncthing deleting all folders and devices",
|
||||
Action: expects(0, emptyPost("system/reset")),
|
||||
},
|
||||
{
|
||||
Name: "upgrade",
|
||||
Usage: "Upgrade syncthing (if a newer version is available)",
|
||||
Action: expects(0, emptyPost("system/upgrade")),
|
||||
},
|
||||
{
|
||||
Name: "folder-override",
|
||||
Usage: "Override changes on folder (remote for sendonly, local for receiveonly)",
|
||||
ArgsUsage: "[folder id]",
|
||||
Action: expects(1, foldersOverride),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func foldersOverride(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
cfg, err := getConfig(client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rid := c.Args()[0]
|
||||
for _, folder := range cfg.Folders {
|
||||
if folder.ID == rid {
|
||||
response, err := client.Post("db/override", "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if response.StatusCode != 200 {
|
||||
errStr := fmt.Sprint("Failed to override changes\nStatus code: ", response.StatusCode)
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
body := string(bytes)
|
||||
if body != "" {
|
||||
errStr += "\nBody: " + body
|
||||
}
|
||||
return fmt.Errorf(errStr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("Folder " + rid + " not found")
|
||||
}
|
||||
44
cmd/stcli/show.go
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var showCommand = cli.Command{
|
||||
Name: "show",
|
||||
HideHelp: true,
|
||||
Usage: "Show command group",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Show syncthing client version",
|
||||
Action: expects(0, dumpOutput("system/version")),
|
||||
},
|
||||
{
|
||||
Name: "config-status",
|
||||
Usage: "Show configuration status, whether or not a restart is required for changes to take effect",
|
||||
Action: expects(0, dumpOutput("system/config/insync")),
|
||||
},
|
||||
{
|
||||
Name: "system",
|
||||
Usage: "Show system status",
|
||||
Action: expects(0, dumpOutput("system/status")),
|
||||
},
|
||||
{
|
||||
Name: "connections",
|
||||
Usage: "Report about connections to other devices",
|
||||
Action: expects(0, dumpOutput("system/connections")),
|
||||
},
|
||||
{
|
||||
Name: "usage",
|
||||
Usage: "Show usage report",
|
||||
Action: expects(0, dumpOutput("svc/report")),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -1,4 +1,8 @@
|
||||
// Copyright (C) 2014 Audrius Butkevičius
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
@@ -8,158 +12,83 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"unicode"
|
||||
|
||||
"github.com/AudriusButkevicius/cli"
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func responseToBArray(response *http.Response) []byte {
|
||||
defer response.Body.Close()
|
||||
func responseToBArray(response *http.Response) ([]byte, error) {
|
||||
bytes, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
die(err)
|
||||
return nil, err
|
||||
}
|
||||
return bytes
|
||||
return bytes, response.Body.Close()
|
||||
}
|
||||
|
||||
func die(vals ...interface{}) {
|
||||
if len(vals) > 1 || vals[0] != nil {
|
||||
os.Stderr.WriteString(fmt.Sprintln(vals...))
|
||||
os.Exit(1)
|
||||
func emptyPost(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
_, err := client.Post(url, "")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func wrappedHTTPPost(url string) func(c *cli.Context) {
|
||||
return func(c *cli.Context) {
|
||||
httpPost(c, url, "")
|
||||
}
|
||||
}
|
||||
|
||||
func prettyPrintJSON(json map[string]interface{}) {
|
||||
writer := newTableWriter()
|
||||
remap := make(map[string]interface{})
|
||||
for k, v := range json {
|
||||
key, ok := jsonAttributeLabels[k]
|
||||
if !ok {
|
||||
key = firstUpper(k)
|
||||
func dumpOutput(url string) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
client := c.App.Metadata["client"].(*APIClient)
|
||||
response, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
remap[key] = v
|
||||
return prettyPrintResponse(c, response)
|
||||
}
|
||||
}
|
||||
|
||||
jsonKeys := make([]string, 0, len(remap))
|
||||
for key := range remap {
|
||||
jsonKeys = append(jsonKeys, key)
|
||||
func getConfig(c *APIClient) (config.Configuration, error) {
|
||||
cfg := config.Configuration{}
|
||||
response, err := c.Get("system/config")
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
sort.Strings(jsonKeys)
|
||||
for _, k := range jsonKeys {
|
||||
var value string
|
||||
rvalue := remap[k]
|
||||
switch rvalue.(type) {
|
||||
case int, int16, int32, int64, uint, uint16, uint32, uint64, float32, float64:
|
||||
value = fmt.Sprintf("%.0f", rvalue)
|
||||
default:
|
||||
value = fmt.Sprint(rvalue)
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
err = json.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func expects(n int, actionFunc cli.ActionFunc) cli.ActionFunc {
|
||||
return func(ctx *cli.Context) error {
|
||||
if ctx.NArg() != n {
|
||||
plural := ""
|
||||
if n != 1 {
|
||||
plural = "s"
|
||||
}
|
||||
return fmt.Errorf("expected %d argument%s, got %d", n, plural, ctx.NArg())
|
||||
}
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintln(writer, k+":\t"+value)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
func firstUpper(str string) string {
|
||||
for i, v := range str {
|
||||
return string(unicode.ToUpper(v)) + str[i+1:]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func newTableWriter() *tabwriter.Writer {
|
||||
writer := new(tabwriter.Writer)
|
||||
writer.Init(os.Stdout, 0, 8, 0, '\t', 0)
|
||||
return writer
|
||||
}
|
||||
|
||||
func getMyID(c *cli.Context) string {
|
||||
response := httpGet(c, "system/status")
|
||||
data := make(map[string]interface{})
|
||||
json.Unmarshal(responseToBArray(response), &data)
|
||||
return data["myID"].(string)
|
||||
}
|
||||
|
||||
func getConfig(c *cli.Context) config.Configuration {
|
||||
response := httpGet(c, "system/config")
|
||||
config := config.Configuration{}
|
||||
json.Unmarshal(responseToBArray(response), &config)
|
||||
return config
|
||||
}
|
||||
|
||||
func setConfig(c *cli.Context, cfg config.Configuration) {
|
||||
body, err := json.Marshal(cfg)
|
||||
die(err)
|
||||
response := httpPost(c, "system/config", string(body))
|
||||
if response.StatusCode != 200 {
|
||||
die("Unexpected status code", response.StatusCode)
|
||||
return actionFunc(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func parseBool(input string) bool {
|
||||
val, err := strconv.ParseBool(input)
|
||||
func prettyPrintJSON(data interface{}) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(data)
|
||||
}
|
||||
|
||||
func prettyPrintResponse(c *cli.Context, response *http.Response) error {
|
||||
bytes, err := responseToBArray(response)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for a boolean")
|
||||
return err
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func parseInt(input string) int {
|
||||
val, err := strconv.ParseInt(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parseUint(input string) int {
|
||||
val, err := strconv.ParseUint(input, 0, 64)
|
||||
if err != nil {
|
||||
die(input + " is not a valid value for an unsigned integer")
|
||||
}
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func parsePort(input string) int {
|
||||
port := parseUint(input)
|
||||
if port < 1 || port > 65535 {
|
||||
die(input + " is not a valid port\nExpected value between 1 and 65535")
|
||||
}
|
||||
return port
|
||||
}
|
||||
|
||||
func validAddress(input string) {
|
||||
tokens := strings.Split(input, ":")
|
||||
if len(tokens) != 2 {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
matched, err := regexp.MatchString("^[a-zA-Z0-9]+([-a-zA-Z0-9.]+[-a-zA-Z0-9]+)?$", tokens[0])
|
||||
die(err)
|
||||
if !matched {
|
||||
die(input + " is not a valid value for an address\nExpected format <ip or hostname>:<port>")
|
||||
}
|
||||
parsePort(tokens[1])
|
||||
}
|
||||
|
||||
func parseDeviceID(input string) protocol.DeviceID {
|
||||
device, err := protocol.DeviceIDFromString(input)
|
||||
if err != nil {
|
||||
die(input + " is not a valid device id")
|
||||
}
|
||||
return device
|
||||
var data interface{}
|
||||
if err := json.Unmarshal(bytes, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: Check flag for pretty print format
|
||||
return prettyPrintJSON(data)
|
||||
}
|
||||
|
||||
@@ -88,10 +88,10 @@ func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) chan er
|
||||
}
|
||||
|
||||
rn, _ := filepath.Rel(dir, path)
|
||||
if rn == "." || rn == ".stfolder" {
|
||||
if rn == "." {
|
||||
return nil
|
||||
}
|
||||
if rn == ".stversions" {
|
||||
if rn == ".stversions" || rn == ".stfolder" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
|
||||
1172
cmd/stcrashreceiver/_testdata/panic.log
Normal file
File diff suppressed because it is too large
204
cmd/stcrashreceiver/sentry.go
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
raven "github.com/getsentry/raven-go"
|
||||
"github.com/maruel/panicparse/stack"
|
||||
)
|
||||
|
||||
const reportServer = "https://crash.syncthing.net/report/"
|
||||
|
||||
var loader = newGithubSourceCodeLoader()
|
||||
|
||||
func init() {
|
||||
raven.SetSourceCodeLoader(loader)
|
||||
}
|
||||
|
||||
var (
|
||||
clients = make(map[string]*raven.Client)
|
||||
clientsMut sync.Mutex
|
||||
)
|
||||
|
||||
func sendReport(dsn, path string, report []byte) error {
|
||||
pkt, err := parseReport(path, report)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientsMut.Lock()
|
||||
defer clientsMut.Unlock()
|
||||
|
||||
cli, ok := clients[dsn]
|
||||
if !ok {
|
||||
cli, err = raven.New(dsn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clients[dsn] = cli
|
||||
}
|
||||
|
||||
// The client sets release and such on the packet before sending, in the
|
||||
// misguided idea that it knows this better than the packet we give
|
||||
// it. So we copy the values from the packet to the client first...
|
||||
cli.SetRelease(pkt.Release)
|
||||
cli.SetEnvironment(pkt.Environment)
|
||||
|
||||
defer cli.Wait()
|
||||
_, errC := cli.Capture(pkt, nil)
|
||||
return <-errC
|
||||
}
|
||||
|
||||
func parseReport(path string, report []byte) (*raven.Packet, error) {
|
||||
parts := bytes.SplitN(report, []byte("\n"), 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("no first line")
|
||||
}
|
||||
|
||||
version, err := parseVersion(string(parts[0]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
report = parts[1]
|
||||
|
||||
foundPanic := false
|
||||
var subjectLine []byte
|
||||
for {
|
||||
parts = bytes.SplitN(report, []byte("\n"), 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("no panic line found")
|
||||
}
|
||||
|
||||
line := parts[0]
|
||||
report = parts[1]
|
||||
|
||||
if foundPanic {
|
||||
// The previous line was our "Panic at ..." header. We are now
|
||||
// at the beginning of the real panic trace and this is our
|
||||
// subject line.
|
||||
subjectLine = line
|
||||
break
|
||||
} else if bytes.HasPrefix(line, []byte("Panic at")) {
|
||||
foundPanic = true
|
||||
}
|
||||
}
|
||||
|
||||
r := bytes.NewReader(report)
|
||||
ctx, err := stack.ParseDump(r, ioutil.Discard, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Lock the source code loader to the version we are processing here.
|
||||
if version.commit != "" {
|
||||
// We have a commit hash, so we know exactly which source to use
|
||||
loader.LockWithVersion(version.commit)
|
||||
} else if strings.HasPrefix(version.tag, "v") {
|
||||
// Let's hope the tag is close enough
|
||||
loader.LockWithVersion(version.tag)
|
||||
} else {
|
||||
// Last resort
|
||||
loader.LockWithVersion("master")
|
||||
}
|
||||
defer loader.Unlock()
|
||||
|
||||
var trace raven.Stacktrace
|
||||
for _, gr := range ctx.Goroutines {
|
||||
if gr.First {
|
||||
trace.Frames = make([]*raven.StacktraceFrame, len(gr.Stack.Calls))
|
||||
for i, sc := range gr.Stack.Calls {
|
||||
trace.Frames[len(trace.Frames)-1-i] = raven.NewStacktraceFrame(0, sc.Func.Name(), sc.SrcPath, sc.Line, 3, nil)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
pkt := &raven.Packet{
|
||||
Message: string(subjectLine),
|
||||
Platform: "go",
|
||||
Release: version.tag,
|
||||
Environment: version.environment(),
|
||||
Tags: raven.Tags{
|
||||
raven.Tag{Key: "version", Value: version.version},
|
||||
raven.Tag{Key: "tag", Value: version.tag},
|
||||
raven.Tag{Key: "codename", Value: version.codename},
|
||||
raven.Tag{Key: "runtime", Value: version.runtime},
|
||||
raven.Tag{Key: "goos", Value: version.goos},
|
||||
raven.Tag{Key: "goarch", Value: version.goarch},
|
||||
raven.Tag{Key: "builder", Value: version.builder},
|
||||
},
|
||||
Extra: raven.Extra{
|
||||
"url": reportServer + path,
|
||||
},
|
||||
Interfaces: []raven.Interface{&trace},
|
||||
}
|
||||
if version.commit != "" {
|
||||
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
|
||||
}
|
||||
|
||||
return pkt, nil
|
||||
}
|
||||
|
||||
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC
|
||||
var longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)`)
|
||||
|
||||
type version struct {
|
||||
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
|
||||
tag string // "v1.1.4-rc.1"
|
||||
commit string // "6aaae618", blank when absent
|
||||
codename string // "Erbium Earthworm"
|
||||
runtime string // "go1.12.5"
|
||||
goos string // "darwin"
|
||||
goarch string // "amd64"
|
||||
builder string // "jb@kvin.kastelo.net"
|
||||
}
|
||||
|
||||
func (v version) environment() string {
|
||||
if v.commit != "" {
|
||||
return "Development"
|
||||
}
|
||||
if strings.Contains(v.tag, "-rc.") {
|
||||
return "Candidate"
|
||||
}
|
||||
if strings.Contains(v.tag, "-") {
|
||||
return "Beta"
|
||||
}
|
||||
return "Stable"
|
||||
}
|
||||
|
||||
func parseVersion(line string) (version, error) {
|
||||
m := longVersionRE.FindStringSubmatch(line)
|
||||
if len(m) == 0 {
|
||||
return version{}, errors.New("unintelligeble version string")
|
||||
}
|
||||
|
||||
v := version{
|
||||
version: m[1],
|
||||
codename: m[2],
|
||||
runtime: m[3],
|
||||
goos: m[4],
|
||||
goarch: m[5],
|
||||
builder: m[6],
|
||||
}
|
||||
parts := strings.Split(v.version, "+")
|
||||
v.tag = parts[0]
|
||||
if len(parts) > 1 {
|
||||
fields := strings.Split(parts[1], "-")
|
||||
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
|
||||
v.commit = fields[1][1:]
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
64
cmd/stcrashreceiver/sentry_test.go
Normal file
@@ -0,0 +1,64 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseVersion(t *testing.T) {
|
||||
cases := []struct {
|
||||
longVersion string
|
||||
parsed version
|
||||
}{
|
||||
{
|
||||
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
|
||||
parsed: version{
|
||||
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
|
||||
tag: "v1.1.4-rc.1",
|
||||
commit: "6aaae618",
|
||||
codename: "Erbium Earthworm",
|
||||
runtime: "go1.12.5",
|
||||
goos: "darwin",
|
||||
goarch: "amd64",
|
||||
builder: "jb@kvin.kastelo.net",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
v, err := parseVersion(tc.longVersion)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if v != tc.parsed {
|
||||
t.Error(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseReport(t *testing.T) {
|
||||
bs, err := ioutil.ReadFile("_testdata/panic.log")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pkt, err := parseReport("1/2/345", bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bs, err = pkt.JSON()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", bs)
|
||||
}
|
||||
114
cmd/stcrashreceiver/sourcecodeloader.go
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
urlPrefix = "https://raw.githubusercontent.com/syncthing/syncthing/"
|
||||
httpTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type githubSourceCodeLoader struct {
|
||||
mut sync.Mutex
|
||||
version string
|
||||
cache map[string]map[string][][]byte // version -> file -> lines
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func newGithubSourceCodeLoader() *githubSourceCodeLoader {
|
||||
return &githubSourceCodeLoader{
|
||||
cache: make(map[string]map[string][][]byte),
|
||||
client: &http.Client{Timeout: httpTimeout},
|
||||
}
|
||||
}
|
||||
|
||||
func (l *githubSourceCodeLoader) LockWithVersion(version string) {
|
||||
l.mut.Lock()
|
||||
l.version = version
|
||||
if _, ok := l.cache[version]; !ok {
|
||||
l.cache[version] = make(map[string][][]byte)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *githubSourceCodeLoader) Unlock() {
|
||||
l.mut.Unlock()
|
||||
}
|
||||
|
||||
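// Load fetches the named source file for the currently locked version from
// GitHub and returns the lines surrounding the requested line, caching
// whatever it finds per version and file.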
func (l *githubSourceCodeLoader) Load(filename string, line, context int) ([][]byte, int) {
|
||||
filename = filepath.ToSlash(filename)
|
||||
lines, ok := l.cache[l.version][filename]
|
||||
if !ok {
|
||||
// Cache whatever we managed to find (or nil if nothing, so we don't try again)
|
||||
defer func() {
|
||||
l.cache[l.version][filename] = lines
|
||||
}()
|
||||
|
||||
knownPrefixes := []string{"/lib/", "/cmd/"}
|
||||
var idx int
|
||||
for _, pref := range knownPrefixes {
|
||||
idx = strings.Index(filename, pref)
|
||||
if idx >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if idx == -1 {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
url := urlPrefix + l.version + filename[idx:]
|
||||
resp, err := l.client.Get(url)
|
||||
|
||||
		if err != nil {
			fmt.Println("Loading source:", err.Error())
			return nil, 0
		}
		if resp.StatusCode != http.StatusOK {
			fmt.Println("Loading source:", resp.Status)
			return nil, 0
		}
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
_ = resp.Body.Close()
|
||||
if err != nil {
|
||||
fmt.Println("Loading source:", err.Error())
|
||||
return nil, 0
|
||||
}
|
||||
lines = bytes.Split(data, []byte{'\n'})
|
||||
}
|
||||
|
||||
return getLineFromLines(lines, line, context)
|
||||
}
|
||||
|
||||
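// getLineFromLines returns up to 2*context+1 lines centered on the 1-indexed
// line, together with the index of that line within the returned slice.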
func getLineFromLines(lines [][]byte, line, context int) ([][]byte, int) {
|
||||
if lines == nil {
|
||||
// cached error from ReadFile: return no lines
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
line-- // stack trace lines are 1-indexed
|
||||
start := line - context
|
||||
var idx int
|
||||
if start < 0 {
|
||||
start = 0
|
||||
idx = line
|
||||
} else {
|
||||
idx = context
|
||||
}
|
||||
end := line + context + 1
|
||||
if line >= len(lines) {
|
||||
return nil, 0
|
||||
}
|
||||
if end > len(lines) {
|
||||
end = len(lines)
|
||||
}
|
||||
return lines[start:end], idx
|
||||
}
|
||||
160
cmd/stcrashreceiver/stcrashreceiver.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Command stcrashreceiver is a trivial HTTP server that allows two things:
|
||||
//
|
||||
// - uploading files (crash reports) named like a SHA256 hash using a PUT request
|
||||
// - checking whether such a file exists using a HEAD request
|
||||
//
|
||||
// Typically this should be deployed behind something that manages HTTPS.
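//
// A client could check for and upload a report roughly like this (a minimal
// sketch; the URL and variable names are hypothetical):
//
//	url := "https://crash.example.com/" + reportSHA256Hex
//	if resp, err := http.Head(url); err == nil && resp.StatusCode == http.StatusOK {
//		return // already uploaded
//	}
//	req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(reportBytes))
//	_, _ = http.DefaultClient.Do(req)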
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const maxRequestSize = 1 << 20 // 1 MiB
|
||||
|
||||
func main() {
|
||||
dir := flag.String("dir", ".", "Directory to store reports in")
|
||||
dsn := flag.String("dsn", "", "Sentry DSN")
|
||||
listen := flag.String("listen", ":22039", "HTTP listen address")
|
||||
flag.Parse()
|
||||
|
||||
cr := &crashReceiver{
|
||||
dir: *dir,
|
||||
dsn: *dsn,
|
||||
}
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
if err := http.ListenAndServe(*listen, cr); err != nil {
|
||||
log.Fatalln("HTTP serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
type crashReceiver struct {
|
||||
dir string
|
||||
dsn string
|
||||
}
|
||||
|
||||
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
// The final path component should be a SHA256 hash in hex, so 64 hex
|
||||
// characters. We don't care about case on the request but use lower
|
||||
// case internally.
|
||||
reportID := strings.ToLower(path.Base(req.URL.Path))
|
||||
if len(reportID) != 64 {
|
||||
http.Error(w, "Bad request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
for _, c := range reportID {
|
||||
if c >= 'a' && c <= 'f' {
|
||||
continue
|
||||
}
|
||||
if c >= '0' && c <= '9' {
|
||||
continue
|
||||
}
|
||||
http.Error(w, "Bad request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// The location of the report on disk, compressed
|
||||
fullPath := filepath.Join(r.dir, r.dirFor(reportID), reportID) + ".gz"
|
||||
|
||||
switch req.Method {
|
||||
case http.MethodGet:
|
||||
r.serveGet(fullPath, w, req)
|
||||
case http.MethodHead:
|
||||
r.serveHead(fullPath, w, req)
|
||||
case http.MethodPut:
|
||||
r.servePut(reportID, fullPath, w, req)
|
||||
default:
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
// serveGet responds to GET requests by serving the uncompressed report.
|
||||
func (r *crashReceiver) serveGet(fullPath string, w http.ResponseWriter, _ *http.Request) {
|
||||
fd, err := os.Open(fullPath)
|
||||
if err != nil {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
defer fd.Close()
|
||||
gr, err := gzip.NewReader(fd)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
_, _ = io.Copy(w, gr) // best effort
|
||||
}
|
||||
|
||||
// serveHead responds to HEAD requests by checking if the named report
|
||||
// already exists in the system.
|
||||
func (r *crashReceiver) serveHead(fullPath string, w http.ResponseWriter, _ *http.Request) {
|
||||
if _, err := os.Lstat(fullPath); err != nil {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// servePut accepts and stores the given report.
|
||||
func (r *crashReceiver) servePut(reportID, fullPath string, w http.ResponseWriter, req *http.Request) {
|
||||
// Ensure the destination directory exists
|
||||
if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
|
||||
log.Println("Creating directory:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Read at most maxRequestSize of report data.
|
||||
log.Println("Receiving report", reportID)
|
||||
lr := io.LimitReader(req.Body, maxRequestSize)
|
||||
bs, err := ioutil.ReadAll(lr)
|
||||
if err != nil {
|
||||
log.Println("Reading report:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Compress the report for storage
|
||||
buf := new(bytes.Buffer)
|
||||
gw := gzip.NewWriter(buf)
|
||||
_, _ = gw.Write(bs) // can't fail
|
||||
gw.Close()
|
||||
|
||||
// Create an output file with the compressed report
|
||||
err = ioutil.WriteFile(fullPath, buf.Bytes(), 0644)
|
||||
if err != nil {
|
||||
log.Println("Saving report:", err)
|
||||
http.Error(w, "Internal server error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Send the report to Sentry
|
||||
if r.dsn != "" {
|
||||
go func() {
|
||||
// There's no need for the client to have to wait for this part.
|
||||
if err := sendReport(r.dsn, reportID, bs); err != nil {
|
||||
log.Println("Failed to send report:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// 01234567890abcdef... => 01/23
|
||||
func (r *crashReceiver) dirFor(base string) string {
|
||||
return filepath.Join(base[0:2], base[2:4])
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
Copyright (C) 2014-2015 The Discosrv Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -6,33 +6,5 @@ This is the global discovery server for the `syncthing` project.
|
||||
Usage
|
||||
-----
|
||||
|
||||
The discovery server supports `ql` and `postgres` backends.
|
||||
Specify the backend via `-db-backend` and the database DSN via `-db-dsn`.
|
||||
https://docs.syncthing.net/users/stdiscosrv.html
|
||||
|
||||
By default it will use an in-memory `ql` backend. If you wish to persist the
|
||||
information on disk between restarts in `ql`, specify a file DSN:
|
||||
|
||||
```bash
|
||||
$ stdiscosrv -db-dsn="file:///var/run/stdiscosrv.db"
|
||||
```
|
||||
|
||||
For `postgres`, you will need to create a database and a user with permissions
|
||||
to create tables in it, then start the stdiscosrv as follows:
|
||||
|
||||
```bash
|
||||
$ export STDISCOSRV_DB_DSN="postgres://user:password@localhost/databasename"
|
||||
$ stdiscosrv -db-backend="postgres"
|
||||
```
|
||||
|
||||
You can pass the DSN as a command line option, but the value you pass in will
|
||||
be visible in most process managers, potentially exposing the database password
|
||||
to other users.
|
||||
|
||||
In all cases, the appropriate tables and indexes will be created at first
|
||||
startup. If it doesn't exit with an error, you're fine.
|
||||
|
||||
See `stdiscosrv -help` for other options.
|
||||
|
||||
##### Third-party attribution
|
||||
|
||||
[cznic/lldb](https://github.com/cznic/lldb), Copyright (C) 2014 The lldb Authors.
|
||||
|
||||
454
cmd/stdiscosrv/apisrv.go
Normal file
@@ -0,0 +1,454 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
// announcement is the format received from and sent to clients
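// (for example: {"seen":"2019-05-23T16:08:14Z","addresses":["tcp://192.0.2.1:22000"]})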
|
||||
type announcement struct {
|
||||
Seen time.Time `json:"seen"`
|
||||
Addresses []string `json:"addresses"`
|
||||
}
|
||||
|
||||
type apiSrv struct {
|
||||
addr string
|
||||
cert tls.Certificate
|
||||
db database
|
||||
listener net.Listener
|
||||
repl replicator // optional
|
||||
useHTTP bool
|
||||
|
||||
mapsMut sync.Mutex
|
||||
misses map[string]int32
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
|
||||
func (i requestID) String() string {
|
||||
return fmt.Sprintf("%016x", int64(i))
|
||||
}
|
||||
|
||||
type contextKey int
|
||||
|
||||
const idKey contextKey = iota
|
||||
|
||||
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
|
||||
return &apiSrv{
|
||||
addr: addr,
|
||||
cert: cert,
|
||||
db: db,
|
||||
repl: repl,
|
||||
useHTTP: useHTTP,
|
||||
misses: make(map[string]int32),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) Serve() {
|
||||
if s.useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = listener
|
||||
} else {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = tlsListener
|
||||
}
|
||||
|
||||
http.HandleFunc("/", s.handler)
|
||||
http.HandleFunc("/ping", handlePing)
|
||||
|
||||
srv := &http.Server{
|
||||
ReadTimeout: httpReadTimeout,
|
||||
WriteTimeout: httpWriteTimeout,
|
||||
MaxHeaderBytes: httpMaxHeaderBytes,
|
||||
}
|
||||
|
||||
if err := srv.Serve(s.listener); err != nil {
|
||||
log.Println("Serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
var topCtx = context.Background()
|
||||
|
||||
func (s *apiSrv) handler(w http.ResponseWriter, req *http.Request) {
|
||||
t0 := time.Now()
|
||||
|
||||
lw := NewLoggingResponseWriter(w)
|
||||
|
||||
defer func() {
|
||||
diff := time.Since(t0)
|
||||
apiRequestsSeconds.WithLabelValues(req.Method).Observe(diff.Seconds())
|
||||
apiRequestsTotal.WithLabelValues(req.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
reqID := requestID(rand.Int63())
|
||||
ctx := context.WithValue(topCtx, idKey, reqID)
|
||||
|
||||
if debug {
|
||||
log.Println(reqID, req.Method, req.URL)
|
||||
}
|
||||
|
||||
var remoteIP net.IP
|
||||
if s.useHTTP {
|
||||
remoteIP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
|
||||
} else {
|
||||
addr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Println("remoteAddr:", err)
|
||||
lw.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(lw, "Internal Server Error", http.StatusInternalServerError)
|
||||
apiRequestsTotal.WithLabelValues("no_remote_addr").Inc()
|
||||
return
|
||||
}
|
||||
remoteIP = addr.IP
|
||||
}
|
||||
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
s.handleGET(ctx, lw, req)
|
||||
case "POST":
|
||||
s.handlePOST(ctx, remoteIP, lw, req)
|
||||
default:
|
||||
http.Error(lw, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param")
|
||||
}
|
||||
lookupRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
key := deviceID.String()
|
||||
rec, err := s.db.get(key)
|
||||
if err != nil {
|
||||
// some sort of internal error
|
||||
lookupRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if len(rec.Addresses) == 0 {
|
||||
lookupRequestsTotal.WithLabelValues("not_found").Inc()
|
||||
|
||||
s.mapsMut.Lock()
|
||||
misses := s.misses[key]
|
||||
if misses < rec.Misses {
|
||||
misses = rec.Misses + 1
|
||||
} else {
|
||||
misses++
|
||||
}
|
||||
s.misses[key] = misses
|
||||
s.mapsMut.Unlock()
|
||||
|
||||
if misses%notFoundMissesWriteInterval == 0 {
|
||||
rec.Misses = misses
|
||||
rec.Missed = time.Now().UnixNano()
|
||||
rec.Addresses = nil
|
||||
// rec.Seen retained from get
|
||||
s.db.put(key, rec)
|
||||
}
|
||||
|
||||
w.Header().Set("Retry-After", notFoundRetryAfterString(int(misses)))
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
lookupRequestsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
bs, _ := json.Marshal(announcement{
|
||||
Seen: time.Unix(0, rec.Seen),
|
||||
Addresses: addressStrs(rec.Addresses),
|
||||
})
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(bs)
|
||||
}
|
||||
|
||||
func (s *apiSrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
rawCert := certificateBytes(req)
|
||||
if rawCert == nil {
|
||||
if debug {
|
||||
log.Println(reqID, "no certificates")
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("no_certificate").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
var ann announcement
|
||||
if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "decode:", err)
|
||||
}
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
deviceID := protocol.NewDeviceID(rawCert)
|
||||
|
||||
addresses := fixupAddresses(remoteIP, ann.Addresses)
|
||||
if len(addresses) == 0 {
|
||||
announceRequestsTotal.WithLabelValues("bad_request").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.handleAnnounce(remoteIP, deviceID, addresses); err != nil {
|
||||
announceRequestsTotal.WithLabelValues("internal_error").Inc()
|
||||
w.Header().Set("Retry-After", errorRetryAfterString())
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
announceRequestsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
w.Header().Set("Reannounce-After", reannounceAfterString())
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (s *apiSrv) Stop() {
|
||||
s.listener.Close()
|
||||
}
|
||||
|
||||
func (s *apiSrv) handleAnnounce(remote net.IP, deviceID protocol.DeviceID, addresses []string) error {
|
||||
key := deviceID.String()
|
||||
now := time.Now()
|
||||
expire := now.Add(addressExpiryTime).UnixNano()
|
||||
|
||||
dbAddrs := make([]DatabaseAddress, len(addresses))
|
||||
for i := range addresses {
|
||||
dbAddrs[i].Address = addresses[i]
|
||||
dbAddrs[i].Expires = expire
|
||||
}
|
||||
|
||||
// The address slice must always be sorted for database merges to work
|
||||
// properly.
|
||||
sort.Sort(databaseAddressOrder(dbAddrs))
|
||||
|
||||
seen := now.UnixNano()
|
||||
if s.repl != nil {
|
||||
s.repl.send(key, dbAddrs, seen)
|
||||
}
|
||||
return s.db.merge(key, dbAddrs, seen)
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
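// certificateBytes returns the raw DER bytes of the client certificate, taken
// from the TLS connection when present or otherwise reconstructed from the
// PEM data passed in headers by a reverse proxy (nginx or Traefik 2).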
func certificateBytes(req *http.Request) []byte {
|
||||
if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
return req.TLS.PeerCertificates[0].Raw
|
||||
}
|
||||
|
||||
var bs []byte
|
||||
|
||||
if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
|
||||
if strings.Contains(hdr, "%") {
|
||||
// Nginx using $ssl_client_escaped_cert
|
||||
// The certificate is in PEM format with url encoding.
|
||||
// We need to decode for the PEM decoder
|
||||
hdr, err := url.QueryUnescape(hdr)
|
||||
if err != nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
}
|
||||
|
||||
bs = []byte(hdr)
|
||||
} else {
|
||||
// Nginx using $ssl_client_cert
|
||||
// The certificate is in PEM format but with spaces for newlines. We
|
||||
// need to reinstate the newlines for the PEM decoder. But we need to
|
||||
// leave the spaces in the BEGIN and END lines - the first and last
|
||||
// space - alone.
|
||||
bs = []byte(hdr)
|
||||
firstSpace := bytes.Index(bs, []byte(" "))
|
||||
lastSpace := bytes.LastIndex(bs, []byte(" "))
|
||||
for i := firstSpace + 1; i < lastSpace; i++ {
|
||||
if bs[i] == ' ' {
|
||||
bs[i] = '\n'
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if hdr := req.Header.Get("X-Forwarded-Tls-Client-Cert"); hdr != "" {
|
||||
// Traefik 2 passtlsclientcert
|
||||
// The certificate is in PEM format with url encoding but without newlines
|
||||
// and start/end statements. We need to decode, reinstate the newlines every 64
|
||||
// characters and add the BEGIN/END statements for the PEM decoder
|
||||
hdr, err := url.QueryUnescape(hdr)
|
||||
if err != nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 64; i < len(hdr); i += 65 {
|
||||
hdr = hdr[:i] + "\n" + hdr[i:]
|
||||
}
|
||||
|
||||
hdr = "-----BEGIN CERTIFICATE-----\n" + hdr
|
||||
hdr = hdr + "\n-----END CERTIFICATE-----\n"
|
||||
bs = []byte(hdr)
|
||||
}
|
||||
|
||||
if bs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
block, _ := pem.Decode(bs)
|
||||
if block == nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
}
|
||||
|
||||
return block.Bytes
|
||||
}
|
||||
|
||||
// fixupAddresses checks the list of addresses, removing invalid ones and
|
||||
// replacing unspecified IPs with the given remote IP.
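// For example, with remote 203.0.113.7 an announced "tcp://:22000" becomes
// "tcp://203.0.113.7:22000", while loopback and multicast addresses are dropped.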
|
||||
func fixupAddresses(remote net.IP, addresses []string) []string {
|
||||
fixed := make([]string, 0, len(addresses))
|
||||
for _, annAddr := range addresses {
|
||||
uri, err := url.Parse(annAddr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
|
||||
// Some classes of IP are no-go.
|
||||
if ip.IsLoopback() || ip.IsMulticast() {
|
||||
continue
|
||||
}
|
||||
|
||||
if host == "" || ip.IsUnspecified() {
|
||||
// Replace the unspecified IP with the request source.
|
||||
|
||||
// ... unless the request source is the loopback address or
|
||||
// multicast/unspecified (can't happen, really).
|
||||
if remote.IsLoopback() || remote.IsMulticast() || remote.IsUnspecified() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv6 remote address if requested scheme is ...4
|
||||
// (i.e., tcp4, etc.)
|
||||
if strings.HasSuffix(uri.Scheme, "4") && remote.To4() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv4 remote address if requested scheme is ...6
|
||||
if strings.HasSuffix(uri.Scheme, "6") && remote.To4() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host = remote.String()
|
||||
}
|
||||
|
||||
uri.Host = net.JoinHostPort(host, port)
|
||||
fixed = append(fixed, uri.String())
|
||||
}
|
||||
|
||||
return fixed
|
||||
}
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
|
||||
return &loggingResponseWriter{w, http.StatusOK}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func addressStrs(dbAddrs []DatabaseAddress) []string {
|
||||
res := make([]string, len(dbAddrs))
|
||||
for i, a := range dbAddrs {
|
||||
res[i] = a.Address
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func errorRetryAfterString() string {
|
||||
return strconv.Itoa(errorRetryAfterSeconds + rand.Intn(errorRetryFuzzSeconds))
|
||||
}
|
||||
|
||||
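// notFoundRetryAfterString returns a Retry-After value that grows linearly
// with the recorded number of misses, capped at notFoundRetryMaxSeconds and
// fuzzed to spread out retries.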
func notFoundRetryAfterString(misses int) string {
|
||||
retryAfterS := notFoundRetryMinSeconds + notFoundRetryIncSeconds*misses
|
||||
if retryAfterS > notFoundRetryMaxSeconds {
|
||||
retryAfterS = notFoundRetryMaxSeconds
|
||||
}
|
||||
retryAfterS += rand.Intn(notFoundRetryFuzzSeconds)
|
||||
return strconv.Itoa(retryAfterS)
|
||||
}
|
||||
|
||||
func reannounceAfterString() string {
|
||||
return strconv.Itoa(reannounceAfterSeconds + rand.Intn(reannounzeFuzzSeconds))
|
||||
}
|
||||
65
cmd/stdiscosrv/apisrv_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFixupAddresses(t *testing.T) {
|
||||
cases := []struct {
|
||||
remote net.IP
|
||||
in []string
|
||||
out []string
|
||||
}{
|
||||
{ // verbatim passthrough
|
||||
in: []string{"tcp://1.2.3.4:22000"},
|
||||
out: []string{"tcp://1.2.3.4:22000"},
|
||||
}, { // unspecified replaced by remote
|
||||
remote: net.ParseIP("1.2.3.4"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://1.2.3.4:22000", "tcp://192.0.2.42:22000"},
|
||||
}, { // unspecified not used as replacement
|
||||
remote: net.ParseIP("0.0.0.0"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // unspecified not used as replacement
|
||||
remote: net.ParseIP("::"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // localhost not used as replacement
|
||||
remote: net.ParseIP("127.0.0.1"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // localhost not used as replacement
|
||||
remote: net.ParseIP("::1"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // multicast not used as replacement
|
||||
remote: net.ParseIP("224.0.0.1"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // multicast not used as replacement
|
||||
remote: net.ParseIP("ff80::42"),
|
||||
in: []string{"tcp://:22000", "tcp://192.0.2.42:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
}, { // explicitly announced weirdness is also filtered
|
||||
remote: net.ParseIP("192.0.2.42"),
|
||||
in: []string{"tcp://:22000", "tcp://127.1.2.3:22000", "tcp://[::1]:22000", "tcp://[ff80::42]:22000"},
|
||||
out: []string{"tcp://192.0.2.42:22000"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
out := fixupAddresses(tc.remote, tc.in)
|
||||
if fmt.Sprint(out) != fmt.Sprint(tc.out) {
|
||||
t.Errorf("fixupAddresses(%v, %v) => %v, expected %v", tc.remote, tc.in, out, tc.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
type cleansrv struct {
|
||||
intv time.Duration
|
||||
db *sql.DB
|
||||
prep map[string]*sql.Stmt
|
||||
}
|
||||
|
||||
func (s *cleansrv) Serve() {
|
||||
for {
|
||||
time.Sleep(next(s.intv))
|
||||
|
||||
err := s.cleanOldEntries()
|
||||
if err != nil {
|
||||
log.Println("Clean:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *cleansrv) Stop() {
|
||||
panic("stop unimplemented")
|
||||
}
|
||||
|
||||
func (s *cleansrv) cleanOldEntries() (err error) {
|
||||
var tx *sql.Tx
|
||||
tx, err = s.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
} else {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
res, err := tx.Stmt(s.prep["cleanAddress"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old addresses", rows)
|
||||
}
|
||||
|
||||
res, err = tx.Stmt(s.prep["cleanDevice"]).Exec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rows, _ := res.RowsAffected(); rows > 0 {
|
||||
log.Printf("Clean: %d old devices", rows)
|
||||
}
|
||||
|
||||
var devs, addrs int
|
||||
row := tx.Stmt(s.prep["countDevice"]).QueryRow()
|
||||
if err = row.Scan(&devs); err != nil {
|
||||
return err
|
||||
}
|
||||
row = tx.Stmt(s.prep["countAddress"]).QueryRow()
|
||||
if err = row.Scan(&addrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Database: %d devices, %d addresses", devs, addrs)
|
||||
return nil
|
||||
}
|
||||
379
cmd/stdiscosrv/database.go
Normal file
@@ -0,0 +1,379 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../script/protofmt.go database.proto
|
||||
//go:generate protoc -I ../../ -I . --gogofast_out=. database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type defaultClock struct{}
|
||||
|
||||
func (defaultClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
type database interface {
|
||||
put(key string, rec DatabaseRecord) error
|
||||
merge(key string, addrs []DatabaseAddress, seen int64) error
|
||||
get(key string) (DatabaseRecord, error)
|
||||
}
|
||||
|
||||
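// levelDBStore wraps a LevelDB database. Writes (put, merge) are serialized
// by posting closures to the inbox channel, which the Serve loop drains;
// reads go directly to the underlying database.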
type levelDBStore struct {
|
||||
db *leveldb.DB
|
||||
inbox chan func()
|
||||
stop chan struct{}
|
||||
clock clock
|
||||
marshalBuf []byte
|
||||
}
|
||||
|
||||
func newLevelDBStore(dir string) (*levelDBStore, error) {
|
||||
db, err := leveldb.OpenFile(dir, levelDBOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &levelDBStore{
|
||||
db: db,
|
||||
inbox: make(chan func(), 16),
|
||||
stop: make(chan struct{}),
|
||||
clock: defaultClock{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) put(key string, rec DatabaseRecord) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpPut).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rc := make(chan error)
|
||||
|
||||
s.inbox <- func() {
|
||||
size := rec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := rec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
}
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpPut, dbResSuccess).Inc()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *levelDBStore) merge(key string, addrs []DatabaseAddress, seen int64) error {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpMerge).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
rc := make(chan error)
|
||||
newRec := DatabaseRecord{
|
||||
Addresses: addrs,
|
||||
Seen: seen,
|
||||
}
|
||||
|
||||
s.inbox <- func() {
|
||||
// grab the existing record
|
||||
oldRec, err := s.get(key)
|
||||
if err != nil {
|
||||
// "not found" is not an error from get, so this is serious
|
||||
// stuff only
|
||||
rc <- err
|
||||
return
|
||||
}
|
||||
newRec = merge(newRec, oldRec)
|
||||
|
||||
// We replicate s.put() functionality here ourselves instead of
|
||||
// calling it because we want to serialize our get above together
|
||||
// with the put in the same function.
|
||||
size := newRec.Size()
|
||||
if len(s.marshalBuf) < size {
|
||||
s.marshalBuf = make([]byte, size)
|
||||
}
|
||||
n, _ := newRec.MarshalTo(s.marshalBuf)
|
||||
rc <- s.db.Put([]byte(key), s.marshalBuf[:n], nil)
|
||||
}
|
||||
|
||||
err := <-rc
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpMerge, dbResSuccess).Inc()
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *levelDBStore) get(key string) (DatabaseRecord, error) {
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
databaseOperationSeconds.WithLabelValues(dbOpGet).Observe(time.Since(t0).Seconds())
|
||||
}()
|
||||
|
||||
keyBs := []byte(key)
|
||||
val, err := s.db.Get(keyBs, nil)
|
||||
if err == leveldb.ErrNotFound {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResNotFound).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResError).Inc()
|
||||
return DatabaseRecord{}, err
|
||||
}
|
||||
|
||||
var rec DatabaseRecord
|
||||
|
||||
if err := rec.Unmarshal(val); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResUnmarshalError).Inc()
|
||||
return DatabaseRecord{}, nil
|
||||
}
|
||||
|
||||
rec.Addresses = expire(rec.Addresses, s.clock.Now().UnixNano())
|
||||
databaseOperations.WithLabelValues(dbOpGet, dbResSuccess).Inc()
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Serve() {
|
||||
t := time.NewTimer(0)
|
||||
defer t.Stop()
|
||||
defer s.db.Close()
|
||||
|
||||
// Start the statistics serve routine. It will exit with us when
|
||||
// statisticsTrigger is closed.
|
||||
statisticsTrigger := make(chan struct{})
|
||||
statisticsDone := make(chan struct{})
|
||||
go s.statisticsServe(statisticsTrigger, statisticsDone)
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case fn := <-s.inbox:
|
||||
// Run function in serialized order.
|
||||
fn()
|
||||
|
||||
case <-t.C:
|
||||
// Trigger the statistics routine to do its thing in the
|
||||
// background.
|
||||
statisticsTrigger <- struct{}{}
|
||||
|
||||
case <-statisticsDone:
|
||||
// The statistics routine is done with one iteration, schedule
|
||||
// the next.
|
||||
t.Reset(databaseStatisticsInterval)
|
||||
|
||||
case <-s.stop:
|
||||
// We're done.
|
||||
close(statisticsTrigger)
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
// Also wait for statisticsServe to return
|
||||
<-statisticsDone
|
||||
}
|
||||
|
||||
func (s *levelDBStore) statisticsServe(trigger <-chan struct{}, done chan<- struct{}) {
|
||||
defer close(done)
|
||||
|
||||
for range trigger {
|
||||
t0 := time.Now()
|
||||
nowNanos := t0.UnixNano()
|
||||
cutoff24h := t0.Add(-24 * time.Hour).UnixNano()
|
||||
cutoff1w := t0.Add(-7 * 24 * time.Hour).UnixNano()
|
||||
cutoff2Mon := t0.Add(-60 * 24 * time.Hour).UnixNano()
|
||||
current, last24h, last1w, inactive, errors := 0, 0, 0, 0, 0
|
||||
|
||||
iter := s.db.NewIterator(&util.Range{}, nil)
|
||||
for iter.Next() {
|
||||
// Attempt to unmarshal the record and count the
|
||||
// failure if there's something wrong with it.
|
||||
var rec DatabaseRecord
|
||||
if err := rec.Unmarshal(iter.Value()); err != nil {
|
||||
errors++
|
||||
continue
|
||||
}
|
||||
|
||||
// If there are addresses that have not expired it's a current
|
||||
// record, otherwise account it based on when it was last seen
|
||||
// (last 24 hours or last week) or finally as inactive.
|
||||
switch {
|
||||
case len(expire(rec.Addresses, nowNanos)) > 0:
|
||||
current++
|
||||
case rec.Seen > cutoff24h:
|
||||
last24h++
|
||||
case rec.Seen > cutoff1w:
|
||||
last1w++
|
||||
case rec.Seen > cutoff2Mon:
|
||||
inactive++
|
||||
case rec.Missed < cutoff2Mon:
|
||||
// It hasn't been seen lately and we haven't recorded
|
||||
// someone asking for this device in a long time either;
|
||||
// delete the record.
|
||||
if err := s.db.Delete(iter.Key(), nil); err != nil {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResError).Inc()
|
||||
} else {
|
||||
databaseOperations.WithLabelValues(dbOpDelete, dbResSuccess).Inc()
|
||||
}
|
||||
default:
|
||||
inactive++
|
||||
}
|
||||
}
|
||||
|
||||
iter.Release()
|
||||
|
||||
databaseKeys.WithLabelValues("current").Set(float64(current))
|
||||
databaseKeys.WithLabelValues("last24h").Set(float64(last24h))
|
||||
databaseKeys.WithLabelValues("last1w").Set(float64(last1w))
|
||||
databaseKeys.WithLabelValues("inactive").Set(float64(inactive))
|
||||
databaseKeys.WithLabelValues("error").Set(float64(errors))
|
||||
databaseStatisticsSeconds.Set(time.Since(t0).Seconds())
|
||||
|
||||
// Signal that we are done and can be scheduled again.
|
||||
done <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *levelDBStore) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
// merge returns the merged result of the two database records a and b. The
|
||||
// result is the union of the two address sets, with the newer expiry time
|
||||
// chosen for any duplicates.
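// For example, merging {a: 10, b: 20} with {b: 30, c: 5} yields
// {a: 10, b: 30, c: 5}, where the numbers are expiry times.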
|
||||
func merge(a, b DatabaseRecord) DatabaseRecord {
|
||||
// Both lists must be sorted for this to work.
|
||||
if !sort.IsSorted(databaseAddressOrder(a.Addresses)) {
|
||||
log.Println("Warning: bug: addresses not correctly sorted in merge")
|
||||
a.Addresses = sortedAddressCopy(a.Addresses)
|
||||
}
|
||||
if !sort.IsSorted(databaseAddressOrder(b.Addresses)) {
|
||||
// no warning because this is the side we read from disk and it may
|
||||
// legitimately predate correct sorting.
|
||||
b.Addresses = sortedAddressCopy(b.Addresses)
|
||||
}
|
||||
|
||||
res := DatabaseRecord{
|
||||
Addresses: make([]DatabaseAddress, 0, len(a.Addresses)+len(b.Addresses)),
|
||||
Seen: a.Seen,
|
||||
}
|
||||
if b.Seen > a.Seen {
|
||||
res.Seen = b.Seen
|
||||
}
|
||||
|
||||
aIdx := 0
|
||||
bIdx := 0
|
||||
aAddrs := a.Addresses
|
||||
bAddrs := b.Addresses
|
||||
loop:
|
||||
for {
|
||||
switch {
|
||||
case aIdx == len(aAddrs) && bIdx == len(bAddrs):
|
||||
// both lists are exhausted, we are done
|
||||
break loop
|
||||
|
||||
case aIdx == len(aAddrs):
|
||||
// a is exhausted, pick from b and continue
|
||||
res.Addresses = append(res.Addresses, bAddrs[bIdx])
|
||||
bIdx++
|
||||
continue
|
||||
|
||||
case bIdx == len(bAddrs):
|
||||
// b is exhausted, pick from a and continue
|
||||
res.Addresses = append(res.Addresses, aAddrs[aIdx])
|
||||
aIdx++
|
||||
continue
|
||||
}
|
||||
|
||||
// We have values left on both sides.
|
||||
aVal := aAddrs[aIdx]
|
||||
bVal := bAddrs[bIdx]
|
||||
|
||||
switch {
|
||||
case aVal.Address == bVal.Address:
|
||||
// update for same address, pick newer
|
||||
if aVal.Expires > bVal.Expires {
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
} else {
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
}
|
||||
aIdx++
|
||||
bIdx++
|
||||
|
||||
case aVal.Address < bVal.Address:
|
||||
// a is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, aVal)
|
||||
aIdx++
|
||||
|
||||
default:
|
||||
// b is smallest, pick it and continue
|
||||
res.Addresses = append(res.Addresses, bVal)
|
||||
bIdx++
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// expire returns the list of addresses after removing expired entries.
|
||||
// Expiration happens in place, so the slice given as the parameter is
|
||||
// destroyed. Internal order is not preserved.
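// For example, expire([{a, 5}, {b, 15}, {c, 3}], 10) keeps only {b, 15}.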
|
||||
func expire(addrs []DatabaseAddress, now int64) []DatabaseAddress {
|
||||
i := 0
|
||||
for i < len(addrs) {
|
||||
if addrs[i].Expires < now {
|
||||
// This item is expired. Replace it with the last in the list
|
||||
// (noop if we are at the last item).
|
||||
addrs[i] = addrs[len(addrs)-1]
|
||||
// Wipe the last item of the list to release references to
|
||||
// strings and stuff.
|
||||
addrs[len(addrs)-1] = DatabaseAddress{}
|
||||
// Shorten the slice.
|
||||
addrs = addrs[:len(addrs)-1]
|
||||
continue
|
||||
}
|
||||
i++
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func sortedAddressCopy(addrs []DatabaseAddress) []DatabaseAddress {
|
||||
sorted := make([]DatabaseAddress, len(addrs))
|
||||
copy(sorted, addrs)
|
||||
sort.Sort(databaseAddressOrder(sorted))
|
||||
return sorted
|
||||
}
|
||||
|
||||
type databaseAddressOrder []DatabaseAddress
|
||||
|
||||
func (s databaseAddressOrder) Less(a, b int) bool {
|
||||
return s[a].Address < s[b].Address
|
||||
}
|
||||
|
||||
func (s databaseAddressOrder) Swap(a, b int) {
|
||||
s[a], s[b] = s[b], s[a]
|
||||
}
|
||||
|
||||
func (s databaseAddressOrder) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
856
cmd/stdiscosrv/database.pb.go
Normal file
@@ -0,0 +1,856 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: database.proto
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type DatabaseRecord struct {
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"`
|
||||
Misses int32 `protobuf:"varint,2,opt,name=misses,proto3" json:"misses,omitempty"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
Missed int64 `protobuf:"varint,4,opt,name=missed,proto3" json:"missed,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Reset() { *m = DatabaseRecord{} }
|
||||
func (m *DatabaseRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseRecord) ProtoMessage() {}
|
||||
func (*DatabaseRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{0}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseRecord.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseRecord proto.InternalMessageInfo
|
||||
|
||||
type ReplicationRecord struct {
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Addresses []DatabaseAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses"`
|
||||
Seen int64 `protobuf:"varint,3,opt,name=seen,proto3" json:"seen,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Reset() { *m = ReplicationRecord{} }
|
||||
func (m *ReplicationRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReplicationRecord) ProtoMessage() {}
|
||||
func (*ReplicationRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{1}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_ReplicationRecord.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReplicationRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ReplicationRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReplicationRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReplicationRecord proto.InternalMessageInfo
|
||||
|
||||
type DatabaseAddress struct {
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"`
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Reset() { *m = DatabaseAddress{} }
|
||||
func (m *DatabaseAddress) String() string { return proto.CompactTextString(m) }
|
||||
func (*DatabaseAddress) ProtoMessage() {}
|
||||
func (*DatabaseAddress) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b90fe3356ea5df07, []int{2}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_DatabaseAddress.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_DatabaseAddress.Merge(m, src)
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *DatabaseAddress) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_DatabaseAddress.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_DatabaseAddress proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*DatabaseRecord)(nil), "main.DatabaseRecord")
|
||||
proto.RegisterType((*ReplicationRecord)(nil), "main.ReplicationRecord")
|
||||
proto.RegisterType((*DatabaseAddress)(nil), "main.DatabaseAddress")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("database.proto", fileDescriptor_b90fe3356ea5df07) }
|
||||
|
||||
var fileDescriptor_b90fe3356ea5df07 = []byte{
|
||||
// 270 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4a, 0xc4, 0x30,
|
||||
0x18, 0x85, 0x9b, 0x49, 0x1d, 0x99, 0x08, 0xa3, 0x06, 0x94, 0x20, 0x12, 0x4b, 0xdd, 0x74, 0xd5,
|
||||
0x01, 0x5d, 0xb9, 0x74, 0xd0, 0x0b, 0xe4, 0x06, 0xe9, 0xe4, 0x77, 0x08, 0x3a, 0x4d, 0x49, 0x2a,
|
||||
0xe8, 0x29, 0xf4, 0x58, 0x5d, 0xce, 0xd2, 0x95, 0x68, 0x7b, 0x11, 0x69, 0x26, 0x55, 0x14, 0x37,
|
||||
0xb3, 0x7b, 0xdf, 0xff, 0xbf, 0x97, 0xbc, 0x84, 0x4c, 0x95, 0xac, 0x65, 0x21, 0x1d, 0xe4, 0x95,
|
||||
0x35, 0xb5, 0xa1, 0xf1, 0x4a, 0xea, 0xf2, 0xe4, 0xdc, 0x42, 0x65, 0xdc, 0xcc, 0x8f, 0x8a, 0xc7,
|
||||
0xbb, 0xd9, 0xd2, 0x2c, 0x8d, 0x07, 0xaf, 0x36, 0xd6, 0xf4, 0x05, 0x91, 0xe9, 0x4d, 0x48, 0x0b,
|
||||
0x58, 0x18, 0xab, 0xe8, 0x15, 0x99, 0x48, 0xa5, 0x2c, 0x38, 0x07, 0x8e, 0xa1, 0x04, 0x67, 0x7b,
|
||||
0x17, 0x47, 0x79, 0x7f, 0x62, 0x3e, 0x18, 0xaf, 0x37, 0xeb, 0x79, 0xdc, 0xbc, 0x9f, 0x45, 0xe2,
|
||||
0xc7, 0x4d, 0x8f, 0xc9, 0x78, 0xa5, 0x7d, 0x6e, 0x94, 0xa0, 0x6c, 0x47, 0x04, 0xa2, 0x94, 0xc4,
|
||||
0x0e, 0xa0, 0x64, 0x38, 0x41, 0x19, 0x16, 0x5e, 0x7f, 0x7b, 0x15, 0x8b, 0xfd, 0x34, 0x50, 0x5a,
|
||||
0x93, 0x43, 0x01, 0xd5, 0x83, 0x5e, 0xc8, 0x5a, 0x9b, 0x32, 0x74, 0x3a, 0x20, 0xf8, 0x1e, 0x9e,
|
||||
0x19, 0x4a, 0x50, 0x36, 0x11, 0xbd, 0xfc, 0xdd, 0x72, 0xb4, 0x55, 0xcb, 0x7f, 0xda, 0xa4, 0xb7,
|
||||
0x64, 0xff, 0x4f, 0x8e, 0x32, 0xb2, 0x1b, 0x32, 0xe1, 0xde, 0x01, 0xfb, 0x0d, 0x3c, 0x55, 0xda,
|
||||
0x86, 0x77, 0x62, 0x31, 0xe0, 0xfc, 0xb4, 0xf9, 0xe4, 0x51, 0xd3, 0x72, 0xb4, 0x6e, 0x39, 0xfa,
|
||||
0x68, 0x39, 0x7a, 0xed, 0x78, 0xb4, 0xee, 0x78, 0xf4, 0xd6, 0xf1, 0xa8, 0x18, 0xfb, 0x3f, 0xbf,
|
||||
0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x7a, 0xa2, 0xf6, 0x1e, 0xb0, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Missed != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Missed))
|
||||
i--
|
||||
dAtA[i] = 0x20
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Misses))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seen != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Seen))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Expires != 0 {
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(m.Expires))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Address) > 0 {
|
||||
i -= len(m.Address)
|
||||
copy(dAtA[i:], m.Address)
|
||||
i = encodeVarintDatabase(dAtA, i, uint64(len(m.Address)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDatabase(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDatabase(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *DatabaseRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Misses != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Misses))
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
if m.Missed != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Missed))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ReplicationRecord) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if len(m.Addresses) > 0 {
|
||||
for _, e := range m.Addresses {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Seen != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Seen))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *DatabaseAddress) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Address)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDatabase(uint64(l))
|
||||
}
|
||||
if m.Expires != 0 {
|
||||
n += 1 + sovDatabase(uint64(m.Expires))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDatabase(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDatabase(x uint64) (n int) {
|
||||
return sovDatabase(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *DatabaseRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Misses", wireType)
|
||||
}
|
||||
m.Misses = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Misses |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Missed", wireType)
|
||||
}
|
||||
m.Missed = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Missed |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ReplicationRecord) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ReplicationRecord: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addresses = append(m.Addresses, DatabaseAddress{})
|
||||
if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seen", wireType)
|
||||
}
|
||||
m.Seen = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seen |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *DatabaseAddress) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: DatabaseAddress: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Address = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Expires", wireType)
|
||||
}
|
||||
m.Expires = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Expires |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDatabase(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDatabase
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDatabase(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDatabase
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDatabase
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDatabase
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDatabase = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDatabase = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDatabase = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
36	cmd/stdiscosrv/database.proto	Normal file
@@ -0,0 +1,36 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

syntax = "proto3";

package main;

import "repos/protobuf/gogoproto/gogo.proto";

option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_unkeyed_all) = false;
option (gogoproto.goproto_unrecognized_all) = false;
option (gogoproto.goproto_sizecache_all) = false;

message DatabaseRecord {
	repeated DatabaseAddress addresses = 1 [(gogoproto.nullable) = false];
	int32 misses = 2; // Number of lookups* without hits
	int64 seen = 3;   // Unix nanos, last device announce
	int64 missed = 4; // Unix nanos, last* failed lookup
}

// *) Not every lookup results in a write, so may not be completely accurate

message ReplicationRecord {
	string key = 1;
	repeated DatabaseAddress addresses = 2 [(gogoproto.nullable) = false];
	int64 seen = 3; // Unix nanos, last device announce
}

message DatabaseAddress {
	string address = 1;
	int64 expires = 2; // Unix nanos
}
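For reference, the generated code above gives each of these messages Marshal, MarshalTo and Unmarshal methods. The following is a minimal round-trip sketch, illustrative only and not part of this change; it assumes it is compiled alongside the generated database.pb.go in package main, and the values are hypothetical:

package main

import (
	"fmt"
	"time"
)

// exampleRoundTrip encodes a ReplicationRecord with the generated marshaler
// and decodes it again. Illustrative sketch only.
func exampleRoundTrip() error {
	rec := ReplicationRecord{
		Key: "abcd",
		Addresses: []DatabaseAddress{
			{Address: "tcp://1.2.3.4:5", Expires: time.Now().Add(time.Minute).UnixNano()},
		},
		Seen: time.Now().UnixNano(),
	}

	// Marshal allocates a rec.Size() byte buffer and fills it back-to-front.
	buf, err := rec.Marshal()
	if err != nil {
		return err
	}

	var out ReplicationRecord
	if err := out.Unmarshal(buf); err != nil {
		return err
	}
	fmt.Println(out.Key, out.Addresses, out.Seen)
	return nil
}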
211	cmd/stdiscosrv/database_test.go	Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestDatabaseGetSet(t *testing.T) {
|
||||
os.RemoveAll("_database")
|
||||
defer os.RemoveAll("_database")
|
||||
db, err := newLevelDBStore("_database")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
go db.Serve()
|
||||
defer db.Stop()
|
||||
|
||||
// Check missing record
|
||||
|
||||
rec, err := db.get("abcd")
|
||||
if err != nil {
|
||||
t.Error("not found should not be an error")
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Error("addresses should be empty")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Error("missing should be zero")
|
||||
}
|
||||
|
||||
// Set up a clock
|
||||
|
||||
now := time.Now()
|
||||
tc := &testClock{now}
|
||||
db.clock = tc
|
||||
|
||||
// Put a record
|
||||
|
||||
rec.Addresses = []DatabaseAddress{
|
||||
{Address: "tcp://1.2.3.4:5", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.put("abcd", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 1 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Addresses[0].Address != "tcp://1.2.3.4:5" {
|
||||
t.Log(rec.Addresses)
|
||||
t.Error("incorrect address")
|
||||
}
|
||||
|
||||
// Wind the clock one half expiry, and merge in a new address
|
||||
|
||||
tc.wind(30 * time.Second)
|
||||
|
||||
addrs := []DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("abcd", addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 2 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have two addresses")
|
||||
}
|
||||
if rec.Addresses[0].Address != "tcp://1.2.3.4:5" {
|
||||
t.Log(rec.Addresses)
|
||||
t.Error("incorrect address[0]")
|
||||
}
|
||||
if rec.Addresses[1].Address != "tcp://6.7.8.9:0" {
|
||||
t.Log(rec.Addresses)
|
||||
t.Error("incorrect address[1]")
|
||||
}
|
||||
|
||||
// Pass the first expiry time
|
||||
|
||||
tc.wind(45 * time.Second)
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("abcd")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 1 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Addresses[0].Address != "tcp://6.7.8.9:0" {
|
||||
t.Log(rec.Addresses)
|
||||
t.Error("incorrect address")
|
||||
}
|
||||
|
||||
// Put a record with misses
|
||||
|
||||
rec = DatabaseRecord{Misses: 42}
|
||||
if err := db.put("efgh", rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 0 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have no addresses")
|
||||
}
|
||||
if rec.Misses != 42 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("incorrect misses")
|
||||
}
|
||||
|
||||
// Set an address
|
||||
|
||||
addrs = []DatabaseAddress{
|
||||
{Address: "tcp://6.7.8.9:0", Expires: tc.Now().Add(time.Minute).UnixNano()},
|
||||
}
|
||||
if err := db.merge("efgh", addrs, tc.Now().UnixNano()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify it
|
||||
|
||||
rec, err = db.get("efgh")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(rec.Addresses) != 1 {
|
||||
t.Log(rec.Addresses)
|
||||
t.Fatal("should have one address")
|
||||
}
|
||||
if rec.Misses != 0 {
|
||||
t.Log(rec.Misses)
|
||||
t.Error("should have no misses")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
// expire is called with now=10 in all cases
|
||||
cases := []struct {
|
||||
a []DatabaseAddress
|
||||
b []DatabaseAddress
|
||||
}{
|
||||
{
|
||||
a: nil,
|
||||
b: nil,
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 9}, {Address: "b", Expires: 9}, {Address: "c", Expires: 9}},
|
||||
b: []DatabaseAddress{},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
b: []DatabaseAddress{{Address: "a", Expires: 10}, {Address: "b", Expires: 10}, {Address: "c", Expires: 10}},
|
||||
},
|
||||
{
|
||||
a: []DatabaseAddress{{Address: "a", Expires: 5}, {Address: "b", Expires: 15}, {Address: "c", Expires: 5}, {Address: "d", Expires: 15}, {Address: "e", Expires: 5}},
|
||||
b: []DatabaseAddress{{Address: "d", Expires: 15}, {Address: "b", Expires: 15}}, // gets reordered
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
res := expire(tc.a, 10)
|
||||
if fmt.Sprint(res) != fmt.Sprint(tc.b) {
|
||||
t.Errorf("Incorrect result %v, expected %v", res, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testClock struct {
|
||||
now time.Time
|
||||
}
|
||||
|
||||
func (t *testClock) wind(d time.Duration) {
|
||||
t.now = t.now.Add(d)
|
||||
}
|
||||
|
||||
func (t *testClock) Now() time.Time {
|
||||
return t.now
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type setupFunc func(db *sql.DB) error
|
||||
type compileFunc func(db *sql.DB) (map[string]*sql.Stmt, error)
|
||||
|
||||
var (
|
||||
setupFuncs = make(map[string]setupFunc)
|
||||
compileFuncs = make(map[string]compileFunc)
|
||||
)
|
||||
|
||||
func register(name string, setup setupFunc, compile compileFunc) {
|
||||
setupFuncs[name] = setup
|
||||
compileFuncs[name] = compile
|
||||
}
|
||||
|
||||
func setup(backend string, db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
setup, ok := setupFuncs[backend]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unsupported backend")
|
||||
}
|
||||
if err := setup(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return compileFuncs[backend](db)
|
||||
}
|
||||
4	cmd/stdiscosrv/etc/firewall-ufw/stdiscosrv	Normal file
@@ -0,0 +1,4 @@
[stdiscosrv]
title=Syncthing discovery server
description=Lets syncthing clients discover each other
ports=8443/tcp
3	cmd/stdiscosrv/etc/linux-systemd/default	Normal file
@@ -0,0 +1,3 @@
# Default settings for syncthing-discosrv (stdiscosrv).
## Add Options here:
DISCOSRV_OPTS=
25	cmd/stdiscosrv/etc/linux-systemd/stdiscosrv.service	Normal file
@@ -0,0 +1,25 @@
[Unit]
Description=Syncthing Discovery Server
After=network.target
Documentation=man:stdiscosrv(1)

[Service]
WorkingDirectory=/var/lib/syncthing-discosrv
EnvironmentFile=/etc/default/syncthing-discosrv
ExecStart=/usr/bin/stdiscosrv $DISCOSRV_OPTS

# Hardening
User=syncthing-discosrv
Group=syncthing
ProtectSystem=strict
ReadWritePaths=/var/lib/syncthing-discosrv
NoNewPrivileges=true
PrivateTmp=true
PrivateDevices=true
ProtectHome=true
SystemCallArchitectures=native
MemoryDenyWriteExecute=true

[Install]
WantedBy=multi-user.target
Alias=syncthing-discosrv.service
@@ -1,146 +1,186 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/thejerf/suture"
|
||||
)
|
||||
|
||||
const (
|
||||
minNegCache = 60 // seconds
|
||||
maxNegCache = 3600 // seconds
|
||||
maxDeviceAge = 7 * 86400 // one week, in seconds
|
||||
addressExpiryTime = 2 * time.Hour
|
||||
databaseStatisticsInterval = 5 * time.Minute
|
||||
|
||||
// Reannounce-After is set to reannounceAfterSeconds +
|
||||
// random(reannounzeFuzzSeconds), similar for Retry-After
|
||||
reannounceAfterSeconds = 3300
|
||||
reannounzeFuzzSeconds = 300
|
||||
errorRetryAfterSeconds = 1500
|
||||
errorRetryFuzzSeconds = 300
|
||||
|
||||
// Retry for not found is minSeconds + failures * incSeconds +
|
||||
// random(fuzz), where failures is the number of consecutive lookups
|
||||
// with no answer, up to maxSeconds. The fuzz is applied after capping
|
||||
// to maxSeconds.
|
||||
notFoundRetryMinSeconds = 60
|
||||
notFoundRetryMaxSeconds = 3540
|
||||
notFoundRetryIncSeconds = 10
|
||||
notFoundRetryFuzzSeconds = 60
|
||||
|
||||
// How often (in requests) we serialize the missed counter to database.
|
||||
notFoundMissesWriteInterval = 10
|
||||
|
||||
httpReadTimeout = 5 * time.Second
|
||||
httpWriteTimeout = 5 * time.Second
|
||||
httpMaxHeaderBytes = 1 << 10
|
||||
|
||||
// Size of the replication outbox channel
|
||||
replicationOutboxSize = 10000
|
||||
)
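// Illustrative sketch (not part of this file): how the not-found retry
// constants above combine into a Retry-After value, per the comment block.
// The function name is hypothetical and this assumes math/rand is imported.
func exampleNotFoundRetryAfter(failures int) int {
	retry := notFoundRetryMinSeconds + failures*notFoundRetryIncSeconds
	if retry > notFoundRetryMaxSeconds {
		retry = notFoundRetryMaxSeconds
	}
	// The fuzz is applied after capping to maxSeconds.
	return retry + rand.Intn(notFoundRetryFuzzSeconds)
}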
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`stdiscosrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
// These options make the database a little more optimized for writes, at
|
||||
// the expense of some memory usage and risk of losing writes in a (system)
|
||||
// crash.
|
||||
var levelDBOptions = &opt.Options{
|
||||
NoSync: true,
|
||||
WriteBuffer: 32 << 20, // default 4<<20
|
||||
}
|
||||
|
||||
var (
|
||||
lruSize = 10240
|
||||
limitAvg = 5
|
||||
limitBurst = 20
|
||||
globalStats stats
|
||||
statsFile string
|
||||
backend = "ql"
|
||||
dsn = getEnvDefault("STDISCOSRV_DB_DSN", "memory://stdiscosrv")
|
||||
certFile = "cert.pem"
|
||||
keyFile = "key.pem"
|
||||
debug = false
|
||||
useHTTP = false
|
||||
debug = false
|
||||
)
|
||||
|
||||
func main() {
|
||||
const (
|
||||
cleanIntv = 1 * time.Hour
|
||||
statsIntv = 5 * time.Minute
|
||||
)
|
||||
|
||||
var listen string
|
||||
var dir string
|
||||
var metricsListen string
|
||||
var replicationListen string
|
||||
var replicationPeers string
|
||||
var certFile string
|
||||
var keyFile string
|
||||
var useHTTP bool
|
||||
|
||||
log.SetOutput(os.Stdout)
|
||||
log.SetFlags(0)
|
||||
|
||||
flag.StringVar(&certFile, "cert", "./cert.pem", "Certificate file")
|
||||
flag.StringVar(&dir, "db-dir", "./discovery.db", "Database directory")
|
||||
flag.BoolVar(&debug, "debug", false, "Print debug output")
|
||||
flag.BoolVar(&useHTTP, "http", false, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.StringVar(&listen, "listen", ":8443", "Listen address")
|
||||
flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
|
||||
flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average package rate, per 10 s")
|
||||
flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
|
||||
flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
|
||||
flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
|
||||
flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
|
||||
flag.StringVar(&certFile, "cert", certFile, "Certificate file")
|
||||
flag.StringVar(&keyFile, "key", keyFile, "Key file")
|
||||
flag.BoolVar(&debug, "debug", debug, "Debug")
|
||||
flag.BoolVar(&useHTTP, "http", useHTTP, "Listen on HTTP (behind an HTTPS proxy)")
|
||||
flag.StringVar(&keyFile, "key", "./key.pem", "Key file")
|
||||
flag.StringVar(&metricsListen, "metrics-listen", "", "Metrics listen address")
|
||||
flag.StringVar(&replicationPeers, "replicate", "", "Replication peers, id@address, comma separated")
|
||||
flag.StringVar(&replicationListen, "replication-listen", ":19200", "Replication listen address")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
log.Println(LongVersion)
|
||||
log.Println(build.LongVersion)
|
||||
if *showVersion {
|
||||
return
|
||||
}
|
||||
|
||||
var cert tls.Certificate
|
||||
var err error
|
||||
if !useHTTP {
|
||||
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 20*365)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 3072)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
}
|
||||
|
||||
db, err := sql.Open(backend, dsn)
|
||||
devID := protocol.NewDeviceID(cert.Certificate[0])
|
||||
log.Println("Server device ID is", devID)
|
||||
|
||||
// Parse the replication specs, if any.
|
||||
var allowedReplicationPeers []protocol.DeviceID
|
||||
var replicationDestinations []string
|
||||
parts := strings.Split(replicationPeers, ",")
|
||||
for _, part := range parts {
|
||||
fields := strings.Split(part, "@")
|
||||
|
||||
switch len(fields) {
|
||||
case 2:
|
||||
// This is an id@address specification. Grab the address for the
|
||||
// destination list. Try to resolve it once to catch obvious
|
||||
// syntax errors here rather than having the sender service fail
|
||||
// repeatedly later.
|
||||
_, err := net.ResolveTCPAddr("tcp", fields[1])
|
||||
if err != nil {
|
||||
log.Fatalln("Resolving address:", err)
|
||||
}
|
||||
replicationDestinations = append(replicationDestinations, fields[1])
|
||||
fallthrough // N.B.
|
||||
|
||||
case 1:
|
||||
// The first part is always a device ID.
|
||||
id, err := protocol.DeviceIDFromString(fields[0])
|
||||
if err != nil {
|
||||
log.Fatalln("Parsing device ID:", err)
|
||||
}
|
||||
allowedReplicationPeers = append(allowedReplicationPeers, id)
|
||||
|
||||
default:
|
||||
log.Fatalln("Unrecognized replication spec:", part)
|
||||
}
|
||||
}
|
||||
|
||||
// Root of the service tree.
|
||||
main := suture.New("main", suture.Spec{
|
||||
PassThroughPanics: true,
|
||||
})
|
||||
|
||||
// Start the database.
|
||||
db, err := newLevelDBStore(dir)
|
||||
if err != nil {
|
||||
log.Fatalln("sql.Open:", err)
|
||||
log.Fatalln("Open database:", err)
|
||||
}
|
||||
prep, err := setup(backend, db)
|
||||
if err != nil {
|
||||
log.Fatalln("Setup:", err)
|
||||
main.Add(db)
|
||||
|
||||
// Start any replication senders.
|
||||
var repl replicationMultiplexer
|
||||
for _, dst := range replicationDestinations {
|
||||
rs := newReplicationSender(dst, cert, allowedReplicationPeers)
|
||||
main.Add(rs)
|
||||
repl = append(repl, rs)
|
||||
}
|
||||
|
||||
main := suture.NewSimple("main")
|
||||
// If we have replication configured, start the replication listener.
|
||||
if len(allowedReplicationPeers) > 0 {
|
||||
rl := newReplicationListener(replicationListen, cert, allowedReplicationPeers, db)
|
||||
main.Add(rl)
|
||||
}
|
||||
|
||||
main.Add(&querysrv{
|
||||
addr: listen,
|
||||
cert: cert,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
// Start the main API server.
|
||||
qs := newAPISrv(listen, cert, db, repl, useHTTP)
|
||||
main.Add(qs)
|
||||
|
||||
main.Add(&cleansrv{
|
||||
intv: cleanIntv,
|
||||
db: db,
|
||||
prep: prep,
|
||||
})
|
||||
// If we have a metrics port configured, start a metrics handler.
|
||||
if metricsListen != "" {
|
||||
go func() {
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(metricsListen, mux))
|
||||
}()
|
||||
}
|
||||
|
||||
main.Add(&statssrv{
|
||||
intv: statsIntv,
|
||||
file: statsFile,
|
||||
db: db,
|
||||
})
|
||||
|
||||
globalStats.Reset()
|
||||
// Engage!
|
||||
main.Serve()
|
||||
}
|
||||
|
||||
func getEnvDefault(key, def string) string {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
func next(intv time.Duration) time.Duration {
|
||||
t0 := time.Now()
|
||||
t1 := t0.Add(intv).Truncate(intv)
|
||||
return t1.Sub(t0)
|
||||
}
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("postgres", postgresSetup, postgresCompile)
|
||||
}
|
||||
|
||||
func postgresSetup(db *sql.DB) error {
|
||||
var err error
|
||||
|
||||
db.SetMaxIdleConns(4)
|
||||
db.SetMaxOpenConns(8)
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Devices (
|
||||
DeviceID CHAR(63) NOT NULL PRIMARY KEY,
|
||||
Seen TIMESTAMP NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var tmp string
|
||||
row := db.QueryRow(`SELECT 'DevicesDeviceIDIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'DevicesSeenIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX DevicesSeenIndex ON Devices (Seen)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
|
||||
DeviceID CHAR(63) NOT NULL,
|
||||
Seen TIMESTAMP NOT NULL,
|
||||
Address VARCHAR(2048) NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'AddressesDeviceIDSeenIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDSeenIndex ON Addresses (DeviceID, Seen)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
row = db.QueryRow(`SELECT 'AddressesDeviceIDAddressIndex'::regclass`)
|
||||
if err = row.Scan(&tmp); err != nil {
|
||||
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func postgresCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
stmts := map[string]string{
|
||||
"cleanAddress": "DELETE FROM Addresses WHERE Seen < now() - '2 hour'::INTERVAL",
|
||||
"cleanDevice": fmt.Sprintf("DELETE FROM Devices WHERE Seen < now() - '%d hour'::INTERVAL", maxDeviceAge/3600),
|
||||
"countAddress": "SELECT count(*) FROM Addresses",
|
||||
"countDevice": "SELECT count(*) FROM Devices",
|
||||
"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
|
||||
"insertDevice": "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
|
||||
"selectAddress": "SELECT Address FROM Addresses WHERE DeviceID=$1 AND Seen > now() - '1 hour'::INTERVAL ORDER BY random() LIMIT 16",
|
||||
"selectDevice": "SELECT Seen FROM Devices WHERE DeviceID=$1",
|
||||
"updateAddress": "UPDATE Addresses SET Seen=now() WHERE DeviceID=$1 AND Address=$2",
|
||||
"updateDevice": "UPDATE Devices SET Seen=now() WHERE DeviceID=$1",
|
||||
}
|
||||
|
||||
res := make(map[string]*sql.Stmt, len(stmts))
|
||||
for key, stmt := range stmts {
|
||||
prep, err := db.Prepare(stmt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[key] = prep
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/cznic/ql"
|
||||
)
|
||||
|
||||
func init() {
|
||||
ql.RegisterDriver()
|
||||
register("ql", qlSetup, qlCompile)
|
||||
}
|
||||
|
||||
func qlSetup(db *sql.DB) (err error) {
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
} else {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Devices (
|
||||
DeviceID STRING NOT NULL,
|
||||
Seen TIME NOT NULL
|
||||
)`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = tx.Exec(`CREATE INDEX IF NOT EXISTS DevicesDeviceIDIndex ON Devices (DeviceID)`); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = tx.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
|
||||
DeviceID STRING NOT NULL,
|
||||
Seen TIME NOT NULL,
|
||||
Address STRING NOT NULL,
|
||||
)`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = tx.Exec(`CREATE INDEX IF NOT EXISTS AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
|
||||
return
|
||||
}
|
||||
|
||||
func qlCompile(db *sql.DB) (map[string]*sql.Stmt, error) {
|
||||
stmts := map[string]string{
|
||||
"cleanAddress": `DELETE FROM Addresses WHERE Seen < now() - duration("2h")`,
|
||||
"cleanDevice": fmt.Sprintf(`DELETE FROM Devices WHERE Seen < now() - duration("%dh")`, maxDeviceAge/3600),
|
||||
"countAddress": "SELECT count(*) FROM Addresses",
|
||||
"countDevice": "SELECT count(*) FROM Devices",
|
||||
"insertAddress": "INSERT INTO Addresses (DeviceID, Seen, Address) VALUES ($1, now(), $2)",
|
||||
"insertDevice": "INSERT INTO Devices (DeviceID, Seen) VALUES ($1, now())",
|
||||
"selectAddress": `SELECT Address from Addresses WHERE DeviceID==$1 AND Seen > now() - duration("1h") LIMIT 16`,
|
||||
"selectDevice": "SELECT Seen FROM Devices WHERE DeviceID==$1",
|
||||
"updateAddress": "UPDATE Addresses Seen=now() WHERE DeviceID==$1 AND Address==$2",
|
||||
"updateDevice": "UPDATE Devices Seen=now() WHERE DeviceID==$1",
|
||||
}
|
||||
|
||||
res := make(map[string]*sql.Stmt, len(stmts))
|
||||
for key, stmt := range stmts {
|
||||
prep, err := db.Prepare(stmt)
|
||||
if err != nil {
|
||||
log.Println("Failed to compile", stmt)
|
||||
return nil, err
|
||||
}
|
||||
res[key] = prep
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -1,492 +0,0 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
type querysrv struct {
|
||||
addr string
|
||||
db *sql.DB
|
||||
prep map[string]*sql.Stmt
|
||||
limiter *safeCache
|
||||
cert tls.Certificate
|
||||
listener net.Listener
|
||||
}
|
||||
|
||||
type announcement struct {
|
||||
Seen time.Time `json:"seen"`
|
||||
Addresses []string `json:"addresses"`
|
||||
}
|
||||
|
||||
type safeCache struct {
|
||||
*lru.Cache
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
func (s *safeCache) Get(key string) (val interface{}, ok bool) {
|
||||
s.mut.Lock()
|
||||
val, ok = s.Cache.Get(key)
|
||||
s.mut.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *safeCache) Add(key string, val interface{}) {
|
||||
s.mut.Lock()
|
||||
s.Cache.Add(key, val)
|
||||
s.mut.Unlock()
|
||||
}
|
||||
|
||||
type requestID int64
|
||||
|
||||
func (i requestID) String() string {
|
||||
return fmt.Sprintf("%016x", int64(i))
|
||||
}
|
||||
|
||||
type contextKey int
|
||||
|
||||
const idKey contextKey = iota
|
||||
|
||||
func negCacheFor(lastSeen time.Time) int {
|
||||
since := time.Since(lastSeen).Seconds()
|
||||
if since >= maxDeviceAge {
|
||||
return maxNegCache
|
||||
}
|
||||
if since < 0 {
|
||||
// That's weird
|
||||
return minNegCache
|
||||
}
|
||||
|
||||
// Return a value linearly scaled from minNegCache (at zero seconds ago)
|
||||
// to maxNegCache (at maxDeviceAge seconds ago).
|
||||
r := since / maxDeviceAge
|
||||
return int(minNegCache + r*(maxNegCache-minNegCache))
|
||||
}
|
||||
|
||||
func (s *querysrv) Serve() {
|
||||
s.limiter = &safeCache{
|
||||
Cache: lru.New(lruSize),
|
||||
}
|
||||
|
||||
if useHTTP {
|
||||
listener, err := net.Listen("tcp", s.addr)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = listener
|
||||
} else {
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
tlsListener, err := tls.Listen("tcp", s.addr, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Listen:", err)
|
||||
return
|
||||
}
|
||||
s.listener = tlsListener
|
||||
}
|
||||
|
||||
http.HandleFunc("/v2/", s.handler)
|
||||
http.HandleFunc("/ping", handlePing)
|
||||
|
||||
srv := &http.Server{
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 5 * time.Second,
|
||||
MaxHeaderBytes: 1 << 10,
|
||||
}
|
||||
|
||||
if err := srv.Serve(s.listener); err != nil {
|
||||
log.Println("Serve:", err)
|
||||
}
|
||||
}
|
||||
|
||||
var topCtx = context.Background()
|
||||
|
||||
func (s *querysrv) handler(w http.ResponseWriter, req *http.Request) {
|
||||
reqID := requestID(rand.Int63())
|
||||
ctx := context.WithValue(topCtx, idKey, reqID)
|
||||
|
||||
if debug {
|
||||
log.Println(reqID, req.Method, req.URL)
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
defer func() {
|
||||
diff := time.Since(t0)
|
||||
var comment string
|
||||
if diff > time.Second {
|
||||
comment = "(very slow request)"
|
||||
} else if diff > 100*time.Millisecond {
|
||||
comment = "(slow request)"
|
||||
}
|
||||
if comment != "" || debug {
|
||||
log.Println(reqID, req.Method, req.URL, "completed in", diff, comment)
|
||||
}
|
||||
}()
|
||||
|
||||
var remoteIP net.IP
|
||||
if useHTTP {
|
||||
remoteIP = net.ParseIP(req.Header.Get("X-Forwarded-For"))
|
||||
} else {
|
||||
addr, err := net.ResolveTCPAddr("tcp", req.RemoteAddr)
|
||||
if err != nil {
|
||||
log.Println("remoteAddr:", err)
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
remoteIP = addr.IP
|
||||
}
|
||||
|
||||
if s.limit(remoteIP) {
|
||||
if debug {
|
||||
log.Println(remoteIP, "is limited")
|
||||
}
|
||||
w.Header().Set("Retry-After", "60")
|
||||
http.Error(w, "Too Many Requests", 429)
|
||||
return
|
||||
}
|
||||
|
||||
switch req.Method {
|
||||
case "GET":
|
||||
s.handleGET(ctx, w, req)
|
||||
case "POST":
|
||||
s.handlePOST(ctx, remoteIP, w, req)
|
||||
default:
|
||||
globalStats.Error()
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *querysrv) handleGET(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
deviceID, err := protocol.DeviceIDFromString(req.URL.Query().Get("device"))
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "bad device param")
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var ann announcement
|
||||
|
||||
ann.Seen, err = s.getDeviceSeen(deviceID)
|
||||
negCache := strconv.Itoa(negCacheFor(ann.Seen))
|
||||
w.Header().Set("Retry-After", negCache)
|
||||
w.Header().Set("Cache-Control", "public, max-age="+negCache)
|
||||
|
||||
if err != nil {
|
||||
// The device is not in the database.
|
||||
globalStats.Query()
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
ann.Addresses, err = s.getAddresses(ctx, deviceID)
|
||||
if err != nil {
|
||||
log.Println(reqID, "getAddresses:", err)
|
||||
globalStats.Error()
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "getAddresses in", time.Since(t0))
|
||||
}
|
||||
|
||||
globalStats.Query()
|
||||
|
||||
if len(ann.Addresses) == 0 {
|
||||
http.Error(w, "Not Found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
globalStats.Answer()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(ann)
|
||||
}
|
||||
|
||||
func (s *querysrv) handlePOST(ctx context.Context, remoteIP net.IP, w http.ResponseWriter, req *http.Request) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
rawCert := certificateBytes(req)
|
||||
if rawCert == nil {
|
||||
if debug {
|
||||
log.Println(reqID, "no certificates")
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
var ann announcement
|
||||
if err := json.NewDecoder(req.Body).Decode(&ann); err != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "decode:", err)
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
deviceID := protocol.NewDeviceID(rawCert)
|
||||
|
||||
// handleAnnounce returns *two* errors. The first indicates a problem with
|
||||
// something the client posted to us. We should return a 400 Bad Request
|
||||
// and not worry about it. The second indicates that the request was fine,
|
||||
// but something internal messed up. We should log it and respond with a
|
||||
// more apologetic 500 Internal Server Error.
|
||||
userErr, internalErr := s.handleAnnounce(ctx, remoteIP, deviceID, ann.Addresses)
|
||||
if userErr != nil {
|
||||
if debug {
|
||||
log.Println(reqID, "handleAnnounce:", userErr)
|
||||
}
|
||||
globalStats.Error()
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if internalErr != nil {
|
||||
log.Println(reqID, "handleAnnounce:", internalErr)
|
||||
globalStats.Error()
|
||||
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
globalStats.Announce()
|
||||
|
||||
// TODO: Slowly increase this for stable clients
|
||||
w.Header().Set("Reannounce-After", "1800")
|
||||
|
||||
// We could return the lookup result here, but it's kind of unnecessarily
|
||||
// expensive to go query the database again so we let the client decide to
|
||||
// do a lookup if they really care.
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (s *querysrv) Stop() {
|
||||
s.listener.Close()
|
||||
}
|
||||
|
||||
func (s *querysrv) handleAnnounce(ctx context.Context, remote net.IP, deviceID protocol.DeviceID, addresses []string) (userErr, internalErr error) {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Since we return from a bunch of different places, we handle
|
||||
// rollback in the defer.
|
||||
if internalErr != nil || userErr != nil {
|
||||
tx.Rollback()
|
||||
}
|
||||
}()
|
||||
|
||||
for _, annAddr := range addresses {
|
||||
uri, err := url.Parse(annAddr)
|
||||
if err != nil {
|
||||
userErr = err
|
||||
return
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(uri.Host)
|
||||
if err != nil {
|
||||
userErr = err
|
||||
return
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
if host == "" || ip.IsUnspecified() {
|
||||
// Do not use IPv6 remote address if requested scheme is tcp4
|
||||
if uri.Scheme == "tcp4" && remote.To4() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not use IPv4 remote address if requested scheme is tcp6
|
||||
if uri.Scheme == "tcp6" && remote.To4() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
host = remote.String()
|
||||
}
|
||||
|
||||
uri.Host = net.JoinHostPort(host, port)
|
||||
|
||||
if err := s.updateAddress(ctx, tx, deviceID, uri.String()); err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.updateDevice(ctx, tx, deviceID); err != nil {
|
||||
internalErr = err
|
||||
return
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
internalErr = tx.Commit()
|
||||
if debug {
|
||||
log.Println(reqID, "commit in", time.Since(t0))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *querysrv) limit(remote net.IP) bool {
|
||||
key := remote.String()
|
||||
|
||||
bkt, ok := s.limiter.Get(key)
|
||||
if ok {
|
||||
bkt := bkt.(*rate.Limiter)
|
||||
if !bkt.Allow() {
|
||||
// Rate limit exceeded; ignore packet
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
// limitAvg is in packets per ten seconds.
|
||||
s.limiter.Add(key, rate.NewLimiter(rate.Limit(limitAvg)/10, limitBurst))
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *querysrv) updateDevice(ctx context.Context, tx *sql.Tx, device protocol.DeviceID) error {
|
||||
reqID := ctx.Value(idKey).(requestID)
|
||||
t0 := time.Now()
|
||||
res, err := tx.Stmt(s.prep["updateDevice"]).Exec(device.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "updateDevice in", time.Since(t0))
|
||||
}
|
||||
|
||||
if rows, _ := res.RowsAffected(); rows == 0 {
|
||||
t0 = time.Now()
|
||||
_, err := tx.Stmt(s.prep["insertDevice"]).Exec(device.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if debug {
|
||||
log.Println(reqID, "insertDevice in", time.Since(t0))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *querysrv) updateAddress(ctx context.Context, tx *sql.Tx, device protocol.DeviceID, uri string) error {
|
||||
res, err := tx.Stmt(s.prep["updateAddress"]).Exec(device.String(), uri)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rows, _ := res.RowsAffected(); rows == 0 {
|
||||
_, err := tx.Stmt(s.prep["insertAddress"]).Exec(device.String(), uri)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *querysrv) getAddresses(ctx context.Context, device protocol.DeviceID) ([]string, error) {
|
||||
rows, err := s.prep["selectAddress"].Query(device.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var res []string
|
||||
for rows.Next() {
|
||||
var addr string
|
||||
|
||||
err := rows.Scan(&addr)
|
||||
if err != nil {
|
||||
log.Println("Scan:", err)
|
||||
continue
|
||||
}
|
||||
res = append(res, addr)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *querysrv) getDeviceSeen(device protocol.DeviceID) (time.Time, error) {
|
||||
row := s.prep["selectDevice"].QueryRow(device.String())
|
||||
var seen time.Time
|
||||
if err := row.Scan(&seen); err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return seen.In(time.UTC), nil
|
||||
}
|
||||
|
||||
func handlePing(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(204)
|
||||
}
|
||||
|
||||
func certificateBytes(req *http.Request) []byte {
|
||||
if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
return req.TLS.PeerCertificates[0].Raw
|
||||
}
|
||||
|
||||
if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" {
|
||||
bs := []byte(hdr)
|
||||
// The certificate is in PEM format but with spaces for newlines. We
|
||||
// need to reinstate the newlines for the PEM decoder. But we need to
|
||||
// leave the spaces in the BEGIN and END lines - the first and last
|
||||
// space - alone.
|
||||
firstSpace := bytes.Index(bs, []byte(" "))
|
||||
lastSpace := bytes.LastIndex(bs, []byte(" "))
|
||||
for i := firstSpace + 1; i < lastSpace; i++ {
|
||||
if bs[i] == ' ' {
|
||||
bs[i] = '\n'
|
||||
}
|
||||
}
|
||||
block, _ := pem.Decode(bs)
|
||||
if block == nil {
|
||||
// Decoding failed
|
||||
return nil
|
||||
}
|
||||
return block.Bytes
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
326	cmd/stdiscosrv/replication.go	Normal file
@@ -0,0 +1,326 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
io "io"
|
||||
"log"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
const replicationReadTimeout = time.Minute
|
||||
const replicationHeartbeatInterval = time.Second * 30
|
||||
|
||||
type replicator interface {
|
||||
send(key string, addrs []DatabaseAddress, seen int64)
|
||||
}
|
||||
|
||||
// a replicationSender tries to connect to the remote address and
// provides it with a feed of replication updates.
|
||||
type replicationSender struct {
|
||||
dst string
|
||||
cert tls.Certificate // our certificate
|
||||
allowedIDs []protocol.DeviceID
|
||||
outbox chan ReplicationRecord
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func newReplicationSender(dst string, cert tls.Certificate, allowedIDs []protocol.DeviceID) *replicationSender {
|
||||
return &replicationSender{
|
||||
dst: dst,
|
||||
cert: cert,
|
||||
allowedIDs: allowedIDs,
|
||||
outbox: make(chan ReplicationRecord, replicationOutboxSize),
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *replicationSender) Serve() {
|
||||
// Sleep a little at startup. Peers often restart at the same time, and
|
||||
// this avoids the service failing and entering backoff state
|
||||
// unnecessarily, while also reducing the reconnect rate to something
|
||||
// reasonable by default.
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{s.cert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
// Dial the TLS connection.
|
||||
conn, err := tls.Dial("tcp", s.dst, tlsCfg)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
conn.Close()
|
||||
}()
|
||||
|
||||
// Get the other side device ID.
|
||||
remoteID, err := deviceID(conn)
|
||||
if err != nil {
|
||||
log.Println("Replication connect:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify it's in the set of allowed device IDs.
|
||||
if !deviceIDIn(remoteID, s.allowedIDs) {
|
||||
log.Println("Replication connect: unexpected device ID:", remoteID)
|
||||
return
|
||||
}
|
||||
|
||||
heartBeatTicker := time.NewTicker(replicationHeartbeatInterval)
|
||||
defer heartBeatTicker.Stop()
|
||||
|
||||
// Send records.
|
||||
buf := make([]byte, 1024)
|
||||
for {
|
||||
select {
|
||||
case <-heartBeatTicker.C:
|
||||
if len(s.outbox) > 0 {
|
||||
// No need to send heartbeats if there are events/previous
|
||||
// heartbeats to send, they will keep the connection alive.
|
||||
continue
|
||||
}
|
||||
// Empty replication message is the heartbeat:
|
||||
s.outbox <- ReplicationRecord{}
|
||||
|
||||
case rec := <-s.outbox:
|
||||
// Buffer must hold record plus four bytes for size
|
||||
size := rec.Size()
|
||||
if len(buf) < size+4 {
|
||||
buf = make([]byte, size+4)
|
||||
}
|
||||
|
||||
// Record comes after the four bytes size
|
||||
n, err := rec.MarshalTo(buf[4:])
|
||||
if err != nil {
|
||||
// odd to get an error here, but we haven't sent anything
|
||||
// yet so it's not fatal
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication marshal:", err)
|
||||
continue
|
||||
}
|
||||
binary.BigEndian.PutUint32(buf, uint32(n))
|
||||
|
||||
// Send
|
||||
conn.SetWriteDeadline(time.Now().Add(5 * time.Second))
|
||||
if _, err := conn.Write(buf[:4+n]); err != nil {
|
||||
replicationSendsTotal.WithLabelValues("error").Inc()
|
||||
log.Println("Replication write:", err)
|
||||
// Yes, we are loosing the replication event here.
|
||||
return
|
||||
}
|
||||
replicationSendsTotal.WithLabelValues("success").Inc()
|
||||
|
||||
case <-s.stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
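
// Illustrative sketch only, not part of this diff: the wire format written
// above is a four-byte big-endian length prefix followed by the marshalled
// ReplicationRecord, and a zero-length frame acts as a heartbeat. Framing an
// already-marshalled payload therefore looks like this (helper name made up):
func frameRecord(payload []byte) []byte {
    frame := make([]byte, 4+len(payload))
    binary.BigEndian.PutUint32(frame, uint32(len(payload))) // size prefix
    copy(frame[4:], payload)                                // record bytes
    return frame
}
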

func (s *replicationSender) Stop() {
    close(s.stop)
}

func (s *replicationSender) String() string {
    return fmt.Sprintf("replicationSender(%q)", s.dst)
}

func (s *replicationSender) send(key string, ps []DatabaseAddress, seen int64) {
    item := ReplicationRecord{
        Key:       key,
        Addresses: ps,
    }

    // The send should never block. The outbox is suitably buffered for at
    // least a few seconds of stalls, which shouldn't happen in practice.
    select {
    case s.outbox <- item:
    default:
        replicationSendsTotal.WithLabelValues("drop").Inc()
    }
}

// a replicationMultiplexer sends to multiple replicators
type replicationMultiplexer []replicator

func (m replicationMultiplexer) send(key string, ps []DatabaseAddress, seen int64) {
    for _, s := range m {
        // each send is nonblocking
        s.send(key, ps, seen)
    }
}
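
// Illustrative sketch only, not part of this diff: fan-out replication to
// several peers is just a replicationMultiplexer built from one sender per
// peer. The helper name and the peers map are made up; each sender's Serve
// loop still has to be started and stopped separately, as sketched earlier.
func newReplicationFanout(cert tls.Certificate, peers map[string]protocol.DeviceID) replicationMultiplexer {
    mux := make(replicationMultiplexer, 0, len(peers))
    for addr, id := range peers {
        mux = append(mux, newReplicationSender(addr, cert, []protocol.DeviceID{id}))
    }
    return mux // mux.send() forwards to every sender, each call nonblocking
}
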

// replicationListener accepts incoming connections and reads replication
// items from them. Incoming items are applied to the KV store.
type replicationListener struct {
    addr       string
    cert       tls.Certificate
    allowedIDs []protocol.DeviceID
    db         database
    stop       chan struct{}
}

func newReplicationListener(addr string, cert tls.Certificate, allowedIDs []protocol.DeviceID, db database) *replicationListener {
    return &replicationListener{
        addr:       addr,
        cert:       cert,
        allowedIDs: allowedIDs,
        db:         db,
        stop:       make(chan struct{}),
    }
}

func (l *replicationListener) Serve() {
    tlsCfg := &tls.Config{
        Certificates:       []tls.Certificate{l.cert},
        ClientAuth:         tls.RequestClientCert,
        MinVersion:         tls.VersionTLS12,
        InsecureSkipVerify: true,
    }

    lst, err := tls.Listen("tcp", l.addr, tlsCfg)
    if err != nil {
        log.Println("Replication listen:", err)
        return
    }
    defer lst.Close()

    for {
        select {
        case <-l.stop:
            return
        default:
        }

        // Accept a connection
        conn, err := lst.Accept()
        if err != nil {
            log.Println("Replication accept:", err)
            return
        }

        // Figure out the other side device ID
        remoteID, err := deviceID(conn.(*tls.Conn))
        if err != nil {
            log.Println("Replication accept:", err)
            conn.SetWriteDeadline(time.Now().Add(time.Second))
            conn.Close()
            continue
        }

        // Verify it is in the set of allowed device IDs
        if !deviceIDIn(remoteID, l.allowedIDs) {
            log.Println("Replication accept: unexpected device ID:", remoteID)
            conn.SetWriteDeadline(time.Now().Add(time.Second))
            conn.Close()
            continue
        }

        go l.handle(conn)
    }
}

func (l *replicationListener) Stop() {
    close(l.stop)
}

func (l *replicationListener) String() string {
    return fmt.Sprintf("replicationListener(%q)", l.addr)
}

func (l *replicationListener) handle(conn net.Conn) {
    defer func() {
        conn.SetWriteDeadline(time.Now().Add(time.Second))
        conn.Close()
    }()

    buf := make([]byte, 1024)

    for {
        select {
        case <-l.stop:
            return
        default:
        }

        conn.SetReadDeadline(time.Now().Add(replicationReadTimeout))

        // First four bytes are the size
        if _, err := io.ReadFull(conn, buf[:4]); err != nil {
            log.Println("Replication read size:", err)
            replicationRecvsTotal.WithLabelValues("error").Inc()
            return
        }

        // Read the rest of the record
        size := int(binary.BigEndian.Uint32(buf[:4]))
        if len(buf) < size {
            buf = make([]byte, size)
        }

        if size == 0 {
            // Heartbeat, ignore
            continue
        }

        if _, err := io.ReadFull(conn, buf[:size]); err != nil {
            log.Println("Replication read record:", err)
            replicationRecvsTotal.WithLabelValues("error").Inc()
            return
        }

        // Unmarshal
        var rec ReplicationRecord
        if err := rec.Unmarshal(buf[:size]); err != nil {
            log.Println("Replication unmarshal:", err)
            replicationRecvsTotal.WithLabelValues("error").Inc()
            continue
        }

        // Store
        l.db.merge(rec.Key, rec.Addresses, rec.Seen)
        replicationRecvsTotal.WithLabelValues("success").Inc()
    }
}
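
// Illustrative sketch only, not part of this diff: reading a single frame back
// off the wire mirrors the sender's framing; a zero size is a heartbeat and
// carries no record. The helper name is made up.
func readFrame(r io.Reader) ([]byte, error) {
    var sizeBuf [4]byte
    if _, err := io.ReadFull(r, sizeBuf[:]); err != nil {
        return nil, err
    }
    size := binary.BigEndian.Uint32(sizeBuf[:])
    if size == 0 {
        return nil, nil // heartbeat, no payload
    }
    payload := make([]byte, size)
    if _, err := io.ReadFull(r, payload); err != nil {
        return nil, err
    }
    return payload, nil
}
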

func deviceID(conn *tls.Conn) (protocol.DeviceID, error) {
    // Handshake may not be complete on the server side yet, and we need it
    // to be complete in order to get the client certificate.
    if !conn.ConnectionState().HandshakeComplete {
        if err := conn.Handshake(); err != nil {
            return protocol.DeviceID{}, err
        }
    }

    // We expect exactly one certificate.
    certs := conn.ConnectionState().PeerCertificates
    if len(certs) != 1 {
        return protocol.DeviceID{}, fmt.Errorf("unexpected number of certificates (%d != 1)", len(certs))
    }

    return protocol.NewDeviceID(certs[0].Raw), nil
}

func deviceIDIn(id protocol.DeviceID, ids []protocol.DeviceID) bool {
    for _, candidate := range ids {
        if id == candidate {
            return true
        }
    }
    return false
}
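
// Illustrative sketch only, not part of this diff: with InsecureSkipVerify set
// in the TLS configs above, trust is decided not by a CA chain but by the
// device ID derived from the peer certificate, as deviceID and deviceIDIn
// show. Checking one certificate against an expected ID (crypto/x509 import
// assumed) would look like this; the helper name is made up.
func certMatchesDevice(cert *x509.Certificate, expected protocol.DeviceID) bool {
    return protocol.NewDeviceID(cert.Raw) == expected
}
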
4	cmd/stdiscosrv/scripts/preinst	Normal file
@@ -0,0 +1,4 @@
#!/bin/bash

addgroup --system syncthing
adduser --system --home /var/lib/syncthing-discosrv --ingroup syncthing syncthing-discosrv
@@ -1,141 +1,123 @@
|
||||
// Copyright (C) 2014-2015 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type stats struct {
|
||||
// Incremented atomically
|
||||
announces int64
|
||||
queries int64
|
||||
answers int64
|
||||
errors int64
|
||||
}
|
||||
var (
|
||||
apiRequestsTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "api_requests_total",
|
||||
Help: "Number of API requests.",
|
||||
}, []string{"type", "result"})
|
||||
apiRequestsSeconds = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "api_requests_seconds",
|
||||
Help: "Latency of API requests.",
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
}, []string{"type"})
|
||||
|
||||
func (s *stats) Announce() {
|
||||
atomic.AddInt64(&s.announces, 1)
|
||||
}
|
||||
lookupRequestsTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "lookup_requests_total",
|
||||
Help: "Number of lookup requests.",
|
||||
}, []string{"result"})
|
||||
announceRequestsTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "announcement_requests_total",
|
||||
Help: "Number of announcement requests.",
|
||||
}, []string{"result"})
|
||||
|
||||
func (s *stats) Query() {
|
||||
atomic.AddInt64(&s.queries, 1)
|
||||
}
|
||||
replicationSendsTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "replication_sends_total",
|
||||
Help: "Number of replication sends.",
|
||||
}, []string{"result"})
|
||||
replicationRecvsTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "replication_recvs_total",
|
||||
Help: "Number of replication receives.",
|
||||
}, []string{"result"})
|
||||
|
||||
func (s *stats) Answer() {
|
||||
atomic.AddInt64(&s.answers, 1)
|
||||
}
|
||||
databaseKeys = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_keys",
|
||||
Help: "Number of database keys at last count.",
|
||||
}, []string{"category"})
|
||||
databaseStatisticsSeconds = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_statistics_seconds",
|
||||
Help: "Time spent running the statistics routine.",
|
||||
})
|
||||
|
||||
func (s *stats) Error() {
|
||||
atomic.AddInt64(&s.errors, 1)
|
||||
}
|
||||
databaseOperations = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_operations_total",
|
||||
Help: "Number of database operations.",
|
||||
}, []string{"operation", "result"})
|
||||
databaseOperationSeconds = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "discovery",
|
||||
Name: "database_operation_seconds",
|
||||
Help: "Latency of database operations.",
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
}, []string{"operation"})
|
||||
)
|
||||
|
||||
// Reset returns a copy of the current stats and resets the counters to
|
||||
// zero.
|
||||
func (s *stats) Reset() stats {
|
||||
// Create a copy of the stats using atomic reads
|
||||
copy := stats{
|
||||
announces: atomic.LoadInt64(&s.announces),
|
||||
queries: atomic.LoadInt64(&s.queries),
|
||||
answers: atomic.LoadInt64(&s.answers),
|
||||
errors: atomic.LoadInt64(&s.errors),
|
||||
const (
|
||||
dbOpGet = "get"
|
||||
dbOpPut = "put"
|
||||
dbOpMerge = "merge"
|
||||
dbOpDelete = "delete"
|
||||
dbResSuccess = "success"
|
||||
dbResNotFound = "not_found"
|
||||
dbResError = "error"
|
||||
dbResUnmarshalError = "unmarsh_err"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(apiRequestsTotal, apiRequestsSeconds,
|
||||
lookupRequestsTotal, announceRequestsTotal,
|
||||
replicationSendsTotal, replicationRecvsTotal,
|
||||
databaseKeys, databaseStatisticsSeconds,
|
||||
databaseOperations, databaseOperationSeconds)
|
||||
|
||||
processCollectorOpts := prometheus.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_discovery",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset the stats by subtracting the values that we copied
|
||||
atomic.AddInt64(&s.announces, -copy.announces)
|
||||
atomic.AddInt64(&s.queries, -copy.queries)
|
||||
atomic.AddInt64(&s.answers, -copy.answers)
|
||||
atomic.AddInt64(&s.errors, -copy.errors)
|
||||
prometheus.MustRegister(
|
||||
prometheus.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
|
||||
return copy
|
||||
}
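
// Illustrative sketch only, not part of this diff: the collectors registered
// above are used by the discovery server roughly as follows; the helper and
// its body are made up, but the label sets match the declarations.
func observeLookup(result string, handle func()) {
    timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues("lookup"))
    defer timer.ObserveDuration()
    handle()
    lookupRequestsTotal.WithLabelValues(result).Inc()
    apiRequestsTotal.WithLabelValues("lookup", result).Inc()
}
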
|
||||
|
||||
type statssrv struct {
|
||||
intv time.Duration
|
||||
file string
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func (s *statssrv) Serve() {
|
||||
lastReset := time.Now()
|
||||
for {
|
||||
time.Sleep(next(s.intv))
|
||||
|
||||
stats := globalStats.Reset()
|
||||
d := time.Since(lastReset).Seconds()
|
||||
lastReset = time.Now()
|
||||
|
||||
log.Printf("Stats: %.02f announces/s, %.02f queries/s, %.02f answers/s, %.02f errors/s",
|
||||
float64(stats.announces)/d, float64(stats.queries)/d, float64(stats.answers)/d, float64(stats.errors)/d)
|
||||
|
||||
if s.file != "" {
|
||||
s.writeToFile(stats, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *statssrv) Stop() {
|
||||
panic("stop unimplemented")
|
||||
}
|
||||
|
||||
func (s *statssrv) writeToFile(stats stats, secs float64) {
|
||||
newLine := []byte("\n")
|
||||
|
||||
var addrs int
|
||||
row := s.db.QueryRow("SELECT COUNT(*) FROM Addresses")
|
||||
if err := row.Scan(&addrs); err != nil {
|
||||
log.Println("stats query:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(s.file, os.O_RDWR|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
bs, err := ioutil.ReadAll(fd)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
lines := bytes.Split(bytes.TrimSpace(bs), newLine)
|
||||
if len(lines) > 12 {
|
||||
lines = lines[len(lines)-12:]
|
||||
}
|
||||
|
||||
latest := fmt.Sprintf("%v: %6d addresses, %8.02f announces/s, %8.02f queries/s, %8.02f answers/s, %8.02f errors/s\n",
|
||||
time.Now().UTC().Format(time.RFC3339), addrs,
|
||||
float64(stats.announces)/secs, float64(stats.queries)/secs, float64(stats.answers)/secs, float64(stats.errors)/secs)
|
||||
lines = append(lines, []byte(latest))
|
||||
|
||||
_, err = fd.Seek(0, 0)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
err = fd.Truncate(0)
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = fd.Write(bytes.Join(lines, newLine))
|
||||
if err != nil {
|
||||
log.Println("stats file:", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,8 +68,8 @@ func main() {
|
||||
}
|
||||
|
||||
blockSize := int(fi.Size())
|
||||
if *standardBlocks || blockSize < protocol.BlockSize {
|
||||
blockSize = protocol.BlockSize
|
||||
if *standardBlocks || blockSize < protocol.MinBlockSize {
|
||||
blockSize = protocol.BlockSize(fi.Size())
|
||||
}
|
||||
bs, err := scanner.Blocks(context.TODO(), fd, blockSize, fi.Size(), nil, true)
|
||||
if err != nil {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/discover"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
@@ -82,7 +83,7 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
|
||||
}
|
||||
|
||||
func checkServer(deviceID protocol.DeviceID, server string) checkResult {
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil)
|
||||
disco, err := discover.NewGlobal(server, tls.Certificate{}, nil, events.NoopLogger)
|
||||
if err != nil {
|
||||
return checkResult{error: err}
|
||||
}
|
||||
|
||||
44	cmd/stfindignored/main.go	Normal file
@@ -0,0 +1,44 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// Command stfindignored lists ignored files under a given folder root.
package main

import (
    "flag"
    "fmt"
    "os"

    "github.com/syncthing/syncthing/lib/fs"
    "github.com/syncthing/syncthing/lib/ignore"
)

func main() {
    flag.Parse()
    root := flag.Arg(0)
    if root == "" {
        root = "."
    }

    vfs := fs.NewWalkFilesystem(fs.NewFilesystem(fs.FilesystemTypeBasic, root))

    ign := ignore.New(vfs)
    if err := ign.Load(".stignore"); err != nil {
        fmt.Fprintf(os.Stderr, "Fatal: loading ignores: %v\n", err)
        os.Exit(1)
    }

    vfs.Walk(".", func(path string, info fs.FileInfo, err error) error {
        if err != nil {
            fmt.Fprintf(os.Stderr, "Warning: %s: %v\n", path, err)
            return fs.SkipDir
        }
        if ign.Match(path).IsIgnored() {
            fmt.Println(path)
        }
        return nil
    })
}
@@ -82,7 +82,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
|
||||
|
||||
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
|
||||
return os.Chtimes(p1, t, t)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
func dump(ldb *db.Instance) {
|
||||
func dump(ldb *db.Lowlevel) {
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
|
||||
@@ -37,7 +37,7 @@ func (h *ElementHeap) Pop() interface{} {
|
||||
return x
|
||||
}
|
||||
|
||||
func dumpsize(ldb *db.Instance) {
|
||||
func dumpsize(ldb *db.Lowlevel) {
|
||||
h := &ElementHeap{}
|
||||
heap.Init(h)
|
||||
|
||||
|
||||
242	cmd/stindex/idxck.go	Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/db"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
type fileInfoKey struct {
|
||||
folder uint32
|
||||
device uint32
|
||||
name string
|
||||
}
|
||||
|
||||
type globalKey struct {
|
||||
folder uint32
|
||||
name string
|
||||
}
|
||||
|
||||
type sequenceKey struct {
|
||||
folder uint32
|
||||
sequence uint64
|
||||
}
|
||||
|
||||
func idxck(ldb *db.Lowlevel) (success bool) {
|
||||
folders := make(map[uint32]string)
|
||||
devices := make(map[uint32]string)
|
||||
deviceToIDs := make(map[string]uint32)
|
||||
fileInfos := make(map[fileInfoKey]protocol.FileInfo)
|
||||
globals := make(map[globalKey]db.VersionList)
|
||||
sequences := make(map[sequenceKey]string)
|
||||
needs := make(map[globalKey]struct{})
|
||||
var localDeviceKey uint32
|
||||
success = true
|
||||
|
||||
it := ldb.NewIterator(nil, nil)
|
||||
for it.Next() {
|
||||
key := it.Key()
|
||||
switch key[0] {
|
||||
case db.KeyTypeDevice:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
device := binary.BigEndian.Uint32(key[1+4:])
|
||||
name := nulString(key[1+4+4:])
|
||||
|
||||
var f protocol.FileInfo
|
||||
err := f.Unmarshal(it.Value())
|
||||
if err != nil {
|
||||
fmt.Println("Unable to unmarshal FileInfo:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfos[fileInfoKey{folder, device, name}] = f
|
||||
|
||||
case db.KeyTypeGlobal:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
var flv db.VersionList
|
||||
if err := flv.Unmarshal(it.Value()); err != nil {
|
||||
fmt.Println("Unable to unmarshal VersionList:", err)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
globals[globalKey{folder, name}] = flv
|
||||
|
||||
case db.KeyTypeFolderIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
folders[key] = string(it.Value())
|
||||
|
||||
case db.KeyTypeDeviceIdx:
|
||||
key := binary.BigEndian.Uint32(it.Key()[1:])
|
||||
devices[key] = string(it.Value())
|
||||
deviceToIDs[string(it.Value())] = key
|
||||
if bytes.Equal(it.Value(), protocol.LocalDeviceID[:]) {
|
||||
localDeviceKey = key
|
||||
}
|
||||
|
||||
case db.KeyTypeSequence:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
seq := binary.BigEndian.Uint64(key[5:])
|
||||
val := it.Value()
|
||||
sequences[sequenceKey{folder, seq}] = string(val[9:])
|
||||
|
||||
case db.KeyTypeNeed:
|
||||
folder := binary.BigEndian.Uint32(key[1:])
|
||||
name := nulString(key[1+4:])
|
||||
needs[globalKey{folder, name}] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if localDeviceKey == 0 {
|
||||
fmt.Println("Missing key for local device in device index (bailing out)")
|
||||
success = false
|
||||
return
|
||||
}
|
||||
|
||||
for fk, fi := range fileInfos {
|
||||
if fk.name != fi.Name {
|
||||
fmt.Printf("Mismatching FileInfo name, %q (key) != %q (actual)\n", fk.name, fi.Name)
|
||||
success = false
|
||||
}
|
||||
|
||||
folder := folders[fk.folder]
|
||||
if folder == "" {
|
||||
fmt.Printf("Unknown folder ID %d for FileInfo %q\n", fk.folder, fk.name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
if devices[fk.device] == "" {
|
||||
fmt.Printf("Unknown device ID %d for FileInfo %q, folder %q\n", fk.folder, fk.name, folder)
|
||||
success = false
|
||||
}
|
||||
|
||||
if fk.device == localDeviceKey {
|
||||
name, ok := sequences[sequenceKey{fk.folder, uint64(fi.Sequence)}]
|
||||
if !ok {
|
||||
fmt.Printf("Sequence entry missing for FileInfo %q, folder %q, seq %d\n", fi.Name, folder, fi.Sequence)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
if name != fi.Name {
|
||||
fmt.Printf("Sequence entry refers to wrong name, %q (seq) != %q (FileInfo), folder %q, seq %d\n", name, fi.Name, folder, fi.Sequence)
|
||||
success = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for gk, vl := range globals {
|
||||
folder := folders[gk.folder]
|
||||
if folder == "" {
|
||||
fmt.Printf("Unknown folder ID %d for VersionList %q\n", gk.folder, gk.name)
|
||||
success = false
|
||||
}
|
||||
for i, fv := range vl.Versions {
|
||||
dev, ok := deviceToIDs[string(fv.Device)]
|
||||
if !ok {
|
||||
fmt.Printf("VersionList %q, folder %q refers to unknown device %q\n", gk.name, folder, fv.Device)
|
||||
success = false
|
||||
}
|
||||
fi, ok := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
if !ok {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d refers to unknown FileInfo\n", gk.name, folder, i)
|
||||
success = false
|
||||
}
|
||||
if !fi.Version.Equal(fv.Version) {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo version mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Version, fi.Version)
|
||||
success = false
|
||||
}
|
||||
if fi.IsInvalid() != fv.Invalid {
|
||||
fmt.Printf("VersionList %q, folder %q, entry %d, FileInfo invalid mismatch, %v (VersionList) != %v (FileInfo)\n", gk.name, folder, i, fv.Invalid, fi.IsInvalid())
|
||||
success = false
|
||||
}
|
||||
}
|
||||
|
||||
// If we need this file we should have a need entry for it. False
|
||||
// positives from needsLocally for deleted files, where we might
|
||||
// legitimately lack an entry if we never had it, and ignored files.
|
||||
if needsLocally(vl) {
|
||||
_, ok := needs[gk]
|
||||
if !ok {
|
||||
dev := deviceToIDs[string(vl.Versions[0].Device)]
|
||||
fi := fileInfos[fileInfoKey{gk.folder, dev, gk.name}]
|
||||
if !fi.IsDeleted() && !fi.IsIgnored() {
|
||||
fmt.Printf("Missing need entry for needed file %q, folder %q\n", gk.name, folder)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
seenSeq := make(map[fileInfoKey]uint64)
|
||||
for sk, name := range sequences {
|
||||
folder := folders[sk.folder]
|
||||
if folder == "" {
|
||||
fmt.Printf("Unknown folder ID %d for sequence entry %d, %q\n", sk.folder, sk.sequence, name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
if prev, ok := seenSeq[fileInfoKey{folder: sk.folder, name: name}]; ok {
|
||||
fmt.Printf("Duplicate sequence entry for %q, folder %q, seq %d (prev %d)\n", name, folder, sk.sequence, prev)
|
||||
success = false
|
||||
}
|
||||
seenSeq[fileInfoKey{folder: sk.folder, name: name}] = sk.sequence
|
||||
|
||||
fi, ok := fileInfos[fileInfoKey{sk.folder, localDeviceKey, name}]
|
||||
if !ok {
|
||||
fmt.Printf("Missing FileInfo for sequence entry %d, folder %q, %q\n", sk.sequence, folder, name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
if fi.Sequence != int64(sk.sequence) {
|
||||
fmt.Printf("Sequence mismatch for %q, folder %q, %d (key) != %d (FileInfo)\n", name, folder, sk.sequence, fi.Sequence)
|
||||
success = false
|
||||
}
|
||||
}
|
||||
|
||||
for nk := range needs {
|
||||
folder := folders[nk.folder]
|
||||
if folder == "" {
|
||||
fmt.Printf("Unknown folder ID %d for need entry %q\n", nk.folder, nk.name)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
vl, ok := globals[nk]
|
||||
if !ok {
|
||||
fmt.Printf("Missing global for need entry %q, folder %q\n", nk.name, folder)
|
||||
success = false
|
||||
continue
|
||||
}
|
||||
|
||||
if !needsLocally(vl) {
|
||||
fmt.Printf("Need entry for file we don't need, %q, folder %q\n", nk.name, folder)
|
||||
success = false
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func needsLocally(vl db.VersionList) bool {
|
||||
var lv *protocol.Vector
|
||||
for _, fv := range vl.Versions {
|
||||
if bytes.Equal(fv.Device, protocol.LocalDeviceID[:]) {
|
||||
lv = &fv.Version
|
||||
break
|
||||
}
|
||||
}
|
||||
if lv == nil {
|
||||
return true // provisionally, it looks like we need the file
|
||||
}
|
||||
return !lv.GreaterEqual(vl.Versions[0].Version)
|
||||
}
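
// Illustrative sketch only, not part of this diff: the checks above decode the
// index key layout, where a device file key is one type byte, a big-endian
// folder index, a big-endian device index and then the file name. Building
// such a key for illustration (helper name made up):
func deviceFileKey(folder, device uint32, name string) []byte {
    key := make([]byte, 1+4+4+len(name))
    key[0] = db.KeyTypeDevice
    binary.BigEndian.PutUint32(key[1:], folder)
    binary.BigEndian.PutUint32(key[1+4:], device)
    copy(key[1+4+4:], name)
    return key
}
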
|
||||
@@ -21,7 +21,7 @@ func main() {
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
flag.StringVar(&mode, "mode", "dump", "Mode of operation: dump, dumpsize")
|
||||
flag.StringVar(&mode, "mode", "dump", "Mode of operation: dump, dumpsize, idxck")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
@@ -30,9 +30,7 @@ func main() {
|
||||
path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
|
||||
}
|
||||
|
||||
fmt.Println("Path:", path)
|
||||
|
||||
ldb, err := db.Open(path)
|
||||
ldb, err := db.OpenRO(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -41,6 +39,10 @@ func main() {
|
||||
dump(ldb)
|
||||
} else if mode == "dumpsize" {
|
||||
dumpsize(ldb)
|
||||
} else if mode == "idxck" {
|
||||
if !idxck(ldb) {
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Unknown mode")
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/fs"
|
||||
)
|
||||
|
||||
func nulString(bs []byte) string {
|
||||
@@ -33,7 +33,7 @@ func defaultConfigDir() string {
|
||||
return filepath.Join(os.Getenv("AppData"), "Syncthing")
|
||||
|
||||
case "darwin":
|
||||
dir, err := osutil.ExpandTilde("~/Library/Application Support/Syncthing")
|
||||
dir, err := fs.ExpandTilde("~/Library/Application Support/Syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func defaultConfigDir() string {
|
||||
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
|
||||
return filepath.Join(xdgCfg, "syncthing")
|
||||
}
|
||||
dir, err := osutil.ExpandTilde("~/.config/syncthing")
|
||||
dir, err := fs.ExpandTilde("~/.config/syncthing")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
2	cmd/strelaypoolsrv/auto/.gitignore	vendored
@@ -1 +1 @@
|
||||
gui.go
|
||||
gui.files.go
|
||||
|
||||
10	cmd/strelaypoolsrv/auto/doc.go	Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright (C) 2018 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
//go:generate go run ../../../script/genassets.go -o gui.files.go ../gui
|
||||
|
||||
// Package auto contains auto generated files for web assets.
|
||||
package auto
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
<title>Relay stats</title>
|
||||
<link href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet"/>
|
||||
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/font-awesome/4.6.1/css/font-awesome.min.css"/>
|
||||
<link rel="stylesheet" href="//use.fontawesome.com/releases/v5.0.13/css/all.css"/>
|
||||
|
||||
<style>
|
||||
#map {
|
||||
@@ -56,83 +56,83 @@
|
||||
<tr>
|
||||
<th rowspan="2">Address</td>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numActiveSessions'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.numActiveSessions'; sortReverse = !sortReverse">
|
||||
Sessions
|
||||
<span ng-show="sortType == 'status.numActiveSessions' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numActiveSessions' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.numActiveSessions' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.numActiveSessions' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.numConnections'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.numConnections'; sortReverse = !sortReverse">
|
||||
Connections
|
||||
<span ng-show="sortType == 'status.numConnections' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.numConnections' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.numConnections' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.numConnections' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.bytesProxied'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.bytesProxied'; sortReverse = !sortReverse">
|
||||
Data relayed
|
||||
<span ng-show="sortType == 'status.bytesProxied' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.bytesProxied' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.bytesProxied' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.bytesProxied' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th colspan="6" class="text-center">Transfer rate in the last period</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.uptimeSeconds'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.uptimeSeconds'; sortReverse = !sortReverse">
|
||||
Uptime hours
|
||||
<span ng-show="sortType == 'status.uptimeSeconds' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.uptimeSeconds' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.uptimeSeconds' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.uptimeSeconds' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th rowspan="2">
|
||||
<a ng-click="sortType = 'status.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.options[\'provided-by\'] || \'\''; sortReverse = !sortReverse">
|
||||
Provided by
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.options[\'provided-by\'] || \'\'' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.options[\'provided-by\'] || \'\'' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[0]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[0]'; sortReverse = !sortReverse">
|
||||
10s
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[0]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[0]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[1]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[1]'; sortReverse = !sortReverse">
|
||||
1m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[1]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[1]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[2]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[2]'; sortReverse = !sortReverse">
|
||||
5m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[2]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[2]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[3]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[3]'; sortReverse = !sortReverse">
|
||||
15m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[3]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[3]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[4]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[4]'; sortReverse = !sortReverse">
|
||||
30m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[4]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[4]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
<th>
|
||||
<a ng-click="sortType = 'status.kbps10s1m5m15m30m60m[5]'; sortReverse = !sortReverse">
|
||||
<a ng-click="sortType = 'stats.kbps10s1m5m15m30m60m[5]'; sortReverse = !sortReverse">
|
||||
60m
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5]' && !sortReverse" class="fa fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'status.kbps10s1m5m15m30m60m[5]' && sortReverse" class="fa fa-caret-up"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && !sortReverse" class="fas fa-caret-down"></span>
|
||||
<span ng-show="sortType == 'stats.kbps10s1m5m15m30m60m[5]' && sortReverse" class="fas fa-caret-up"></span>
|
||||
</a>
|
||||
</th>
|
||||
</tr>
|
||||
@@ -140,21 +140,21 @@
|
||||
<tbody>
|
||||
<tr ng-repeat="relay in relays | orderBy:sortType:sortReverse:sortCompare" ng-mouseover="relay.showMarker()" ng-mouseleave="relay.hideMarker()">
|
||||
<td>{{ relay.address }}</td>
|
||||
<td ng-if="relay.status === undefined" colspan="11" class="text-center">Looking up...</td>
|
||||
<td ng-if-start="relay.status !== undefined">{{ relay.status.numActiveSessions }}</td>
|
||||
<td>{{ relay.status.numConnections }}</td>
|
||||
<td>{{ relay.status.bytesProxied | bytes }}</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.status.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td ng-if="relay.status.uptimeSeconds != undefined">{{ relay.status.uptimeSeconds/60/60 | number:0 }}</td>
|
||||
<td ng-if="relay.status.uptimeSeconds == undefined"></td>
|
||||
<td title="{{ relay.status.options['provided-by'] || '' }}" ng-if-end>
|
||||
{{ relay.status.options['provided-by'] || '' | limitTo:50 }}
|
||||
<span ng-if="(relay.status.options['provided-by'] || '').length > 50">…
|
||||
<td ng-if="!relay.stats" colspan="11"></td>
|
||||
<td ng-if-start="relay.stats">{{ relay.stats.numActiveSessions }}</td>
|
||||
<td>{{ relay.stats.numConnections }}</td>
|
||||
<td>{{ relay.stats.bytesProxied | bytes }}</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[0] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[1] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[2] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[3] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[4] * 128 | bytes }}/s</td>
|
||||
<td>{{ relay.stats.kbps10s1m5m15m30m60m[5] * 128 | bytes }}/s</td>
|
||||
<td ng-if="relay.stats.uptimeSeconds != undefined">{{ relay.stats.uptimeSeconds/60/60 | number:0 }}</td>
|
||||
<td ng-if="relay.stats.uptimeSeconds == undefined"></td>
|
||||
<td title="{{ relay.stats.options['provided-by'] || '' }}" ng-if-end>
|
||||
{{ relay.stats.options['provided-by'] || '' | limitTo:50 }}
|
||||
<span ng-if="(relay.stats.options['provided-by'] || '').length > 50">…
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
@@ -187,15 +187,15 @@
|
||||
<script type="text/javascript" src="//code.jquery.com/jquery-2.1.4.min.js"></script>
|
||||
<script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/angular.js/1.5.8/angular.min.js"></script>
|
||||
<script type="text/javascript" src="//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
|
||||
<script type="text/javascript" src="//maps.googleapis.com/maps/api/js"></script>
|
||||
<script type="text/javascript" src="//maps.googleapis.com/maps/api/js?key=AIzaSyDk5WJ8s7ueLKb99X5DbQ-vkWtPDAKqYs0"></script>
|
||||
</body>
|
||||
|
||||
<script>
|
||||
angular.module('syncthing', [
|
||||
])
|
||||
.config(function($httpProvider) {
|
||||
.config(['$httpProvider', function($httpProvider) {
|
||||
$httpProvider.defaults.timeout = 5000;
|
||||
})
|
||||
}])
|
||||
.filter('bytes', function() {
|
||||
return function(bytes, precision) {
|
||||
if (isNaN(parseFloat(bytes)) || !isFinite(bytes)) return '-';
|
||||
@@ -235,16 +235,16 @@
|
||||
$scope.mapBounds = new google.maps.LatLngBounds();
|
||||
$scope.tooltipTemplate = $('#infoTemplate').html();
|
||||
$scope.usedLocations = {};
|
||||
$scope.sortType = 'status.numActiveSessions';
|
||||
$scope.sortType = 'stats.numActiveSessions';
|
||||
$scope.sortReverse = true;
|
||||
$scope.sortCompare = function(a, b) {
|
||||
if (a.value == b.value) {
|
||||
return 0;
|
||||
}
|
||||
if (a.type == "undefined") {
|
||||
if (a.type == "undefined" || a.type == "null") {
|
||||
return -1;
|
||||
}
|
||||
if (b.type == "undefined") {
|
||||
if (b.type == "undefined" || b.type == "null") {
|
||||
return 1;
|
||||
}
|
||||
return a.value > b.value ? 1 : -1;
|
||||
@@ -252,25 +252,31 @@
|
||||
|
||||
$http.get("/endpoint").then(function(response) {
|
||||
$scope.relays = response.data.relays;
|
||||
var promises = [];
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
relay.uri = constructURI(relay.url);
|
||||
relay.address = relay.url.split('/')[2];
|
||||
|
||||
addMarkerToMap(relay);
|
||||
|
||||
promises.push(getRelayStatus(relay));
|
||||
if (relay.stats) {
|
||||
angular.forEach($scope.totals, function(value, key) {
|
||||
if (typeof $scope.totals[key] == 'number') {
|
||||
$scope.totals[key] += relay.stats[key];
|
||||
} else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
|
||||
angular.forEach($scope.totals[key], function(value, index) {
|
||||
$scope.totals[key][index] += relay.stats[key][index];
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Can only add circles once we know the totals for transfers, which means
|
||||
// we need to resolve all statuses.
|
||||
$q.all(promises).then(function() {
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
if (relay.status) {
|
||||
addCircleToMap(relay);
|
||||
}
|
||||
});
|
||||
// After the totals were calculated, add circles.
|
||||
angular.forEach($scope.relays, function(relay) {
|
||||
if (relay.stats) {
|
||||
addCircleToMap(relay);
|
||||
}
|
||||
});
|
||||
|
||||
$scope.map.fitBounds($scope.mapBounds);
|
||||
@@ -330,41 +336,10 @@
|
||||
fillOpacity: 0.35,
|
||||
map: $scope.map,
|
||||
center: relay.marker.position,
|
||||
radius: ((relay.status.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000
|
||||
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000
|
||||
});
|
||||
}
|
||||
|
||||
function getRelayStatus(relay) {
|
||||
// Normal timeout doesn't deal with relays which accept the TCP connection
|
||||
// but don't respond (some firewalls do that), so deal with it this way.
|
||||
var timeoutRequest = $q.defer();
|
||||
var resolveStatus = $q.defer();
|
||||
|
||||
|
||||
$http.get("http://" + relay.uri.hostname + ':' + ((relay.uri.args.statusAddr && relay.uri.args.statusAddr.split(':')[1]) || "22070") + "/status", { timeout: timeoutRequest.promise }).then(function (response) {
|
||||
relay.status = response.data;
|
||||
resolveStatus.resolve();
|
||||
angular.forEach($scope.totals, function(value, key) {
|
||||
if (typeof $scope.totals[key] == 'number') {
|
||||
$scope.totals[key] += response.data[key];
|
||||
} else if (typeof $scope.totals[key] == 'object' && $scope.totals[key] instanceof Array) {
|
||||
angular.forEach($scope.totals[key], function(value, index) {
|
||||
$scope.totals[key][index] += response.data[key][index];
|
||||
});
|
||||
}
|
||||
});
|
||||
}, function() {
|
||||
relay.status = null;
|
||||
resolveStatus.resolve();
|
||||
});
|
||||
|
||||
$timeout(function() {
|
||||
timeoutRequest.resolve();
|
||||
}, 5000);
|
||||
|
||||
return resolveStatus.promise;
|
||||
}
|
||||
|
||||
function constructURI(url) {
|
||||
var uri = document.createElement('a');
|
||||
|
||||
@@ -385,25 +360,25 @@
|
||||
|
||||
<script type="text/template" id="infoTemplate">
|
||||
<div>
|
||||
<p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.status.options['provided-by']">provided by <u>{{ relay.status.options['provided-by'] }}</u></span></p>
|
||||
<div ng-if="relay.status">
|
||||
<span ng-if="relay.status.startTime">Start time: {{ relay.status.startTime | date:"medium" }}</br></span>
|
||||
<span ng-if="relay.status.bytesProxied != undefined">Proxied: {{ relay.status.bytesProxied | bytes }}</br></span>
|
||||
<span ng-if="relay.status.numActiveSessions != undefined">Sessions: {{ relay.status.numActiveSessions }}</br></span>
|
||||
<span ng-if="relay.status.numConnections != undefined">Clients: {{ relay.status.numConnections }}</br></span>
|
||||
<span ng-if="relay.status.options.pools">Pools: {{ relay.status.options.pools.join(', ') }}</br></span>
|
||||
<span ng-if="relay.status.options['global-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['global-rate'] > 0">Global rate limit: {{ relay.status.options['global-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['global-rate'] == 0">Global rate limit: unlimited</span>
|
||||
<p><b>{{ relay.uri.hostname }}</b> <span ng-if="relay.stats.options['provided-by']">provided by <u>{{ relay.stats.options['provided-by'] }}</u></span></p>
|
||||
<div ng-if="relay.stats">
|
||||
<span ng-if="relay.stats.startTime">Start time: {{ relay.stats.startTime | date:"medium" }}</br></span>
|
||||
<span ng-if="relay.stats.bytesProxied != undefined">Proxied: {{ relay.stats.bytesProxied | bytes }}</br></span>
|
||||
<span ng-if="relay.stats.numActiveSessions != undefined">Sessions: {{ relay.stats.numActiveSessions }}</br></span>
|
||||
<span ng-if="relay.stats.numConnections != undefined">Clients: {{ relay.stats.numConnections }}</br></span>
|
||||
<span ng-if="relay.stats.options.pools">Pools: {{ relay.stats.options.pools.join(', ') }}</br></span>
|
||||
<span ng-if="relay.stats.options['global-rate'] != undefined">
|
||||
<span ng-if="relay.stats.options['global-rate'] > 0">Global rate limit: {{ relay.stats.options['global-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.stats.options['global-rate'] == 0">Global rate limit: unlimited</span>
|
||||
<br/>
|
||||
</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] != undefined">
|
||||
<span ng-if="relay.status.options['per-session-rate'] > 0">Session rate limit: {{ relay.status.options['per-session-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.status.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
|
||||
<span ng-if="relay.stats.options['per-session-rate'] != undefined">
|
||||
<span ng-if="relay.stats.options['per-session-rate'] > 0">Session rate limit: {{ relay.stats.options['per-session-rate'] | bytes }}/s</span>
|
||||
<span ng-if="relay.stats.options['per-session-rate'] == 0">Session rate limit: unlimited</span>
|
||||
<br/>
|
||||
</span>
|
||||
</div>
|
||||
<div ng-if="!relay.status">
|
||||
<div ng-if="!relay.stats">
|
||||
Data unavailable.
|
||||
<div>
|
||||
</div>
|
||||
|
||||
@@ -13,18 +13,22 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"mime"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/relay/client"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
@@ -34,12 +38,42 @@ import (
|
||||
type location struct {
|
||||
Latitude float64 `json:"latitude"`
|
||||
Longitude float64 `json:"longitude"`
|
||||
City string `json:"city"`
|
||||
Country string `json:"country"`
|
||||
Continent string `json:"continent"`
|
||||
}
|
||||
|
||||
type relay struct {
|
||||
URL string `json:"url"`
|
||||
Location location `json:"location"`
|
||||
uri *url.URL
|
||||
URL string `json:"url"`
|
||||
Location location `json:"location"`
|
||||
uri *url.URL
|
||||
Stats *stats `json:"stats"`
|
||||
StatsRetrieved time.Time `json:"statsRetrieved"`
|
||||
}
|
||||
|
||||
type stats struct {
|
||||
StartTime time.Time `json:"startTime"`
|
||||
UptimeSeconds int `json:"uptimeSeconds"`
|
||||
PendingSessionKeys int `json:"numPendingSessionKeys"`
|
||||
ActiveSessions int `json:"numActiveSessions"`
|
||||
Connections int `json:"numConnections"`
|
||||
Proxies int `json:"numProxies"`
|
||||
BytesProxied int `json:"bytesProxied"`
|
||||
GoVersion string `json:"goVersion"`
|
||||
GoOS string `json:"goOS"`
|
||||
GoArch string `json:"goArch"`
|
||||
GoMaxProcs int `json:"goMaxProcs"`
|
||||
GoRoutines int `json:"goNumRoutine"`
|
||||
Rates []int64 `json:"kbps10s1m5m15m30m60m"`
|
||||
Options struct {
|
||||
NetworkTimeout int `json:"network-timeout"`
|
||||
PintInterval int `json:"ping-interval"`
|
||||
MessageTimeout int `json:"message-timeout"`
|
||||
SessionRate int `json:"per-session-rate"`
|
||||
GlobalRate int `json:"global-rate"`
|
||||
Pools []string `json:"pools"`
|
||||
ProvidedBy string `json:"provided-by"`
|
||||
} `json:"options"`
|
||||
}
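
// Illustrative sketch only, not part of this diff: the stats struct above
// mirrors the JSON served by a relay's status endpoint, so refreshing a
// relay's stats boils down to an HTTP GET and a JSON decode (net/http and
// encoding/json from the existing imports assumed; helper name and URL are
// made up).
func fetchRelayStats(statusURL string) (*stats, error) {
    resp, err := http.Get(statusURL) // e.g. "http://relay.example.com:22070/status"
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    var s stats
    if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
        return nil, err
    }
    return &s, nil
}
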
|
||||
|
||||
func (r relay) String() string {
|
||||
@@ -47,9 +81,9 @@ func (r relay) String() string {
|
||||
}
|
||||
|
||||
type request struct {
|
||||
relay relay
|
||||
uri *url.URL
|
||||
result chan result
|
||||
relay *relay
|
||||
result chan result
|
||||
queueTimer *prometheus.Timer
|
||||
}
|
||||
|
||||
type result struct {
|
||||
@@ -58,23 +92,25 @@ type result struct {
|
||||
}
|
||||
|
||||
var (
|
||||
testCert tls.Certificate
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 1
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 1
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
testCert tls.Certificate
|
||||
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
|
||||
listen = ":80"
|
||||
dir string
|
||||
evictionTime = time.Hour
|
||||
debug bool
|
||||
getLRUSize = 10 << 10
|
||||
getLimitBurst = 10
|
||||
getLimitAvg = 2
|
||||
postLRUSize = 1 << 10
|
||||
postLimitBurst = 2
|
||||
postLimitAvg = 2
|
||||
getLimit time.Duration
|
||||
postLimit time.Duration
|
||||
permRelaysFile string
|
||||
ipHeader string
|
||||
geoipPath string
|
||||
proto string
|
||||
statsRefresh = time.Minute / 2
|
||||
|
||||
getMut = sync.NewRWMutex()
|
||||
getLRUCache *lru.Cache
|
||||
@@ -85,26 +121,31 @@ var (
|
||||
requests = make(chan request, 10)
|
||||
|
||||
mut = sync.NewRWMutex()
|
||||
knownRelays = make([]relay, 0)
|
||||
permanentRelays = make([]relay, 0)
|
||||
knownRelays = make([]*relay, 0)
|
||||
permanentRelays = make([]*relay, 0)
|
||||
evictionTimers = make(map[string]*time.Timer)
|
||||
)
|
||||
|
||||
const (
|
||||
httpStatusEnhanceYourCalm = 429
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&listen, "listen", listen, "Listen address")
|
||||
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
|
||||
flag.BoolVar(&debug, "debug", debug, "Enable debug output")
|
||||
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
|
||||
flag.IntVar(&getLRUSize, "get-limit-cache", getLRUSize, "Get request limiter cache size")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", 2, "Allowed average get request rate, per 10 s")
|
||||
flag.IntVar(&getLimitAvg, "get-limit-avg", getLimitAvg, "Allowed average get request rate, per 10 s")
|
||||
flag.IntVar(&getLimitBurst, "get-limit-burst", getLimitBurst, "Allowed burst get requests")
|
||||
flag.IntVar(&postLRUSize, "post-limit-cache", postLRUSize, "Post request limiter cache size")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", 2, "Allowed average post request rate, per minute")
|
||||
flag.IntVar(&postLimitAvg, "post-limit-avg", postLimitAvg, "Allowed average post request rate, per minute")
|
||||
flag.IntVar(&postLimitBurst, "post-limit-burst", postLimitBurst, "Allowed burst post requests")
|
||||
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
|
||||
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
|
||||
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
|
||||
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
|
||||
flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
@@ -118,13 +159,31 @@ func main() {
|
||||
var err error
|
||||
|
||||
if permRelaysFile != "" {
|
||||
loadPermanentRelays(permRelaysFile)
|
||||
permanentRelays = loadRelays(permRelaysFile)
|
||||
}
|
||||
|
||||
testCert = createTestCertificate()
|
||||
|
||||
go requestProcessor()
|
||||
|
||||
// Load relays from cache in the background.
|
||||
// Load them in a serial fashion to make sure any genuine requests
|
||||
// are not dropped.
|
||||
go func() {
|
||||
for _, relay := range loadRelays(knownRelaysFile) {
|
||||
resultChan := make(chan result)
|
||||
requests <- request{relay, resultChan, nil}
|
||||
result := <-resultChan
|
||||
if result.err != nil {
|
||||
relayTestsTotal.WithLabelValues("failed").Inc()
|
||||
} else {
|
||||
relayTestsTotal.WithLabelValues("success").Inc()
|
||||
}
|
||||
}
|
||||
// Run the stats refresher once the relays are loaded.
|
||||
statsRefresher(statsRefresh)
|
||||
}()
|
||||
|
||||
if dir != "" {
|
||||
if debug {
|
||||
log.Println("Starting TLS listener on", listen)
|
||||
@@ -169,6 +228,7 @@ func main() {
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/", handleAssets)
|
||||
handler.HandleFunc("/endpoint", handleRequest)
|
||||
handler.HandleFunc("/metrics", handleMetrics)
|
||||
|
||||
srv := http.Server{
|
||||
Handler: handler,
|
||||
@@ -181,7 +241,18 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func handleMetrics(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(metricsRequestsSeconds)
|
||||
// Acquire the mutex just to make sure we're not caught midway through stats collection
|
||||
mut.RLock()
|
||||
promhttp.Handler().ServeHTTP(w, r)
|
||||
mut.RUnlock()
|
||||
timer.ObserveDuration()
|
||||
}
|
||||
|
||||
func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", "no-cache, must-revalidate")
|
||||
|
||||
assets := auto.Assets()
|
||||
path := r.URL.Path[1:]
|
||||
if path == "" {
|
||||
@@ -194,11 +265,28 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
etag := fmt.Sprintf("%d", auto.Generated)
|
||||
modified := time.Unix(auto.Generated, 0).UTC()
|
||||
|
||||
w.Header().Set("Last-Modified", modified.Format(http.TimeFormat))
|
||||
w.Header().Set("Etag", etag)
|
||||
|
||||
mtype := mimeTypeForFile(path)
|
||||
if len(mtype) != 0 {
|
||||
w.Header().Set("Content-Type", mtype)
|
||||
}
|
||||
|
||||
if t, err := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modified.Add(time.Second).After(t) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
|
||||
if match := r.Header.Get("If-None-Match"); match != "" {
|
||||
if strings.Contains(match, etag) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
} else {
|
||||
@@ -241,6 +329,15 @@ func mimeTypeForFile(file string) string {
|
||||
}
|
||||
|
||||
func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
||||
|
||||
lw := NewLoggingResponseWriter(w)
|
||||
|
||||
defer func() {
|
||||
timer.ObserveDuration()
|
||||
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
||||
}()
|
||||
|
||||
if ipHeader != "" {
|
||||
r.RemoteAddr = r.Header.Get(ipHeader)
|
||||
}
|
||||
@@ -248,13 +345,13 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
if limit(r.RemoteAddr, getLRUCache, getMut, getLimit, getLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
w.WriteHeader(httpStatusEnhanceYourCalm)
|
||||
return
|
||||
}
|
||||
handleGetRequest(w, r)
|
||||
case "POST":
|
||||
if limit(r.RemoteAddr, postLRUCache, postMut, postLimit, postLimitBurst) {
|
||||
w.WriteHeader(429)
|
||||
w.WriteHeader(httpStatusEnhanceYourCalm)
|
||||
return
|
||||
}
|
||||
handlePostRequest(w, r)
|
||||
@@ -273,12 +370,9 @@ func handleGetRequest(w http.ResponseWriter, r *http.Request) {
|
||||
mut.RUnlock()
|
||||
|
||||
// Shuffle
|
||||
for i := range relays {
|
||||
j := rand.Intn(i + 1)
|
||||
relays[i], relays[j] = relays[j], relays[i]
|
||||
}
|
||||
rand.Shuffle(relays)
|
||||
|
||||
json.NewEncoder(w).Encode(map[string][]relay{
|
||||
json.NewEncoder(w).Encode(map[string][]*relay{
|
||||
"relays": relays,
|
||||
})
|
||||
}
|
||||
@@ -315,13 +409,9 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get the IP address of the client
|
||||
rhost, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Failed to split remote address", r.RemoteAddr)
|
||||
}
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
rhost := r.RemoteAddr
|
||||
if host, _, err := net.SplitHostPort(rhost); err == nil {
|
||||
rhost = host
|
||||
}
|
||||
|
||||
ip := net.ParseIP(host)
|
||||
@@ -333,18 +423,18 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
if debug {
|
||||
log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
|
||||
}
|
||||
http.Error(w, "IP address does not match client IP", http.StatusUnauthorized)
|
||||
http.Error(w, fmt.Sprintf("IP advertised %s does not match client IP %s", host, rhost), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
newRelay.uri = uri
|
||||
newRelay.Location = getLocation(uri.Host)
|
||||
|
||||
for _, current := range permanentRelays {
|
||||
if current.uri.Host == newRelay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
|
||||
}
|
||||
http.Error(w, "Invalid request", 500)
|
||||
http.Error(w, "Invalid request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -352,78 +442,105 @@ func handlePostRequest(w http.ResponseWriter, r *http.Request) {
|
||||
reschan := make(chan result)
|
||||
|
||||
select {
|
||||
case requests <- request{newRelay, uri, reschan}:
|
||||
case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
|
||||
result := <-reschan
|
||||
if result.err != nil {
|
||||
http.Error(w, result.err.Error(), 500)
|
||||
relayTestsTotal.WithLabelValues("failed").Inc()
|
||||
http.Error(w, result.err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
relayTestsTotal.WithLabelValues("success").Inc()
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
json.NewEncoder(w).Encode(map[string]time.Duration{
|
||||
"evictionIn": result.eviction,
|
||||
})
|
||||
|
||||
default:
|
||||
relayTestsTotal.WithLabelValues("dropped").Inc()
|
||||
if debug {
|
||||
log.Println("Dropping request")
|
||||
}
|
||||
w.WriteHeader(429)
|
||||
w.WriteHeader(httpStatusEnhanceYourCalm)
|
||||
}
|
||||
}
|
||||
|
||||
func requestProcessor() {
|
||||
for request := range requests {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
if !client.TestRelay(request.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3) {
|
||||
if debug {
|
||||
log.Println("Test for relay", request.relay, "failed")
|
||||
}
|
||||
request.result <- result{fmt.Errorf("test failed"), 0}
|
||||
continue
|
||||
if request.queueTimer != nil {
|
||||
request.queueTimer.ObserveDuration()
|
||||
}
|
||||
|
||||
mut.Lock()
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
if ok {
|
||||
if debug {
|
||||
log.Println("Stopping existing timer for", request.relay)
|
||||
}
|
||||
timer.Stop()
|
||||
}
|
||||
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == request.relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Relay", request.relay, "already exists")
|
||||
}
|
||||
|
||||
// Evict the old entry anyway, as configuration might have changed.
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
|
||||
goto found
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("Adding new relay", request.relay)
|
||||
}
|
||||
|
||||
found:
|
||||
|
||||
knownRelays = append(knownRelays, request.relay)
|
||||
|
||||
evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))
|
||||
mut.Unlock()
|
||||
request.result <- result{nil, evictionTime}
|
||||
timer := prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("test"))
|
||||
handleRelayTest(request)
|
||||
timer.ObserveDuration()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func evict(relay relay) func() {
|
||||
func handleRelayTest(request request) {
|
||||
if debug {
|
||||
log.Println("Request for", request.relay)
|
||||
}
|
||||
if !client.TestRelay(request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3) {
|
||||
if debug {
|
||||
log.Println("Test for relay", request.relay, "failed")
|
||||
}
|
||||
request.result <- result{fmt.Errorf("connection test failed"), 0}
|
||||
return
|
||||
}
|
||||
|
||||
stats := fetchStats(request.relay)
|
||||
location := getLocation(request.relay.uri.Host)
|
||||
|
||||
mut.Lock()
|
||||
if stats != nil {
|
||||
updateMetrics(request.relay.uri.Host, *stats, location)
|
||||
}
|
||||
request.relay.Stats = stats
|
||||
request.relay.StatsRetrieved = time.Now()
|
||||
request.relay.Location = location
|
||||
|
||||
timer, ok := evictionTimers[request.relay.uri.Host]
|
||||
if ok {
|
||||
if debug {
|
||||
log.Println("Stopping existing timer for", request.relay)
|
||||
}
|
||||
timer.Stop()
|
||||
}
|
||||
|
||||
for i, current := range knownRelays {
|
||||
if current.uri.Host == request.relay.uri.Host {
|
||||
if debug {
|
||||
log.Println("Relay", request.relay, "already exists")
|
||||
}
|
||||
|
||||
// Evict the old entry anyway, as configuration might have changed.
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
|
||||
goto found
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Println("Adding new relay", request.relay)
|
||||
}
|
||||
|
||||
found:
|
||||
|
||||
knownRelays = append(knownRelays, request.relay)
|
||||
evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))
|
||||
|
||||
mut.Unlock()
|
||||
|
||||
if err := saveRelays(knownRelaysFile, knownRelays); err != nil {
|
||||
log.Println("Failed to write known relays: " + err.Error())
|
||||
}
|
||||
|
||||
request.result <- result{nil, evictionTime}
|
||||
}
|
||||
|
||||
func evict(relay *relay) func() {
|
||||
return func() {
|
||||
mut.Lock()
|
||||
defer mut.Unlock()
|
||||
@@ -438,6 +555,7 @@ func evict(relay relay) func() {
|
||||
last := len(knownRelays) - 1
|
||||
knownRelays[i] = knownRelays[last]
|
||||
knownRelays = knownRelays[:last]
|
||||
deleteMetrics(current.uri.Host)
|
||||
}
|
||||
}
|
||||
delete(evictionTimers, relay.uri.Host)
|
||||
@@ -445,13 +563,12 @@ func evict(relay relay) func() {
|
||||
}
|
||||
|
||||
func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration, burst int) bool {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return false
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
lock.RLock()
|
||||
bkt, ok := cache.Get(host)
|
||||
bkt, ok := cache.Get(addr)
|
||||
lock.RUnlock()
|
||||
if ok {
|
||||
bkt := bkt.(*rate.Limiter)
|
||||
@@ -461,18 +578,20 @@ func limit(addr string, cache *lru.Cache, lock sync.RWMutex, intv time.Duration,
|
||||
}
|
||||
} else {
|
||||
lock.Lock()
|
||||
cache.Add(host, rate.NewLimiter(rate.Every(intv), burst))
|
||||
cache.Add(addr, rate.NewLimiter(rate.Every(intv), burst))
|
||||
lock.Unlock()
|
||||
}
|
||||
return false
|
||||
}
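The limit helper above keeps one token bucket per client address in an LRU cache and reports true when the request should be rejected. A minimal sketch of the same per-client pattern, assuming a plain map instead of the LRU cache (the allow and limiters names are illustrative, not part of the pool server):

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

var (
	limitersMut sync.Mutex
	limiters    = make(map[string]*rate.Limiter)
)

// allow reports whether a request from addr fits within its token budget.
// A new bucket refills one token every intv and holds at most burst tokens.
// Note the inverted convention compared to limit above, which returns true
// when the request should be rejected.
func allow(addr string, intv time.Duration, burst int) bool {
	limitersMut.Lock()
	lim, ok := limiters[addr]
	if !ok {
		lim = rate.NewLimiter(rate.Every(intv), burst)
		limiters[addr] = lim
	}
	limitersMut.Unlock()
	return lim.Allow()
}

func main() {
	// Hypothetical budget: one token every half second, burst of one.
	fmt.Println(allow("203.0.113.7", 500*time.Millisecond, 1)) // true
	fmt.Println(allow("203.0.113.7", 500*time.Millisecond, 1)) // false, bucket empty
}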
|
||||
|
||||
func loadPermanentRelays(file string) {
|
||||
func loadRelays(file string) []*relay {
|
||||
content, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Println("Failed to load relays: " + err.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
var relays []*relay
|
||||
for _, line := range strings.Split(string(content), "\n") {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
@@ -481,21 +600,30 @@ func loadPermanentRelays(file string) {
|
||||
uri, err := url.Parse(line)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Println("Skipping permanent relay", line, "due to parse error", err)
|
||||
log.Println("Skipping relay", line, "due to parse error", err)
|
||||
}
|
||||
continue
|
||||
|
||||
}
|
||||
|
||||
permanentRelays = append(permanentRelays, relay{
|
||||
relays = append(relays, &relay{
|
||||
URL: line,
|
||||
Location: getLocation(uri.Host),
|
||||
uri: uri,
|
||||
})
|
||||
if debug {
|
||||
log.Println("Adding permanent relay", line)
|
||||
log.Println("Adding relay", line)
|
||||
}
|
||||
}
|
||||
return relays
|
||||
}
|
||||
|
||||
func saveRelays(file string, relays []*relay) error {
|
||||
var content string
|
||||
for _, relay := range relays {
|
||||
content += relay.uri.String() + "\n"
|
||||
}
|
||||
return ioutil.WriteFile(file, []byte(content), 0777)
|
||||
}
|
||||
|
||||
func createTestCertificate() tls.Certificate {
|
||||
@@ -505,7 +633,7 @@ func createTestCertificate() tls.Certificate {
|
||||
}
|
||||
|
||||
certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
|
||||
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 3072)
|
||||
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to create test X509 key pair:", err)
|
||||
}
|
||||
@@ -514,6 +642,8 @@ func createTestCertificate() tls.Certificate {
|
||||
}
|
||||
|
||||
func getLocation(host string) location {
|
||||
timer := prometheus.NewTimer(locationLookupSeconds)
|
||||
defer timer.ObserveDuration()
|
||||
db, err := geoip2.Open(geoipPath)
|
||||
if err != nil {
|
||||
return location{}
|
||||
@@ -531,7 +661,24 @@ func getLocation(host string) location {
|
||||
}
|
||||
|
||||
return location{
|
||||
Latitude: city.Location.Latitude,
|
||||
Longitude: city.Location.Longitude,
|
||||
Latitude: city.Location.Latitude,
|
||||
City: city.City.Names["en"],
|
||||
Country: city.Country.IsoCode,
|
||||
Continent: city.Continent.Code,
|
||||
}
|
||||
}
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
|
||||
return &loggingResponseWriter{w, http.StatusOK}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
259
cmd/strelaypoolsrv/stats.go
Normal file
@@ -0,0 +1,259 @@
|
||||
// Copyright (C) 2018 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
processCollectorOpts := prometheus.ProcessCollectorOpts{
|
||||
Namespace: "syncthing_relaypoolsrv",
|
||||
PidFn: func() (int, error) {
|
||||
return os.Getpid(), nil
|
||||
},
|
||||
}
|
||||
|
||||
prometheus.MustRegister(
|
||||
prometheus.NewProcessCollector(processCollectorOpts),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
statusClient = http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
|
||||
apiRequestsTotal = makeCounter("api_requests_total", "Number of API requests.", "type", "result")
|
||||
apiRequestsSeconds = makeSummary("api_requests_seconds", "Latency of API requests.", "type")
|
||||
|
||||
relayTestsTotal = makeCounter("tests_total", "Number of relay tests.", "result")
|
||||
relayTestActionsSeconds = makeSummary("test_actions_seconds", "Latency of relay test actions.", "type")
|
||||
|
||||
locationLookupSeconds = makeSummary("location_lookup_seconds", "Latency of location lookups.").WithLabelValues()
|
||||
|
||||
metricsRequestsSeconds = makeSummary("metrics_requests_seconds", "Latency of metric requests.").WithLabelValues()
|
||||
scrapeSeconds = makeSummary("relay_scrape_seconds", "Latency of metric scrapes from remote relays.", "result")
|
||||
|
||||
relayUptime = makeGauge("relay_uptime", "Uptime of relay", "relay")
|
||||
relayPendingSessionKeys = makeGauge("relay_pending_session_keys", "Number of pending session keys (two keys per session, one per each side of the connection)", "relay")
|
||||
relayActiveSessions = makeGauge("relay_active_sessions", "Number of sessions that are happening, a session contains two parties", "relay")
|
||||
relayConnections = makeGauge("relay_connections", "Number of devices connected to the relay", "relay")
|
||||
relayProxies = makeGauge("relay_proxies", "Number of active proxy routines sending data between peers (two proxies per session, one for each way)", "relay")
|
||||
relayBytesProxied = makeGauge("relay_bytes_proxied", "Number of bytes proxied by the relay", "relay")
|
||||
relayGoRoutines = makeGauge("relay_go_routines", "Number of Go routines in the process", "relay")
|
||||
relaySessionRate = makeGauge("relay_session_rate", "Rate applied per session", "relay")
|
||||
relayGlobalRate = makeGauge("relay_global_rate", "Global rate applied on the whole relay", "relay")
|
||||
relayBuildInfo = makeGauge("relay_build_info", "Build information about a relay", "relay", "go_version", "go_os", "go_arch")
|
||||
relayLocationInfo = makeGauge("relay_location_info", "Location information about a relay", "relay", "city", "country", "continent")
|
||||
|
||||
lastStats = make(map[string]stats)
|
||||
)
|
||||
|
||||
func makeGauge(name string, help string, labels ...string) *prometheus.GaugeVec {
|
||||
gauge := prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "relaypoolsrv",
|
||||
Name: name,
|
||||
Help: help,
|
||||
},
|
||||
labels,
|
||||
)
|
||||
prometheus.MustRegister(gauge)
|
||||
return gauge
|
||||
}
|
||||
|
||||
func makeSummary(name string, help string, labels ...string) *prometheus.SummaryVec {
|
||||
summary := prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "relaypoolsrv",
|
||||
Name: name,
|
||||
Help: help,
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
},
|
||||
labels,
|
||||
)
|
||||
prometheus.MustRegister(summary)
|
||||
return summary
|
||||
}
|
||||
|
||||
func makeCounter(name string, help string, labels ...string) *prometheus.CounterVec {
|
||||
counter := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "syncthing",
|
||||
Subsystem: "relaypoolsrv",
|
||||
Name: name,
|
||||
Help: help,
|
||||
},
|
||||
labels,
|
||||
)
|
||||
prometheus.MustRegister(counter)
|
||||
return counter
|
||||
}
|
||||
|
||||
func statsRefresher(interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
for range ticker.C {
|
||||
refreshStats()
|
||||
}
|
||||
}
|
||||
|
||||
type statsFetchResult struct {
|
||||
relay *relay
|
||||
stats *stats
|
||||
}
|
||||
|
||||
func refreshStats() {
|
||||
mut.RLock()
|
||||
relays := append(permanentRelays, knownRelays...)
|
||||
mut.RUnlock()
|
||||
|
||||
now := time.Now()
|
||||
wg := sync.NewWaitGroup()
|
||||
|
||||
results := make(chan statsFetchResult, len(relays))
|
||||
for _, rel := range relays {
|
||||
wg.Add(1)
|
||||
go func(rel *relay) {
|
||||
t0 := time.Now()
|
||||
stats := fetchStats(rel)
|
||||
duration := time.Since(t0).Seconds()
|
||||
result := "success"
|
||||
if stats == nil {
|
||||
result = "failed"
|
||||
}
|
||||
scrapeSeconds.WithLabelValues(result).Observe(duration)
|
||||
|
||||
results <- statsFetchResult{
|
||||
relay: rel,
|
||||
stats: stats,
|
||||
}
|
||||
wg.Done()
|
||||
}(rel)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(results)
|
||||
|
||||
mut.Lock()
|
||||
relayBuildInfo.Reset()
|
||||
relayLocationInfo.Reset()
|
||||
for result := range results {
|
||||
result.relay.StatsRetrieved = now
|
||||
result.relay.Stats = result.stats
|
||||
if result.stats == nil {
|
||||
deleteMetrics(result.relay.uri.Host)
|
||||
} else {
|
||||
updateMetrics(result.relay.uri.Host, *result.stats, result.relay.Location)
|
||||
}
|
||||
}
|
||||
mut.Unlock()
|
||||
}
|
||||
|
||||
func fetchStats(relay *relay) *stats {
|
||||
statusAddr := relay.uri.Query().Get("statusAddr")
|
||||
if statusAddr == "" {
|
||||
statusAddr = ":22070"
|
||||
}
|
||||
|
||||
statusHost, statusPort, err := net.SplitHostPort(statusAddr)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if statusHost == "" {
|
||||
if host, _, err := net.SplitHostPort(relay.uri.Host); err != nil {
|
||||
return nil
|
||||
} else {
|
||||
statusHost = host
|
||||
}
|
||||
}
|
||||
|
||||
url := "http://" + net.JoinHostPort(statusHost, statusPort) + "/status"
|
||||
|
||||
response, err := statusClient.Get(url)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var stats stats
|
||||
|
||||
if err := json.NewDecoder(response.Body).Decode(&stats); err != nil {
|
||||
return nil
|
||||
}
|
||||
return &stats
|
||||
}
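As a concrete illustration (documentation-range address, not a real relay): for a relay URI such as relay://203.0.113.1:22067/?statusAddr=:22070, the status address has an empty host part, so fetchStats borrows the host from the relay URI and polls http://203.0.113.1:22070/status for the JSON stats decoded above.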
|
||||
|
||||
func updateMetrics(host string, stats stats, location location) {
|
||||
if stats.GoVersion != "" || stats.GoOS != "" || stats.GoArch != "" {
|
||||
relayBuildInfo.WithLabelValues(host, stats.GoVersion, stats.GoOS, stats.GoArch).Add(1)
|
||||
}
|
||||
if location.City != "" || location.Country != "" || location.Continent != "" {
|
||||
relayLocationInfo.WithLabelValues(host, location.City, location.Country, location.Continent).Add(1)
|
||||
}
|
||||
|
||||
if lastStat, ok := lastStats[host]; ok {
|
||||
stats = mergeStats(stats, lastStat)
|
||||
}
|
||||
|
||||
relayUptime.WithLabelValues(host).Set(float64(stats.UptimeSeconds))
|
||||
relayPendingSessionKeys.WithLabelValues(host).Set(float64(stats.PendingSessionKeys))
|
||||
relayActiveSessions.WithLabelValues(host).Set(float64(stats.ActiveSessions))
|
||||
relayConnections.WithLabelValues(host).Set(float64(stats.Connections))
|
||||
relayProxies.WithLabelValues(host).Set(float64(stats.Proxies))
|
||||
relayBytesProxied.WithLabelValues(host).Set(float64(stats.BytesProxied))
|
||||
relayGoRoutines.WithLabelValues(host).Set(float64(stats.GoRoutines))
|
||||
relaySessionRate.WithLabelValues(host).Set(float64(stats.Options.SessionRate))
|
||||
relayGlobalRate.WithLabelValues(host).Set(float64(stats.Options.GlobalRate))
|
||||
lastStats[host] = stats
|
||||
}
|
||||
|
||||
func deleteMetrics(host string) {
|
||||
relayUptime.DeleteLabelValues(host)
|
||||
relayPendingSessionKeys.DeleteLabelValues(host)
|
||||
relayActiveSessions.DeleteLabelValues(host)
|
||||
relayConnections.DeleteLabelValues(host)
|
||||
relayProxies.DeleteLabelValues(host)
|
||||
relayBytesProxied.DeleteLabelValues(host)
|
||||
relayGoRoutines.DeleteLabelValues(host)
|
||||
relaySessionRate.DeleteLabelValues(host)
|
||||
relayGlobalRate.DeleteLabelValues(host)
|
||||
delete(lastStats, host)
|
||||
}
|
||||
|
||||
// Due to some unexplainable behaviour, some of the numbers sometimes travel slightly backwards (by less than 1%).
|
||||
// This happens between scrapes, which is 30s, so this can't be a race.
|
||||
// This causes Prometheus to assume a "rate reset", which in turn causes phenomenal spikes.
|
||||
// One of the numbers that moves backwards is BytesProxied, which atomically increments a counter with the numeric value
|
||||
// returned by net.Conn.Read(). I don't think that can return a negative value, so I have no idea what's going on.
|
||||
func mergeStats(new stats, old stats) stats {
|
||||
new.UptimeSeconds = mergeValue(new.UptimeSeconds, old.UptimeSeconds)
|
||||
new.PendingSessionKeys = mergeValue(new.PendingSessionKeys, old.PendingSessionKeys)
|
||||
new.ActiveSessions = mergeValue(new.ActiveSessions, old.ActiveSessions)
|
||||
new.Connections = mergeValue(new.Connections, old.Connections)
|
||||
new.Proxies = mergeValue(new.Proxies, old.Proxies)
|
||||
new.BytesProxied = mergeValue(new.BytesProxied, old.BytesProxied)
|
||||
new.GoRoutines = mergeValue(new.GoRoutines, old.GoRoutines)
|
||||
new.Options.SessionRate = mergeValue(new.Options.SessionRate, old.Options.SessionRate)
|
||||
new.Options.GlobalRate = mergeValue(new.Options.GlobalRate, old.Options.GlobalRate)
|
||||
return new
|
||||
}
|
||||
|
||||
func mergeValue(new, old int) int {
|
||||
if new >= old {
|
||||
return new // normal increase
|
||||
}
|
||||
if float64(new) > 0.99*float64(old) {
|
||||
return old // slight backward movement
|
||||
}
|
||||
return new // reset (relay restart)
|
||||
}
|
||||
21
cmd/strelaypoolsrv/stats_test.go
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
if mergeValue(1001, 1000) != 1001 {
|
||||
t.Error("the computer says no")
|
||||
}
|
||||
|
||||
if mergeValue(999, 1000) != 1000 {
|
||||
t.Error("the computer says no")
|
||||
}
|
||||
|
||||
if mergeValue(1, 1000) != 1 {
|
||||
t.Error("the computer says no")
|
||||
}
|
||||
}
|
||||
9
cmd/strelaysrv/etc/firewall-ufw/strelaysrv
Normal file
@@ -0,0 +1,9 @@
|
||||
[strelaysrv]
|
||||
title=Syncthing relay server
|
||||
description=Proxies traffic of syncthing clients behind firewalls
|
||||
ports=22067/tcp
|
||||
|
||||
[strelaysrv-metrics]
|
||||
title=Syncthing relay metrics
|
||||
description=Provides metrics about the syncthing relay server
|
||||
ports=22070/tcp
|
||||
5
cmd/strelaysrv/etc/linux-systemd/default
Normal file
@@ -0,0 +1,5 @@
|
||||
# Default settings for syncthing-relaysrv (strelaysrv).
|
||||
NAT=true
|
||||
|
||||
## Add Options here:
|
||||
RELAYSRV_OPTS=
|
||||
@@ -1,17 +1,25 @@
|
||||
[Unit]
|
||||
Description=Syncthing relay server
|
||||
Description=Syncthing Relay Server
|
||||
After=network.target
|
||||
Documentation=man:strelaysrv(1)
|
||||
|
||||
[Service]
|
||||
User=strelaysrv
|
||||
Group=strelaysrv
|
||||
ExecStart=/usr/bin/strelaysrv
|
||||
WorkingDirectory=/var/lib/strelaysrv
|
||||
WorkingDirectory=/var/lib/syncthing-relaysrv
|
||||
EnvironmentFile=/etc/default/syncthing-relaysrv
|
||||
ExecStart=/usr/bin/strelaysrv -nat=${NAT} $RELAYSRV_OPTS
|
||||
|
||||
PrivateTmp=true
|
||||
ProtectSystem=full
|
||||
ProtectHome=true
|
||||
# Hardening
|
||||
User=syncthing-relaysrv
|
||||
Group=syncthing
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=/var/lib/syncthing-relaysrv
|
||||
NoNewPrivileges=true
|
||||
PrivateTmp=true
|
||||
PrivateDevices=true
|
||||
ProtectHome=true
|
||||
SystemCallArchitectures=native
|
||||
MemoryDenyWriteExecute=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Alias=syncthing-relaysrv.service
|
||||
|
||||
@@ -104,7 +104,9 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
|
||||
go messageReader(conn, messages, errors)
|
||||
|
||||
pingTicker := time.NewTicker(pingInterval)
|
||||
defer pingTicker.Stop()
|
||||
timeoutTicker := time.NewTimer(networkTimeout)
|
||||
defer timeoutTicker.Stop()
|
||||
joined := false
|
||||
|
||||
for {
|
||||
|
||||
@@ -14,12 +14,13 @@ import (
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/relay/protocol"
|
||||
"github.com/syncthing/syncthing/lib/tlsutil"
|
||||
@@ -33,24 +34,6 @@ import (
|
||||
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
Version string
|
||||
BuildStamp string
|
||||
BuildUser string
|
||||
BuildHost string
|
||||
|
||||
BuildDate time.Time
|
||||
LongVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
stamp, _ := strconv.Atoi(BuildStamp)
|
||||
BuildDate = time.Unix(int64(stamp), 0)
|
||||
|
||||
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
||||
LongVersion = fmt.Sprintf(`strelaysrv %s (%s %s-%s) %s@%s %s`, Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
|
||||
}
|
||||
|
||||
var (
|
||||
listen string
|
||||
debug bool
|
||||
@@ -64,12 +47,13 @@ var (
|
||||
|
||||
limitCheckTimer *time.Timer
|
||||
|
||||
sessionLimitBps int
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
sessionLimitBps int
|
||||
globalLimitBps int
|
||||
overLimit int32
|
||||
descriptorLimit int64
|
||||
sessionLimiter *rate.Limiter
|
||||
globalLimiter *rate.Limiter
|
||||
networkBufferSize int
|
||||
|
||||
statusAddr string
|
||||
poolAddrs string
|
||||
@@ -81,8 +65,16 @@ var (
|
||||
natLease int
|
||||
natRenewal int
|
||||
natTimeout int
|
||||
|
||||
pprofEnabled bool
|
||||
)
|
||||
|
||||
// httpClient is the HTTP client we use for outbound requests. It has a
|
||||
// timeout and may get further options set during initialization.
|
||||
var httpClient = &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.Lshortfile | log.LstdFlags)
|
||||
|
||||
@@ -105,8 +97,16 @@ func main() {
|
||||
flag.IntVar(&natLease, "nat-lease", 60, "NAT lease length in minutes")
|
||||
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
|
||||
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
|
||||
flag.BoolVar(&pprofEnabled, "pprof", false, "Enable the built in profiling on the status server")
|
||||
flag.IntVar(&networkBufferSize, "network-buffer", 2048, "Network buffer size (two of these per proxied connection)")
|
||||
showVersion := flag.Bool("version", false, "Show version")
|
||||
flag.Parse()
|
||||
|
||||
if *showVersion {
|
||||
fmt.Println(build.LongVersion)
|
||||
return
|
||||
}
|
||||
|
||||
if extAddress == "" {
|
||||
extAddress = listen
|
||||
}
|
||||
@@ -124,18 +124,18 @@ func main() {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if laddr.IP != nil && !laddr.IP.IsUnspecified() {
|
||||
// We bind to a specific address. Our outgoing HTTP requests should
|
||||
// also come from that address.
|
||||
laddr.Port = 0
|
||||
transport, ok := http.DefaultTransport.(*http.Transport)
|
||||
if ok {
|
||||
transport.Dial = (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
LocalAddr: laddr,
|
||||
}).Dial
|
||||
boundDialer := &net.Dialer{LocalAddr: laddr}
|
||||
httpClient.Transport = &http.Transport{
|
||||
DialContext: boundDialer.DialContext,
|
||||
}
|
||||
}
|
||||
|
||||
log.Println(LongVersion)
|
||||
log.Println(build.LongVersion)
|
||||
|
||||
maxDescriptors, err := osutil.MaximizeOpenFileLimit()
|
||||
if maxDescriptors > 0 {
|
||||
@@ -155,7 +155,7 @@ func main() {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
log.Println("Failed to load keypair. Generating one, this might take a while...")
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 3072)
|
||||
cert, err = tlsutil.NewCertificate(certFile, keyFile, "strelaysrv", 20*365)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to generate X509 key pair:", err)
|
||||
}
|
||||
@@ -183,7 +183,7 @@ func main() {
|
||||
log.Println("ID:", id)
|
||||
}
|
||||
|
||||
wrapper := config.Wrap("config", config.New(id))
|
||||
wrapper := config.Wrap("config", config.New(id), events.NoopLogger)
|
||||
wrapper.SetOptions(config.OptionsConfiguration{
|
||||
NATLeaseM: natLease,
|
||||
NATRenewalM: natRenewal,
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
@@ -27,7 +26,7 @@ func poolHandler(pool string, uri *url.URL, mapping mapping) {
|
||||
uriCopy.String(),
|
||||
})
|
||||
|
||||
resp, err := http.Post(pool, "application/json", &b)
|
||||
resp, err := httpClient.Post(pool, "application/json", &b)
|
||||
if err != nil {
|
||||
log.Println("Error joining pool", pool, err)
|
||||
} else if resp.StatusCode == 500 {
|
||||
|
||||
4
cmd/strelaysrv/scripts/preinst
Normal file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
|
||||
addgroup --system syncthing
|
||||
adduser --system --home /var/lib/syncthing-relaysrv --ingroup syncthing syncthing-relaysrv
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
var (
|
||||
sessionMut = sync.RWMutex{}
|
||||
activeSessions = make([]*session, 0)
|
||||
pendingSessions = make(map[string]*session, 0)
|
||||
pendingSessions = make(map[string]*session)
|
||||
numProxies int64
|
||||
bytesProxied int64
|
||||
)
|
||||
@@ -189,7 +189,7 @@ done:
|
||||
// We can end up here in 3 cases:
|
||||
// 1. Timeout joining, in which case there are potentially entries in pendingSessions
|
||||
// 2. General session end/timeout, in which case there are entries in activeSessions
|
||||
// 3. Protocol handler calls dropSession as one of it's clients disconnects.
|
||||
// 3. Protocol handler calls dropSession as one of its clients disconnects.
|
||||
|
||||
sessionMut.Lock()
|
||||
delete(pendingSessions, string(s.serverkey))
|
||||
@@ -254,7 +254,7 @@ func (s *session) proxy(c1, c2 net.Conn) error {
|
||||
atomic.AddInt64(&numProxies, 1)
|
||||
defer atomic.AddInt64(&numProxies, -1)
|
||||
|
||||
buf := make([]byte, 65536)
|
||||
buf := make([]byte, networkBufferSize)
|
||||
for {
|
||||
c1.SetReadDeadline(time.Now().Add(networkTimeout))
|
||||
n, err := c1.Read(buf)
|
||||
|
||||
@@ -6,9 +6,12 @@ import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/build"
|
||||
)
|
||||
|
||||
var rc *rateCalculator
|
||||
@@ -18,6 +21,9 @@ func statusService(addr string) {
|
||||
|
||||
handler := http.NewServeMux()
|
||||
handler.HandleFunc("/status", getStatus)
|
||||
if pprofEnabled {
|
||||
handler.HandleFunc("/debug/pprof/", pprof.Index)
|
||||
}
|
||||
|
||||
srv := http.Server{
|
||||
Addr: addr,
|
||||
@@ -36,6 +42,10 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
sessionMut.Lock()
|
||||
// This can potentially be double the number of pending sessions, as each session has two keys, one for each side.
|
||||
status["version"] = build.Version
|
||||
status["buildHost"] = build.Host
|
||||
status["buildUser"] = build.User
|
||||
status["buildDate"] = build.Date
|
||||
status["startTime"] = rc.startTime
|
||||
status["uptimeSeconds"] = time.Since(rc.startTime) / time.Second
|
||||
status["numPendingSessionKeys"] = len(pendingSessions)
|
||||
@@ -78,9 +88,9 @@ func getStatus(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
type rateCalculator struct {
|
||||
counter *int64 // atomic, must remain 64-bit aligned
|
||||
rates []int64
|
||||
prev int64
|
||||
counter *int64
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() < 1 {
|
||||
log.Println(`Usage:
|
||||
log.Print(`Usage:
|
||||
stsigtool <command>
|
||||
|
||||
Where command is one of:
|
||||
@@ -40,6 +40,7 @@ Where command is one of:
|
||||
|
||||
verify <signaturefile> <datafile> <pubkeyfile>
|
||||
- verify a signature, using the specified public key file
|
||||
|
||||
`)
|
||||
}
|
||||
|
||||
|
||||
57
cmd/stupgrades/main.go
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/upgrade"
|
||||
)
|
||||
|
||||
const defaultURL = "https://api.github.com/repos/syncthing/syncthing/releases?per_page=25"
|
||||
|
||||
func main() {
|
||||
url := flag.String("u", defaultURL, "GitHub releases url")
|
||||
flag.Parse()
|
||||
|
||||
rels := upgrade.FetchLatestReleases(*url, "")
|
||||
if rels == nil {
|
||||
// An error was already logged
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
sort.Sort(upgrade.SortByRelease(rels))
|
||||
rels = filterForLatest(rels)
|
||||
|
||||
if err := json.NewEncoder(os.Stdout).Encode(rels); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// filterForLatest returns the latest stable and prerelease only. If the
|
||||
// stable version is newer (comes first in the list), there is no need to go
|
||||
// looking for a prerelease at all.
|
||||
func filterForLatest(rels []upgrade.Release) []upgrade.Release {
|
||||
var filtered []upgrade.Release
|
||||
var havePre bool
|
||||
for _, rel := range rels {
|
||||
if !rel.Prerelease {
|
||||
// We found a stable version, we're good now.
|
||||
filtered = append(filtered, rel)
|
||||
break
|
||||
}
|
||||
if rel.Prerelease && !havePre {
|
||||
// We remember the first prerelease we find.
|
||||
filtered = append(filtered, rel)
|
||||
havePre = true
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
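A rough illustration of the filtering, with hypothetical version tags (the Prerelease flag is the one used above; the Tag field is assumed here purely for readability):

rels := []upgrade.Release{
	{Tag: "v1.3.0-rc.1", Prerelease: true}, // newest entry, a release candidate
	{Tag: "v1.2.2", Prerelease: false},     // newest stable release
	{Tag: "v1.2.1", Prerelease: false},
}
got := filterForLatest(rels)
// got keeps v1.3.0-rc.1 and v1.2.2. Had v1.2.2 been first in the sorted
// list, only v1.2.2 would remain, since a newer stable release makes the
// prerelease irrelevant.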
|
||||
@@ -1,69 +0,0 @@
|
||||
// Copyright (C) 2015 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
)
|
||||
|
||||
// The auditService subscribes to events and writes these in JSON format, one
|
||||
// event per line, to the specified writer.
|
||||
type auditService struct {
|
||||
w io.Writer // audit destination
|
||||
stop chan struct{} // signals time to stop
|
||||
started chan struct{} // signals startup complete
|
||||
stopped chan struct{} // signals stop complete
|
||||
}
|
||||
|
||||
func newAuditService(w io.Writer) *auditService {
|
||||
return &auditService{
|
||||
w: w,
|
||||
stop: make(chan struct{}),
|
||||
started: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Serve runs the audit service.
|
||||
func (s *auditService) Serve() {
|
||||
defer close(s.stopped)
|
||||
sub := events.Default.Subscribe(events.AllEvents)
|
||||
defer events.Default.Unsubscribe(sub)
|
||||
enc := json.NewEncoder(s.w)
|
||||
|
||||
// We're ready to start processing events.
|
||||
close(s.started)
|
||||
|
||||
for {
|
||||
select {
|
||||
case ev := <-sub.C():
|
||||
enc.Encode(ev)
|
||||
case <-s.stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the audit service.
|
||||
func (s *auditService) Stop() {
|
||||
close(s.stop)
|
||||
}
|
||||
|
||||
// WaitForStart returns once the audit service is ready to receive events, or
|
||||
// immediately if it's already running.
|
||||
func (s *auditService) WaitForStart() {
|
||||
<-s.started
|
||||
}
|
||||
|
||||
// WaitForStop returns once the audit service has stopped.
|
||||
// (Needed by the tests.)
|
||||
func (s *auditService) WaitForStop() {
|
||||
<-s.stopped
|
||||
}
|
||||
@@ -22,11 +22,15 @@ func init() {
|
||||
panic("Couldn't find block profiler")
|
||||
}
|
||||
l.Debugln("Starting block profiling")
|
||||
go saveBlockingProfiles(profiler)
|
||||
go func() {
|
||||
err := saveBlockingProfiles(profiler) // Only returns on error
|
||||
l.Warnln("Block profiler failed:", err)
|
||||
panic("Block profiler failed")
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func saveBlockingProfiles(profiler *pprof.Profile) {
|
||||
func saveBlockingProfiles(profiler *pprof.Profile) error {
|
||||
runtime.SetBlockProfileRate(1)
|
||||
|
||||
t0 := time.Now()
|
||||
@@ -35,16 +39,16 @@ func saveBlockingProfiles(profiler *pprof.Profile) {
|
||||
|
||||
fd, err := os.Create(fmt.Sprintf("block-%05d-%07d.pprof", syscall.Getpid(), startms))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
err = profiler.WriteTo(fd, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
152
cmd/syncthing/crash_reporting.go
Normal file
@@ -0,0 +1,152 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
const (
|
||||
headRequestTimeout = 10 * time.Second
|
||||
putRequestTimeout = time.Minute
|
||||
)
|
||||
|
||||
// uploadPanicLogs attempts to upload all the panic logs in the named
|
||||
// directory to the crash reporting server at urlBase. Uploads are attempted
|
||||
// with the newest log first.
|
||||
//
|
||||
// This can block for a long time. The context can set a final deadline
|
||||
// for this.
|
||||
func uploadPanicLogs(ctx context.Context, urlBase, dir string) {
|
||||
files, err := filepath.Glob(filepath.Join(dir, "panic-*.log"))
|
||||
if err != nil {
|
||||
l.Warnln("Failed to list panic logs:", err)
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(files)))
|
||||
for _, file := range files {
|
||||
if strings.Contains(file, ".reported.") {
|
||||
// We've already sent this file. It'll be cleaned out at some
|
||||
// point.
|
||||
continue
|
||||
}
|
||||
|
||||
if err := uploadPanicLog(ctx, urlBase, file); err != nil {
|
||||
l.Warnln("Reporting crash:", err)
|
||||
} else {
|
||||
// Rename the log so we don't have to try to report it again. This
|
||||
// succeeds, or it does not. There is no point complaining about it.
|
||||
_ = os.Rename(file, strings.Replace(file, ".log", ".reported.log", 1))
|
||||
}
|
||||
}
|
||||
}
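Because the whole upload pass can block, callers are expected to bound it with a context deadline. A hypothetical call site (the URL and directory are placeholders, not the project's actual endpoints):

ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
uploadPanicLogs(ctx, "https://crash.example.com/newcrash", "/home/user/.config/syncthing")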
|
||||
|
||||
// uploadPanicLog attempts to upload the named panic log to the crash
|
||||
// reporting server at urlBase. The panic ID is constructed as the sha256 of
|
||||
// the log contents. A HEAD request is made to see if the log has already
|
||||
// been reported. If not, a PUT is made with the log contents.
|
||||
func uploadPanicLog(ctx context.Context, urlBase, file string) error {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove log lines, for privacy.
|
||||
data = filterLogLines(data)
|
||||
|
||||
hash := fmt.Sprintf("%x", sha256.Sum256(data))
|
||||
l.Infof("Reporting crash found in %s (report ID %s) ...\n", filepath.Base(file), hash[:8])
|
||||
|
||||
url := fmt.Sprintf("%s/%s", urlBase, hash)
|
||||
headReq, err := http.NewRequest(http.MethodHead, url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a reasonable timeout on the HEAD request
|
||||
headCtx, headCancel := context.WithTimeout(ctx, headRequestTimeout)
|
||||
defer headCancel()
|
||||
headReq = headReq.WithContext(headCtx)
|
||||
|
||||
resp, err := http.DefaultClient.Do(headReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
// It's known, we're done
|
||||
return nil
|
||||
}
|
||||
|
||||
putReq, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a reasonable timeout on the PUT request
|
||||
putCtx, putCancel := context.WithTimeout(ctx, putRequestTimeout)
|
||||
defer putCancel()
|
||||
putReq = putReq.WithContext(putCtx)
|
||||
|
||||
resp, err = http.DefaultClient.Do(putReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("upload: %s", resp.Status)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// filterLogLines returns the data without any log lines between the first
|
||||
// line and the panic trace. This is done in-place: the original data slice
|
||||
// is destroyed.
|
||||
func filterLogLines(data []byte) []byte {
|
||||
filtered := data[:0]
|
||||
matched := false
|
||||
for _, line := range bytes.Split(data, []byte("\n")) {
|
||||
switch {
|
||||
case !matched && bytes.HasPrefix(line, []byte("Panic ")):
|
||||
// This begins the panic trace, set the matched flag and append.
|
||||
matched = true
|
||||
fallthrough
|
||||
case len(filtered) == 0 || matched:
|
||||
// This is the first line or inside the panic trace.
|
||||
if len(filtered) > 0 {
|
||||
// We add the newline before rather than after because
|
||||
// bytes.Split sees the \n as *separator* and not line
|
||||
// ender, so it will generate a last empty line that we
|
||||
// don't really want. (We want to keep blank lines in the
|
||||
// middle of the trace though.)
|
||||
filtered = append(filtered, '\n')
|
||||
}
|
||||
// Remove the device ID prefix. The "plus two" stuff is because
|
||||
// the line will look like "[foo] whatever" and the end variable
|
||||
// will end up pointing at the ] and we want to step over that
|
||||
// and the following space.
|
||||
if end := bytes.Index(line, []byte("]")); end > 1 && end < len(line)-2 && bytes.HasPrefix(line, []byte("[")) {
|
||||
line = line[end+2:]
|
||||
}
|
||||
filtered = append(filtered, line...)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
37
cmd/syncthing/crash_reporting_test.go
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright (C) 2019 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilterLogLines(t *testing.T) {
|
||||
in := []byte(`[ABCD123] syncthing version whatever
|
||||
here is more log data
|
||||
and more
|
||||
...
|
||||
and some more
|
||||
yet more
|
||||
Panic detected at like right now
|
||||
here is panic data
|
||||
and yet more panic stuff
|
||||
`)
|
||||
|
||||
filtered := []byte(`syncthing version whatever
|
||||
Panic detected at like right now
|
||||
here is panic data
|
||||
and yet more panic stuff
|
||||
`)
|
||||
|
||||
result := filterLogLines(in)
|
||||
if !bytes.Equal(result, filtered) {
|
||||
t.Logf("%q\n", result)
|
||||
t.Error("it should have been filtered")
|
||||
}
|
||||
}
|
||||
@@ -7,22 +7,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
httpl = logger.DefaultLogger.NewFacility("http", "REST API")
|
||||
l = logger.DefaultLogger.NewFacility("main", "Main package")
|
||||
)
|
||||
|
||||
func shouldDebugHTTP() bool {
|
||||
return l.ShouldDebug("http")
|
||||
}
|
||||
|
||||
func init() {
|
||||
l.SetDebug("main", strings.Contains(os.Getenv("STTRACE"), "main") || os.Getenv("STTRACE") == "all")
|
||||
l.SetDebug("http", strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all")
|
||||
}
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/events"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
var (
|
||||
sessions = make(map[string]bool)
|
||||
sessionsMut = sync.NewMutex()
|
||||
)
|
||||
|
||||
func emitLoginAttempt(success bool, username string) {
|
||||
events.Default.Log(events.LoginAttempt, map[string]interface{}{
|
||||
"success": success,
|
||||
"username": username,
|
||||
})
|
||||
}
|
||||
|
||||
func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguration, next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
cookie, err := r.Cookie(cookieName)
|
||||
if err == nil && cookie != nil {
|
||||
sessionsMut.Lock()
|
||||
_, ok := sessions[cookie.Value]
|
||||
sessionsMut.Unlock()
|
||||
if ok {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
httpl.Debugln("Sessionless HTTP request with authentication; this is expensive.")
|
||||
|
||||
error := func() {
|
||||
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
|
||||
w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
|
||||
http.Error(w, "Not Authorized", http.StatusUnauthorized)
|
||||
}
|
||||
|
||||
hdr := r.Header.Get("Authorization")
|
||||
if !strings.HasPrefix(hdr, "Basic ") {
|
||||
error()
|
||||
return
|
||||
}
|
||||
|
||||
hdr = hdr[6:]
|
||||
bs, err := base64.StdEncoding.DecodeString(hdr)
|
||||
if err != nil {
|
||||
error()
|
||||
return
|
||||
}
|
||||
|
||||
fields := bytes.SplitN(bs, []byte(":"), 2)
|
||||
if len(fields) != 2 {
|
||||
error()
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the username is correct, assuming it was sent as UTF-8
|
||||
username := string(fields[0])
|
||||
if username == cfg.User {
|
||||
goto usernameOK
|
||||
}
|
||||
|
||||
// ... check it again, converting it from assumed ISO-8859-1 to UTF-8
|
||||
username = string(iso88591ToUTF8(fields[0]))
|
||||
if username == cfg.User {
|
||||
goto usernameOK
|
||||
}
|
||||
|
||||
// Neither of the possible interpretations match the configured username
|
||||
emitLoginAttempt(false, username)
|
||||
error()
|
||||
return
|
||||
|
||||
usernameOK:
|
||||
// Check password as given (assumes UTF-8 encoding)
|
||||
password := fields[1]
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), password); err == nil {
|
||||
goto passwordOK
|
||||
}
|
||||
|
||||
// ... check it again, converting it from assumed ISO-8859-1 to UTF-8
|
||||
password = iso88591ToUTF8(password)
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), password); err == nil {
|
||||
goto passwordOK
|
||||
}
|
||||
|
||||
// Neither of the attempts to verify the password checked out
|
||||
emitLoginAttempt(false, username)
|
||||
error()
|
||||
return
|
||||
|
||||
passwordOK:
|
||||
sessionid := rand.String(32)
|
||||
sessionsMut.Lock()
|
||||
sessions[sessionid] = true
|
||||
sessionsMut.Unlock()
|
||||
http.SetCookie(w, &http.Cookie{
|
||||
Name: cookieName,
|
||||
Value: sessionid,
|
||||
MaxAge: 0,
|
||||
})
|
||||
|
||||
emitLoginAttempt(true, username)
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// Convert an ISO-8859-1 encoded byte string to UTF-8. Works by the
|
||||
// principle that ISO-8859-1 bytes are equivalent to unicode code points,
|
||||
// that a rune slice is a list of code points, and that stringifying a slice
|
||||
// of runes generates UTF-8 in Go.
|
||||
func iso88591ToUTF8(s []byte) []byte {
|
||||
runes := make([]rune, len(s))
|
||||
for i := range s {
|
||||
runes[i] = rune(s[i])
|
||||
}
|
||||
return []byte(string(runes))
|
||||
}
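A worked example of that principle (not part of the original file): the single ISO-8859-1 byte 0xE9 denotes 'é', i.e. code point U+00E9, and round-tripping it through a rune slice produces the two-byte UTF-8 encoding.

fmt.Printf("% x\n", iso88591ToUTF8([]byte{0xE9})) // prints: c3 a9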
|
||||
@@ -1,142 +0,0 @@
|
||||
// Copyright (C) 2014 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/config"
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
)
|
||||
|
||||
// csrfTokens is a list of valid tokens. It is sorted so that the most
|
||||
// recently used token is first in the list. New tokens are added to the front
|
||||
// of the list (as it is the most recently used at that time). The list is
|
||||
// pruned to a maximum of maxCsrfTokens, throwing away the least recently used
|
||||
// tokens.
|
||||
var csrfTokens []string
|
||||
var csrfMut = sync.NewMutex()
|
||||
|
||||
const maxCsrfTokens = 25
|
||||
|
||||
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
|
||||
// the request with 403. For / and /index.html, set a new CSRF cookie if none
|
||||
// is currently set.
|
||||
func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, next http.Handler) http.Handler {
|
||||
loadCsrfTokens()
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Allow requests carrying a valid API key
|
||||
if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) {
|
||||
// Set the access-control-allow-origin header for CORS requests
|
||||
// since a valid API key has been provided
|
||||
w.Header().Add("Access-Control-Allow-Origin", "*")
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(r.URL.Path, "/rest/debug") {
|
||||
// Debugging functions are only available when explicitly
|
||||
// enabled, and can be accessed without a CSRF token
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Allow requests for anything not under the protected path prefix,
|
||||
// and set a CSRF cookie if there isn't already a valid one.
|
||||
if !strings.HasPrefix(r.URL.Path, prefix) {
|
||||
cookie, err := r.Cookie("CSRF-Token-" + unique)
|
||||
if err != nil || !validCsrfToken(cookie.Value) {
|
||||
httpl.Debugln("new CSRF cookie in response to request for", r.URL)
|
||||
cookie = &http.Cookie{
|
||||
Name: "CSRF-Token-" + unique,
|
||||
Value: newCsrfToken(),
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the CSRF token
|
||||
token := r.Header.Get("X-CSRF-Token-" + unique)
|
||||
if !validCsrfToken(token) {
|
||||
http.Error(w, "CSRF Error", 403)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func validCsrfToken(token string) bool {
|
||||
csrfMut.Lock()
|
||||
defer csrfMut.Unlock()
|
||||
for i, t := range csrfTokens {
|
||||
if t == token {
|
||||
if i > 0 {
|
||||
// Move this token to the head of the list. Copy the tokens at
|
||||
// the front one step to the right and then replace the token
|
||||
// at the head.
|
||||
copy(csrfTokens[1:], csrfTokens[:i+1])
|
||||
csrfTokens[0] = token
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func newCsrfToken() string {
|
||||
token := rand.String(32)
|
||||
|
||||
csrfMut.Lock()
|
||||
csrfTokens = append([]string{token}, csrfTokens...)
|
||||
if len(csrfTokens) > maxCsrfTokens {
|
||||
csrfTokens = csrfTokens[:maxCsrfTokens]
|
||||
}
|
||||
defer csrfMut.Unlock()
|
||||
|
||||
saveCsrfTokens()
|
||||
|
||||
return token
|
||||
}
|
||||
|
||||
func saveCsrfTokens() {
|
||||
// We're ignoring errors in here. It's not super critical and there's
|
||||
// nothing relevant we can do about them anyway...
|
||||
|
||||
name := locations[locCsrfTokens]
|
||||
f, err := osutil.CreateAtomic(name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, t := range csrfTokens {
|
||||
fmt.Fprintln(f, t)
|
||||
}
|
||||
|
||||
f.Close()
|
||||
}
|
||||
|
||||
func loadCsrfTokens() {
|
||||
f, err := os.Open(locations[locCsrfTokens])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
csrfTokens = append(csrfTokens, s.Text())
|
||||
}
|
||||
}
|
||||
@@ -23,11 +23,15 @@ func init() {
|
||||
rate = i
|
||||
}
|
||||
l.Debugln("Starting heap profiling")
|
||||
go saveHeapProfiles(rate)
|
||||
go func() {
|
||||
err := saveHeapProfiles(rate) // Only returns on error
|
||||
l.Warnln("Heap profiler failed:", err)
|
||||
panic("Heap profiler failed")
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func saveHeapProfiles(rate int) {
|
||||
func saveHeapProfiles(rate int) error {
|
||||
runtime.MemProfileRate = rate
|
||||
var memstats, prevMemstats runtime.MemStats
|
||||
|
||||
@@ -38,21 +42,21 @@ func saveHeapProfiles(rate int) {
|
||||
if memstats.HeapInuse > prevMemstats.HeapInuse {
|
||||
fd, err := os.Create(name + ".tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
err = pprof.WriteHeapProfile(fd)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
|
||||
_ = os.Remove(name) // Error deliberately ignored
|
||||
os.Remove(name) // Error deliberately ignored
|
||||
err = os.Rename(name+".tmp", name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
|
||||
prevMemstats = memstats
|
||||
|
||||
@@ -1,125 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/syncthing/syncthing/lib/osutil"
)

type locationEnum string

// Use strings as keys to make printout and serialization of the locations map
// more meaningful.
const (
	locConfigFile    locationEnum = "config"
	locCertFile                   = "certFile"
	locKeyFile                    = "keyFile"
	locHTTPSCertFile              = "httpsCertFile"
	locHTTPSKeyFile               = "httpsKeyFile"
	locDatabase                   = "database"
	locLogFile                    = "logFile"
	locCsrfTokens                 = "csrfTokens"
	locPanicLog                   = "panicLog"
	locAuditLog                   = "auditLog"
	locGUIAssets                  = "GUIAssets"
	locDefFolder                  = "defFolder"
)

// Platform dependent directories
var baseDirs = map[string]string{
	"config": defaultConfigDir(), // Overridden by -home flag
	"home":   homeDir(),          // User's home directory, *not* -home flag
}

// Use the variables from baseDirs here
var locations = map[locationEnum]string{
	locConfigFile:    "${config}/config.xml",
	locCertFile:      "${config}/cert.pem",
	locKeyFile:       "${config}/key.pem",
	locHTTPSCertFile: "${config}/https-cert.pem",
	locHTTPSKeyFile:  "${config}/https-key.pem",
	locDatabase:      "${config}/index-v0.14.0.db",
	locLogFile:       "${config}/syncthing.log", // -logfile on Windows
	locCsrfTokens:    "${config}/csrftokens.txt",
	locPanicLog:      "${config}/panic-${timestamp}.log",
	locAuditLog:      "${config}/audit-${timestamp}.log",
	locGUIAssets:     "${config}/gui",
	locDefFolder:     "${home}/Sync",
}

// expandLocations replaces the variables in the location map with actual
// directory locations.
func expandLocations() error {
	for key, dir := range locations {
		for varName, value := range baseDirs {
			dir = strings.Replace(dir, "${"+varName+"}", value, -1)
		}
		var err error
		dir, err = osutil.ExpandTilde(dir)
		if err != nil {
			return err
		}
		locations[key] = dir
	}
	return nil
}

// defaultConfigDir returns the default configuration directory, as figured
// out by various the environment variables present on each platform, or dies
// trying.
func defaultConfigDir() string {
	switch runtime.GOOS {
	case "windows":
		if p := os.Getenv("LocalAppData"); p != "" {
			return filepath.Join(p, "Syncthing")
		}
		return filepath.Join(os.Getenv("AppData"), "Syncthing")

	case "darwin":
		dir, err := osutil.ExpandTilde("~/Library/Application Support/Syncthing")
		if err != nil {
			l.Fatalln(err)
		}
		return dir

	default:
		if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
			return filepath.Join(xdgCfg, "syncthing")
		}
		dir, err := osutil.ExpandTilde("~/.config/syncthing")
		if err != nil {
			l.Fatalln(err)
		}
		return dir
	}
}

// homeDir returns the user's home directory, or dies trying.
func homeDir() string {
	home, err := osutil.ExpandTilde("~")
	if err != nil {
		l.Fatalln(err)
	}
	return home
}

func timestampedLoc(key locationEnum) string {
	// We take the roundtrip via "${timestamp}" instead of passing the path
	// directly through time.Format() to avoid issues when the path we are
	// expanding contains numbers; otherwise for example
	// /home/user2006/.../panic-20060102-150405.log would get both instances of
	// 2006 replaced by 2015...
	tpl := locations[key]
	now := time.Now().Format("20060102-150405")
	return strings.Replace(tpl, "${timestamp}", now, -1)
}
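For reference, a standalone sketch of the "${var}" substitution performed by the removed expandLocations and timestampedLoc: base directories are replaced first, then "${timestamp}" is filled in via Go's reference-time layout so that digits already present in the path are not mangled by time.Format. The path and map values below are made up, and osutil.ExpandTilde is omitted:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Illustrative base directory; the real map is built from platform defaults.
	baseDirs := map[string]string{"config": "/home/user2006/.config/syncthing"}

	loc := "${config}/panic-${timestamp}.log"

	// Substitute the named base directories.
	for name, value := range baseDirs {
		loc = strings.Replace(loc, "${"+name+"}", value, -1)
	}

	// Substitute the timestamp last, using the 20060102-150405 reference layout.
	now := time.Now().Format("20060102-150405")
	loc = strings.Replace(loc, "${timestamp}", now, -1)

	fmt.Println(loc) // e.g. /home/user2006/.config/syncthing/panic-20240101-120000.log
}

Note how the "2006" in the user2006 path segment survives untouched, which is exactly the pitfall the original comment describes.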
File diff suppressed because it is too large.
@@ -1,65 +0,0 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main

import (
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/util"
)

type mockedConfig struct {
	gui config.GUIConfiguration
}

func (c *mockedConfig) GUI() config.GUIConfiguration {
	return c.gui
}

func (c *mockedConfig) ListenAddresses() []string {
	return nil
}

func (c *mockedConfig) RawCopy() config.Configuration {
	cfg := config.Configuration{}
	util.SetDefaults(&cfg.Options)
	return cfg
}

func (c *mockedConfig) Options() config.OptionsConfiguration {
	return config.OptionsConfiguration{}
}

func (c *mockedConfig) Replace(cfg config.Configuration) error {
	return nil
}

func (c *mockedConfig) Subscribe(cm config.Committer) {}

func (c *mockedConfig) Folders() map[string]config.FolderConfiguration {
	return nil
}

func (c *mockedConfig) Devices() map[protocol.DeviceID]config.DeviceConfiguration {
	return nil
}

func (c *mockedConfig) SetDevice(config.DeviceConfiguration) error {
	return nil
}

func (c *mockedConfig) SetDevices([]config.DeviceConfiguration) error {
	return nil
}

func (c *mockedConfig) Save() error {
	return nil
}

func (c *mockedConfig) RequiresRestart() bool {
	return false
}
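The removed file is a hand-written stub that satisfies the configuration interface consumed by the GUI tests, returning canned or zero values so no real config file is needed. A generic sketch of how such a stub is used; the interface, struct, and field names below are illustrative stand-ins, not the actual Syncthing API:

package main

import "fmt"

// guiProvider is an illustrative stand-in for the (larger) configuration
// interface the code under test would accept.
type guiProvider interface {
	GUI() guiConfiguration
}

type guiConfiguration struct {
	Address string
	UseTLS  bool
}

// stubConfig plays the same role as the removed mockedConfig: canned values,
// no I/O, trivially constructed inside a test.
type stubConfig struct {
	gui guiConfiguration
}

func (c *stubConfig) GUI() guiConfiguration { return c.gui }

// describeListener is the "code under test" in this sketch.
func describeListener(p guiProvider) string {
	scheme := "http"
	if p.GUI().UseTLS {
		scheme = "https"
	}
	return scheme + "://" + p.GUI().Address
}

func main() {
	cfg := &stubConfig{gui: guiConfiguration{Address: "127.0.0.1:8384", UseTLS: true}}
	fmt.Println(describeListener(cfg)) // https://127.0.0.1:8384
}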
Some files were not shown because too many files have changed in this diff.