Compare commits

...

231 Commits

Author SHA1 Message Date
Jakob Borg
c5243cd4d5 Translation update 2014-09-11 20:26:10 +02:00
Jakob Borg
db868ed29d Increase restart delay to 60s 2014-09-11 20:25:08 +02:00
Jakob Borg
450c7d80f8 Don't crash on walk error (fixes #663) 2014-09-11 20:23:22 +02:00
Jakob Borg
abbb001975 Typo in panic message (fixes ##662) 2014-09-11 18:42:42 +02:00
Jakob Borg
f35d83ae48 We have an extra field in compressed messages 2014-09-11 18:42:03 +02:00
Jakob Borg
a2315dc95e Merge pull request #665 from spaam/discover-readme
Update magic number in PROTOCOL.md
2014-09-11 18:36:15 +02:00
Johan Andersson
4e2feb6fbc Update magic number in PROTOCOL.md
Use the same magic number as in packets.go
2014-09-11 15:29:27 +02:00
Jakob Borg
13602b6769 Make the restart on wakeup configurable 2014-09-10 22:24:53 +02:00
Jakob Borg
85dba25246 Add pause before restart after standby 2014-09-10 22:20:03 +02:00
Jakob Borg
66432672b3 Clearfix to not hide Add Node on small screens (fixes #659) 2014-09-10 16:57:08 +02:00
Jakob Borg
e6d96e4c18 Fix hit zone for remote nodes accordion (ref #651) 2014-09-10 14:43:23 +02:00
Jakob Borg
9812305bb9 Translation update 2014-09-10 14:21:44 +02:00
Jakob Borg
f680a63a1f Woops, broke LastSeen 2014-09-10 11:29:01 +02:00
Jakob Borg
781d63cb2a UI Tweaks 2014-09-10 11:27:21 +02:00
Jakob Borg
9ff04ee3d8 Don't start when the config dir is not a dir 2014-09-10 10:36:05 +02:00
Jakob Borg
5d85a24977 Don't potentially block forever in Close() (fixes #655) 2014-09-10 08:48:15 +02:00
Jakob Borg
1e51fca0b0 Don't crash on new nodes (fixes #656) 2014-09-10 08:31:30 +02:00
Jakob Borg
5537d53f9a Timestamp the panic log 2014-09-10 08:25:56 +02:00
Jakob Borg
50a4170541 Announce actual port when UPnP is disabled (fixes #657) 2014-09-10 08:22:38 +02:00
Jakob Borg
3a8255bda1 Update lang-en.json 2014-09-10 07:48:35 +02:00
Jakob Borg
a617846f0f Merge pull request #654 from AudriusButkevicius/disco
Check if global discovery was actually started before trying to stop it (fixes #653)
2014-09-10 07:41:49 +02:00
Audrius Butkevicius
5772588c29 Check if global discovery was actually started before trying to stop it (fixes #653) 2014-09-10 00:05:28 +01:00
Jakob Borg
9d0dc45f74 Clarify clickability of top Edit menu (ref #651) 2014-09-08 19:54:11 +02:00
Jakob Borg
c6aefbc9a0 Entire panel title should be clickable (ref #651) 2014-09-08 19:46:33 +02:00
Jakob Borg
dbbafb0cc9 Hide irrelevant fields for disconnected nodes (ref #592) 2014-09-08 19:41:20 +02:00
Jakob Borg
6e8272f78f Implement incoming rate limit (fixes #613) 2014-09-08 17:25:55 +02:00
Jakob Borg
baf8a63121 Announce Server -> Discovery Server 2014-09-08 09:42:33 +02:00
Jakob Borg
fc4a76ee50 Only add one instance of a file to the need list (fixes #592) 2014-09-08 09:37:42 +02:00
Jakob Borg
2117d1d035 Tone down insignificant discovery error messages (ref #241) 2014-09-08 09:14:21 +02:00
Jakob Borg
0a70e0b7b6 Remove orphaned temp files before attempting to remove directories (fixes #492) 2014-09-07 21:29:06 +02:00
Jakob Borg
64ffac5671 Update goleveldb (fixes #644, closes #648) 2014-09-07 14:18:00 +02:00
Jakob Borg
ac384e8a9c Merge remote-tracking branch 'origin/pr/647'
* origin/pr/647:
  Listen for ConfigSaved event in the UI (fixes #244)
  Emit ConfigSaved event
  Save config after updating node name
2014-09-07 14:15:17 +02:00
Jakob Borg
f97c8222c7 Cleanup imports in previous 2014-09-07 14:08:49 +02:00
Jakob Borg
728289ee3a Merge remote-tracking branch 'origin/pr/646'
* origin/pr/646:
  Add session support (fixes #611)
2014-09-07 14:07:21 +02:00
Alexander Graf
5faa16f9ee use modification time for version timestamp; change version format to filename~yyyymmdd-hhmmss 2014-09-07 13:40:22 +02:00
Audrius Butkevicius
4e608b116a Add session support (fixes #611) 2014-09-07 12:10:17 +01:00
Audrius Butkevicius
521b49166e Listen for ConfigSaved event in the UI (fixes #244) 2014-09-07 12:07:25 +01:00
Audrius Butkevicius
8f32decf2d Emit ConfigSaved event 2014-09-07 12:04:40 +01:00
Audrius Butkevicius
0d51f83d2d Save config after updating node name 2014-09-07 12:04:40 +01:00
Audrius Butkevicius
78c6a68db9 Merge pull request #645 from AudriusButkevicius/cfg
Allow saving config from anywhere
2014-09-07 12:02:35 +01:00
Audrius Butkevicius
2949ab73e2 Add tests 2014-09-07 12:00:41 +01:00
Audrius Butkevicius
223741820d Fix tests 2014-09-07 12:00:41 +01:00
Audrius Butkevicius
4b57821f52 Allow saving config from anywhere 2014-09-07 12:00:37 +01:00
Audrius Butkevicius
74271a479f Silence failing ulimit calls 2014-09-06 15:04:49 +01:00
Audrius Butkevicius
c377177108 Fix tests on Windows 2014-09-06 14:56:12 +01:00
Jakob Borg
84eb729bd4 Don't start the browser on restarts (fixes #636) 2014-09-06 07:35:30 +02:00
Jakob Borg
14aea365c5 Don't stop permanently on exit (fixes #637) 2014-09-06 07:28:57 +02:00
Jakob Borg
97cb3fa5a5 Translation update (add Catalan) 2014-09-05 14:24:20 +02:00
Jakob Borg
b5368db704 Update assets 2014-09-05 13:26:17 +02:00
Jakob Borg
8c442b72f3 Merge remote-tracking branch 'origin/pr/634'
* origin/pr/634:
  Removed unused `optionEditor` directive from app.js
  Removed unused `clean` filter from app.js.
  Removed unused `shortPath` filter from app.js.
  Removed  unused `short` filter from app.js.
2014-09-05 13:25:53 +02:00
Jakob Borg
f8f6791d39 Add pyfisch 2014-09-05 13:25:40 +02:00
Pyfisch
0c09f077aa Removed unused optionEditor directive from app.js 2014-09-05 12:42:52 +02:00
Pyfisch
af2831d7b6 Removed unused clean filter from app.js. 2014-09-05 12:40:45 +02:00
Pyfisch
64d5d4aec7 Removed unused shortPath filter from app.js. 2014-09-05 12:39:35 +02:00
Pyfisch
619a6b2adb Removed unused short filter from app.js. 2014-09-05 12:38:21 +02:00
Jakob Borg
33a26bc0cf Merge pull request #631 from AudriusButkevicius/upnp
Check if we had successfully acquired a UPnP mapping before (fixes #627)
2014-09-05 09:09:23 +02:00
Audrius Butkevicius
b445a7c4d3 Check if we had successfully acquired a UPnP mapping before (fixes #627) 2014-09-04 23:02:10 +01:00
Jakob Borg
e6892d0c3e Autogen warning in lang dir 2014-09-04 23:37:23 +02:00
Jakob Borg
33e9a88b56 Proper signal handling in monitor process 2014-09-04 23:31:22 +02:00
Jakob Borg
df00a2251e Pesky copyright is pesky 2014-09-04 22:33:01 +02:00
Jakob Borg
92c44c8abe Rework .stignore functionality (fixes #561)
- Only one .stignore is supported, at the repo root
- Negative patterns (!) are supported
- Ignore patterns affect sent and received indexes, not only scanning
2014-09-04 22:30:42 +02:00
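
A minimal .stignore sketch illustrating the reworked rules above; the file names are hypothetical and the first-match-wins ordering is an assumption, not something stated in the commit:

    !important.txt
    *.txt
    build

With a single .stignore at the repo root, *.txt files and the build directory would be ignored while the negative pattern re-includes important.txt; per the commit, such patterns now also filter sent and received indexes, not only scanning.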
Jakob Borg
8e4f7bbd3e Merge pull request #626 from alex2108/master
staggered versioner: count directories as files (fixes #607)
2014-09-04 21:59:38 +02:00
Jakob Borg
a40217cf07 Trim dead bits of code 2014-09-04 22:07:59 +02:00
Jakob Borg
e586fda5f2 Woops, close the right fd 2014-09-04 22:03:25 +02:00
Alexander Graf
a58564ff88 count directories as files (fixes #607) 2014-09-04 16:48:24 +02:00
Jakob Borg
89885b9fb9 Clean up GUI directory 2014-09-04 08:53:28 +02:00
Jakob Borg
5c7d977ae0 Use woff instead of ttf font 2014-09-04 08:47:23 +02:00
Jakob Borg
2cd3ee9698 Dead code cleanup 2014-09-04 08:39:39 +02:00
Jakob Borg
dd3080e018 Copyright cleanup 2014-09-04 08:31:38 +02:00
Jakob Borg
5915e8e86a Don't trust mime.TypeByExtension for the easy stuff (fixes #598) 2014-09-04 08:26:12 +02:00
Jakob Borg
3c67c06654 Merge pull request #619 from marcindziadus/sorting-order
Change sorting order (fix #618)
2014-09-03 23:26:20 +02:00
Marcin
76232ca573 change sorting order 2014-09-03 18:41:45 +02:00
Jakob Borg
5235e82bda Limit number of open db files (fixes #587) 2014-09-02 14:47:36 +02:00
Jakob Borg
10f0713257 Use a monitor process to handle panics and restarts (fixes #586) 2014-09-02 13:24:41 +02:00
Jakob Borg
e9c7970ea4 Only create assets map on demand 2014-09-02 13:07:33 +02:00
Jakob Borg
1a6ac4aeb1 Integration tests should use v4 localhost 2014-09-02 12:10:18 +02:00
Jakob Borg
f633bdddf0 Update goleveldb 2014-09-02 09:44:07 +02:00
Jakob Borg
de0b91d157 Show IPv6 GUI URL correctly 2014-09-01 20:04:22 +02:00
Jakob Borg
2e77e498f5 Use more compact base64 encoding for assets 2014-09-01 20:04:22 +02:00
Jakob Borg
4ac67eb1f9 Merge pull request #589 from AudriusButkevicius/include
Add #include directive to .stignore (fixes #424)
2014-09-01 18:08:53 +02:00
Jakob Borg
2b536de37f Don't fake indexes for stopped repos 2014-09-01 17:48:39 +02:00
Jakob Borg
2ffa92ba1b Warn on startup for stopped repositories 2014-09-01 17:47:18 +02:00
Jakob Borg
6ecddd8388 Don't fail build on Solaris 2014-09-01 17:26:28 +02:00
Jakob Borg
bd2772ea4c If all instances of the global version is invalid, the file should not be on the need list 2014-09-01 09:07:51 +02:00
Audrius Butkevicius
92bf79d53b Fix tests 2014-08-31 22:34:13 +01:00
Audrius Butkevicius
eebe0eeb71 Handle recursive includes 2014-08-31 22:33:49 +01:00
Jakob Borg
1068eaa0b9 Translation update 2014-08-31 21:52:29 +02:00
Jakob Borg
faac3e7d7c Don't clobber staggeredMaxAge = 0 (fixes #604) 2014-08-31 21:44:06 +02:00
Jakob Borg
dab4340207 Merge pull request #603 from AudriusButkevicius/restart
Fix GUI breaking during restarts (fixes #577)
2014-08-31 21:30:51 +02:00
Audrius Butkevicius
fd2567748f Fix GUI breaking during restarts (fixes #577) 2014-08-31 15:49:08 +01:00
Jakob Borg
c2daedbd11 Try not to crash the box with failing tests 2014-08-31 15:36:05 +01:00
Jakob Borg
7c604beb73 Test cases for ignore #include 2014-08-31 15:35:48 +01:00
Audrius Butkevicius
8c42aea827 Add #include directive to .stignore (fixes #424)
Though breaks #502 in a way, as .stignore is not the only place where
stuff gets defined anymore.

Though it never was, as .stignore can be placed in each dir, but I think we
should phase that out in favor of globbing which means that we can then
have a single file, which means that we can have a UI for editing that.

Alternative would be as suggested to include a .stglobalignore which is then synced
as a normal file, but gets included by default.

Then when the UI would have two editors, a local ignore, and a global ignore.
2014-08-31 15:32:22 +01:00
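
A hedged sketch of the new directive in a root .stignore; the included file name is illustrative only:

    #include .stglobalignore
    *.bak

Here .stglobalignore could itself be a normally synced file carrying shared patterns, along the lines of the alternative discussed in the commit message, while *.bak stays a purely local rule.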
Jakob Borg
cf1bfdfb61 Hold rmut read lock when looking at nodeStatRefs 2014-08-31 13:48:43 +02:00
Jakob Borg
75b26513e1 Don't crash under suspicious circumstances... (fixes #602) 2014-08-31 13:48:16 +02:00
Jakob Borg
6c09a77a97 Clean out index for nonexistent repositories (fixes #549) 2014-08-31 13:34:17 +02:00
Jakob Borg
67389c39fb For now, don't allow changing repo path (ref #549) 2014-08-31 13:05:08 +02:00
Jakob Borg
c326103e6e Add X-Syncthing-Version header to HTTP responses 2014-08-31 12:59:20 +02:00
Jakob Borg
c2120a16da Try to set some reasonable resource limits when running tests 2014-08-30 10:02:10 +02:00
Jakob Borg
258ad4352e Fix connecting to discovered IPv6 address 2014-08-29 17:18:25 +02:00
Jakob Borg
435d3958f4 Update goleveldb 2014-08-29 12:36:45 +02:00
Jakob Borg
b0408ef5c6 Info line formatting (ref #583) 2014-08-28 21:35:55 +02:00
Jakob Borg
1c41b0bc2f Document GOMAXPROCS instead of (useless) STDEADLOCKTIMEOUT 2014-08-28 15:29:49 +02:00
Jakob Borg
aa827f3042 Fix language detection, never show untranslated strings (fixes #543) 2014-08-28 13:23:23 +02:00
Audrius Butkevicius
f44f5964bb Set rescan interval on default repository (fixes #579) 2014-08-27 23:45:09 +01:00
Audrius Butkevicius
91ba93bd7a Merge pull request #571 from syncthing/recheck
Add routine for checking possible standby (fixes #565)
2014-08-27 22:44:36 +01:00
Audrius Butkevicius
0abe4cefb4 Add routine for checking possible standby (fixes #565) 2014-08-27 22:42:59 +01:00
Jakob Borg
bccd460f3b Translation update 2014-08-27 10:20:44 +02:00
Jakob Borg
d1023004e1 Saner error/debug messsages for permission issues 2014-08-27 07:00:15 +02:00
Jakob Borg
04a5f9cb04 Fix fnmatch tests for Windows 2014-08-26 13:26:52 +02:00
Jakob Borg
9818e2b550 Use more fnmatch-like matcher in .stignore (fixes #426) 2014-08-26 11:12:20 +02:00
Jakob Borg
fe43e3b89d Try not to leave directories behind with incorrect permissions 2014-08-26 11:12:20 +02:00
Jakob Borg
e1f1ae041f Don't leak request slots (fixes #483) 2014-08-25 17:48:18 +02:00
Jakob Borg
5bcf26e324 Fix table layout for wide elements, at the price of ellipsis (fixes #326, fixes #309) 2014-08-25 16:37:15 +02:00
Jakob Borg
5f47a8149f Use ISO date format because I'm opinionated 2014-08-25 15:53:32 +02:00
Jakob Borg
00b662b53a Merge branch 'pr/556'
* pr/556:
  Add translation strings
  Display Last Seen value in the UI
  Add /rest/stats/node endpoint
  Add stats package and node related statistics model
2014-08-25 15:52:59 +02:00
Jakob Borg
faf519ab1b Warn about incorrect -goarch values 2014-08-25 14:55:19 +02:00
Jakob Borg
fce73f6f17 Verify CONTRIBUTORS file 2014-08-25 14:55:19 +02:00
Audrius Butkevicius
887890baf5 Add translation strings 2014-08-25 12:57:44 +01:00
Audrius Butkevicius
c66b24feeb Display Last Seen value in the UI 2014-08-25 12:54:50 +01:00
Audrius Butkevicius
84c6f147ad Add /rest/stats/node endpoint 2014-08-25 12:49:22 +01:00
Audrius Butkevicius
0cdb0daa8c Add stats package and node related statistics model 2014-08-25 12:49:21 +01:00
Jakob Borg
eee702f299 Don't run tests in build.sh all 2014-08-25 08:50:13 +02:00
Jakob Borg
df65247325 Increase max path length 1024 -> 8192 bytes (fixes #551)
PATH_MAX seems to be 4096 most of the time; Windows limit is much lower.
2014-08-25 08:48:49 +02:00
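
A minimal Go sketch of the kind of guard this limit implies; the constant and function names are hypothetical, only the 8192-byte figure comes from the commit:

    import "fmt"

    const maxPathLen = 8192 // raised from 1024 per the commit above

    // checkPathLength rejects paths longer than the configured byte limit.
    func checkPathLength(path string) error {
            if len(path) > maxPathLen {
                    return fmt.Errorf("path longer than %d bytes: %q", maxPathLen, path)
            }
            return nil
    }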
Jakob Borg
1a174e75d3 Merge pull request #562 from AudriusButkevicius/restart
Fix race condition while restarting (fixes #560)
2014-08-25 08:03:18 +02:00
Audrius Butkevicius
9e1fd3454f Fix race condition while restarting (fixes #560) 2014-08-25 00:15:28 +01:00
Audrius Butkevicius
3b1603cadf Merge pull request #557 from AudriusButkevicius/opts
Allow configuring GUI options from command line and environment (fixes #505, closes #507)
2014-08-24 16:56:15 +01:00
Audrius Butkevicius
8803bac708 Allow configuring GUI options from command line and environment (fixes #505, closes #507) 2014-08-24 16:55:35 +01:00
Audrius Butkevicius
3a01eaa4a6 Fix build.go on Windows 2014-08-23 21:19:29 +01:00
Jakob Borg
9f84c1c448 New repos must have a default rescan interval (fixes #555) 2014-08-23 19:40:39 +02:00
Jakob Borg
dda0390156 Correctly set GOARM on ARM builds 2014-08-23 10:52:12 +02:00
Jakob Borg
c74509dd5f Add forgotten lang-*.json files 2014-08-23 10:44:08 +02:00
Jakob Borg
f61bbb2ff4 Tweaks and optimizations 2014-08-23 10:43:48 +02:00
Jakob Borg
e7f60161a3 Don't leak fd 2014-08-23 10:37:58 +02:00
Jakob Borg
ebec4fbc24 Translation update (add Bulgarian, Lithuanian) 2014-08-22 18:18:13 +02:00
Jakob Borg
1d4105ae3d UI tweaks for staggered versioner 2014-08-22 18:16:05 +02:00
Jakob Borg
586d49f0c3 Merge pull request #541 from alex2108/master 2014-08-22 17:58:01 +02:00
Jakob Borg
5b0fab0697 Add alex2108 2014-08-22 17:57:43 +02:00
Alexander Graf
2b3359dff3 add staggered versioner 2014-08-22 00:41:17 +02:00
Jakob Borg
63203aa14c Merge pull request #548 from AudriusButkevicius/warning
Do not warn about failed IPv6 discovery, warn about no discovery
2014-08-21 18:54:33 +02:00
Audrius Butkevicius
716a8329c2 Do not warn about failed IPv6 discovery 2014-08-20 22:06:58 +01:00
Jakob Borg
dab0aec85e Latest build badge should link to latest build 2014-08-20 12:23:04 +02:00
Jakob Borg
1f1ab017c0 Show rescan interval per repo 2014-08-20 01:44:05 +02:00
Audrius Butkevicius
b6912ef95e Merge pull request #544 from marcindziadus/rescan-interval
Per repository scan intervals
2014-08-20 00:02:34 +01:00
Audrius Butkevicius
db54dca694 Do not fire UIOffline when navigating away
Fixes #487
2014-08-19 23:44:40 +01:00
Marcin
0e751b983c Enable to configure scan interval per each repository independently
Fix broken tests

Bugfix

Clean up

Refactor variable name

Adjust tests

Minor fixes

Fix typo. Remove indent.
2014-08-20 00:36:36 +02:00
Audrius Butkevicius
997b20a975 Set Content-Type before sending out headers 2014-08-19 23:30:32 +01:00
Jakob Borg
386f9c42c2 Merge pull request #545 from AudriusButkevicius/flush
Flush headers before potentially blocking
2014-08-20 00:21:49 +02:00
Audrius Butkevicius
cfae06db65 Flush headers before potentially blocking 2014-08-19 23:18:28 +01:00
Jakob Borg
44260b7b5c Add marcindziadus 2014-08-20 00:05:43 +02:00
Jakob Borg
13063b957f Use drained legacy pool in goleveldb 2014-08-19 23:49:03 +02:00
Jakob Borg
ee05e12480 Windows nodes should ignore deleted impossible files 2014-08-19 15:36:57 +02:00
Jakob Borg
5538545fb0 README links to build guide 2014-08-19 15:33:20 +02:00
Jakob Borg
bc1167c2c5 README links to build, not only artefacts 2014-08-19 15:20:53 +02:00
Jakob Borg
c57656e4c3 Do honest test coverage analysis in Jenkins 2014-08-19 12:43:50 +02:00
Jakob Borg
264400a984 Check for supported go version build.go 2014-08-19 11:04:20 +02:00
Jakob Borg
408db4eb1d rm -rf travis 2014-08-19 10:05:40 +02:00
Jakob Borg
9347f223ef Note about review of pull requests 2014-08-19 09:55:50 +02:00
Jakob Borg
518aa30c9c Don't consider empty language codes when selecting language (fixes #540) 2014-08-18 23:43:58 +02:00
Jakob Borg
6bbf1f9355 Emit Node/Repo Rejected events on unknown nodes / repos. 2014-08-18 23:34:03 +02:00
Jakob Borg
b221e4d445 build.sh is a shim 2014-08-18 22:05:26 +02:00
Jakob Borg
580fccbfca Don't build build.go on go get 2014-08-18 21:57:10 +02:00
Jakob Borg
045916efcc ARM builds in build.go 2014-08-18 21:53:08 +02:00
Jakob Borg
4f92482294 build.sh -> build.go for better cross platform support 2014-08-18 21:39:35 +02:00
Jakob Borg
2f055a75a0 Merge pull request #537 from marclaporte/patch-2
Fix some typos
2014-08-18 10:43:29 +02:00
Marc Laporte
f0621207e3 Fix some typos 2014-08-17 23:27:04 -04:00
Jakob Borg
d657bc4e3d Implement IPv6 multicast again (fixes #346) 2014-08-17 15:14:44 +02:00
Jakob Borg
a1fd07b27c beacon.Beacon -> beacon.Broadcast 2014-08-17 15:14:44 +02:00
Audrius Butkevicius
52219c5f3f Merge pull request #532 from AudriusButkevicius/config
Replace NodeConfiguration with RepositoryNodeConfiguration (Fixes #522)
2014-08-17 12:47:12 +01:00
Jakob Borg
1a66461e07 All printed warnings should have some context 2014-08-17 10:28:36 +02:00
Jakob Borg
d20df12168 Add repoPath and repoID as parameters to versioner factory (fixes #531) 2014-08-17 07:52:49 +02:00
Audrius Butkevicius
668b429615 Better error message
Closes #526
2014-08-17 00:03:41 +01:00
Audrius Butkevicius
7db528be39 Replace NodeConfiguration with RepositoryNodeConfiguration 2014-08-16 23:20:21 +01:00
Jakob Borg
60f760ee49 Translation update 2014-08-16 23:05:57 +02:00
Jakob Borg
884aaab751 Always print hostname on connect (even if something is set in config) 2014-08-16 22:55:05 +02:00
Jakob Borg
e968560ea4 Spelling 2014-08-16 22:35:15 +02:00
Jakob Borg
07caaa96e4 New translation strings 2014-08-16 22:29:21 +02:00
Audrius Butkevicius
e8a679c280 Advertise and update node names on cluster config exchange
Closes #244
2014-08-16 21:26:30 +01:00
Jakob Borg
bc885f1d08 Don't attempt to create default repo before config (fixes #530)
We'll create it anyway a little later during startup, as part of the
general "check all repos for viability" step.
2014-08-16 22:22:33 +02:00
Jakob Borg
f2f051d6de Merge pull request #529 from syncthing/windows-build
Fix tests on Windows
2014-08-16 21:37:00 +02:00
Jakob Borg
49a0bfccba Cache discovery results up to five minutes (fixes #358) 2014-08-16 21:27:00 +02:00
Audrius Butkevicius
0c1e60894f Fix tests on Windows 2014-08-16 17:33:01 +01:00
Jakob Borg
ace87ad7bb Normalize file name format in on disk db (fixes #479) 2014-08-15 12:52:16 +02:00
Jakob Borg
50f0097843 Add Rescan button to repositories 2014-08-15 12:48:36 +02:00
Jakob Borg
32a9466277 Update goleveldb 2014-08-15 09:18:38 +02:00
Jakob Borg
1ee3407946 Merge pull request #524 from marclaporte/patch-1
Fix typo
2014-08-15 08:35:25 +02:00
Marc Laporte
f1120d7aa9 Fix typo 2014-08-14 19:58:25 -04:00
Jakob Borg
2e7d6b2f99 Translation update, zh-CN 2014-08-14 17:09:29 +02:00
Jakob Borg
dfef929187 Translation update, handle locales precisely 2014-08-14 17:04:17 +02:00
Jakob Borg
e78d9ad592 Translation update (add Hungarian) 2014-08-14 14:00:33 +02:00
Jakob Borg
9f2948f595 Fix tests for UPnP options 2014-08-14 12:59:09 +02:00
Jakob Borg
198da910ed Use new StopGlobal on the discovery when external port changes 2014-08-14 12:49:41 +02:00
Jakob Borg
5f1bf9d9d6 Merge branch 'master' into pr/511
* master: (21 commits)
  Mechanism to stop external announcement routine
  Update goleveldb
  Perfstats are not supported on Windows
  Build should fail if a platform does not build
  Include perfstats and heap profiles in standard build
  Actually no, lets not do uploads at all from the build script.
  ./build.sh upload build server artifacts
  Sign checksums, not files.
  Badges, add build server
  Remove Solaris build again, for now
  Travis should build with 1.3 + tip
  Translation update
  Indicate aproximativeness of repo sizes...
  Slightly more conservative guess on file size
  Fix set tests
  Small goleveldb hack to reduce allocations somewhat
  Don't load block lists from db unless necessary
  Rip out the Suppressor (maybe to be reintroduced)
  Reduce allocations while hash scanning
  Add heap profiling support
  ...

Conflicts:
	discover/discover.go
2014-08-14 12:48:33 +02:00
Jakob Borg
798c4aef9a Mechanism to stop external announcement routine 2014-08-14 12:44:49 +02:00
Jakob Borg
f80f5b3bda Update goleveldb 2014-08-14 12:14:48 +02:00
Audrius Butkevicius
cbb07b0d67 Set default UPnP renewal to 30 minutes 2014-08-13 22:45:44 +01:00
Audrius Butkevicius
7cc9921615 Restart port sequence when UPnP renewal fails 2014-08-13 22:42:58 +01:00
Jakob Borg
7555fe065e Perfstats are not supported on Windows 2014-08-13 22:31:56 +02:00
Jakob Borg
d977f4278e Build should fail if a platform does not build 2014-08-13 22:27:16 +02:00
Audrius Butkevicius
870e3ca893 Rediscover gateway on UPnP renewal 2014-08-13 21:15:20 +01:00
Jakob Borg
213acaee3b Include perfstats and heap profiles in standard build 2014-08-13 14:39:47 +02:00
Jakob Borg
58381496a2 Actually no, lets not do uploads at all from the build script. 2014-08-13 13:11:41 +02:00
Jakob Borg
5981e42aed ./build.sh upload build server artifacts 2014-08-13 12:58:59 +02:00
Jakob Borg
3c9165d295 Sign checksums, not files. 2014-08-13 12:52:04 +02:00
Jakob Borg
60d0ef93ac Badges, add build server 2014-08-13 10:15:22 +02:00
Jakob Borg
f45d5b0066 Remove Solaris build again, for now 2014-08-13 09:42:21 +02:00
Jakob Borg
b71306480f Travis should build with 1.3 + tip 2014-08-13 09:01:17 +02:00
Jakob Borg
0c7771ccc5 Translation update 2014-08-13 00:35:37 +02:00
Audrius Butkevicius
dc9df0a79a Reannounce renewed UPnP mapping 2014-08-12 23:29:29 +01:00
Jakob Borg
17cd49fbdc Indicate aproximativeness of repo sizes... 2014-08-12 23:59:20 +02:00
Jakob Borg
ad273adb78 Slightly more conservative guess on file size 2014-08-12 16:36:24 +02:00
Jakob Borg
150e7daf2d Fix set tests 2014-08-12 16:17:32 +02:00
Jakob Borg
b004155e8f Small goleveldb hack to reduce allocations somewhat 2014-08-12 15:39:24 +02:00
Jakob Borg
92eed3b33b Don't load block lists from db unless necessary 2014-08-12 15:04:32 +02:00
Jakob Borg
fe7b77198c Rip out the Suppressor (maybe to be reintroduced) 2014-08-12 15:04:02 +02:00
Jakob Borg
f51b775698 Reduce allocations while hash scanning 2014-08-12 15:04:02 +02:00
Jakob Borg
939dd5cb31 Add heap profiling support 2014-08-12 15:04:01 +02:00
Jakob Borg
adcbe13ecd Update goleveldb 2014-08-12 09:24:36 +02:00
Audrius Butkevicius
8976e53998 Add UPnP renewal 2014-08-11 23:10:24 +01:00
Jakob Borg
97dda6a4bb Correct the memory stats in perfstats-*.csv 2014-08-11 22:10:15 +02:00
Jakob Borg
9e395eb883 Use a slightly heavier Raleway for headings (fixes #493) 2014-08-11 21:50:15 +02:00
Jakob Borg
60da59623e Limit size of sent indexes a bit, taking number of blocks into account 2014-08-11 20:54:59 +02:00
Jakob Borg
9752ea9ac3 Implement external scan request (fixes #9) 2014-08-11 20:20:01 +02:00
Jakob Borg
279693078a Update deps 2014-08-11 14:24:20 +02:00
Jakob Borg
19b93045a4 Merge pull request #508 from AudriusButkevicius/modals
Fix and refactor modals
2014-08-11 12:12:28 +02:00
Jakob Borg
5231a09820 Add ./build.sh noupgrade and all-noupgrade 2014-08-11 11:59:33 +02:00
Jakob Borg
ab952e6103 Add ./build.sh clean 2014-08-11 11:54:48 +02:00
Jakob Borg
a418771c04 Puller entrance warning 2014-08-11 07:52:03 +02:00
Audrius Butkevicius
b41590ce38 Fix and refactor modals 2014-08-10 23:28:04 +01:00
Jakob Borg
c7dde9499f Verify locking and correct update order for global 2014-08-10 07:27:24 +02:00
Jakob Borg
528cbf62ec POST to /config should return an error when something bad happens (fixes #489) 2014-08-08 14:09:27 +02:00
194 changed files with 9038 additions and 3323 deletions

.gitignore vendored

@@ -9,3 +9,4 @@ coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml


@@ -1,18 +0,0 @@
language: go
go:
- tip
install:
- export PATH=$PATH:$HOME/gopath/bin
- ./build.sh setup
script:
- ./build.sh test-cov
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=syncthing/syncthing -repotoken="$COVERALLS_TOKEN"
env:
global:
secure: "TSPJDsokGCQhKLjgG3c58qHn8Qxhh4zEkWFf0XIOOY2nlDVzdgXDsC+Nq0YaP4106Ee4FgkSefsUTQV5lq/IyYW8elgqlgghjOtOi6RJa14eIS9Yy5Bkx6MXn0QfZX/lG+sy42pKSNk43y9GWx/qrt4nkfTtTvI5cXgwDGYdmX8="


@@ -34,7 +34,10 @@ latest info on Transifex.
Please do contribute! If you want to contribute but are unsure where to
start, the [Contributions Needed
topic](http://discourse.syncthing.net/t/49) lists areas in need of
attention. In general, any open issues are fair game!
attention. In general, any open issues are fair game! Be prepared for a
[certain amount of
review](https://discourse.syncthing.net/t/733); it's all in the name of
quality. :)
## Licensing


@@ -1,12 +1,15 @@
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Gilli Sigurdsson <gilli@vx.is>
James Patterson <jamespatterson@operamail.com>
Jens Diemer <github.com@jensdiemer.de>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Marcin Dziadus <dziadus.marcin@gmail.com>
Michael Tilli <pyfisch@gmail.com>
Philippe Schommers <philippe@schommers.be>
Ryan Sullivan <kayoticsully@gmail.com>
Tully Robinson <tully@tojr.org>

Godeps/Godeps.json generated

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.3",
"GoVersion": "go1.3.1",
"Packages": [
"./cmd/..."
],
@@ -12,23 +12,23 @@
},
{
"ImportPath": "code.google.com/p/go.crypto/bcrypt",
"Comment": "null-213",
"Rev": "aa2644fe4aa50e3b38d75187b4799b1f0c9ddcef"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.crypto/blowfish",
"Comment": "null-213",
"Rev": "aa2644fe4aa50e3b38d75187b4799b1f0c9ddcef"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.text/transform",
"Comment": "null-89",
"Rev": "df15baaf13e3f62b6b7a901e74caa3818a7c0a7c"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/go.text/unicode/norm",
"Comment": "null-89",
"Rev": "df15baaf13e3f62b6b7a901e74caa3818a7c0a7c"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/snappy-go/snappy",
@@ -41,7 +41,7 @@
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "694859acb207675085232438780db923ceb43e96"
"Rev": "a597b63b87d6140f79084c8aab214b4d533833a1"
},
{
"ImportPath": "github.com/juju/ratelimit",
@@ -49,7 +49,7 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "c5955912e3287376475731c5bc59c79a5a799105"
"Rev": "457e6f75905f7c1316afd8c43ad323f4c32b31c2"
},
{
"ImportPath": "github.com/vitrun/qart/coding",


@@ -4,6 +4,22 @@
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
@@ -12,6 +28,7 @@ package blowfish
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
@@ -54,86 +71,44 @@ func ExpandKey(key []byte, c *Cipher) {
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var expandedSalt [4]uint32
for i := range expandedSalt {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(salt[j])
j++
if j >= len(salt) {
j = 0
}
}
expandedSalt[i] = d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= expandedSalt[i&2]
r ^= expandedSalt[(i&2)+1]
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s0[i+2], c.s0[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s1[i+2], c.s1[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s2[i+2], c.s2[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s3[i+2], c.s3[i+3] = l, r
}
}
@@ -182,9 +157,3 @@ func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xr ^= c.p[0]
return xr, xl
}
func zero(x []uint32) {
for i := range x {
x[i] = 0
}
}


@@ -4,9 +4,7 @@
package blowfish
import (
"testing"
)
import "testing"
type CryptTest struct {
key []byte
@@ -202,3 +200,75 @@ func TestSaltedCipherKeyLength(t *testing.T) {
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
}
}
// Test vectors generated with Blowfish from OpenSSH.
var saltedVectors = [][8]byte{
{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
}
func TestSaltedCipher(t *testing.T) {
var key, salt [32]byte
for i := range key {
key[i] = byte(i)
salt[i] = byte(i + 32)
}
for i, v := range saltedVectors {
c, err := NewSaltedCipher(key[:], salt[:i])
if err != nil {
t.Fatal(err)
}
var buf [8]byte
c.Encrypt(buf[:], buf[:])
if v != buf {
t.Errorf("%d: expected %x, got %x", i, v, buf)
}
}
}
func BenchmarkExpandKeyWithSalt(b *testing.B) {
key := make([]byte, 32)
salt := make([]byte, 16)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
expandKeyWithSalt(key, salt, c)
}
}
func BenchmarkExpandKey(b *testing.B) {
key := make([]byte, 32)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
ExpandKey(key, c)
}
}


@@ -40,8 +40,11 @@ func NewCipher(key []byte) (*Cipher, error) {
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
// bytes. Only the first 16 bytes of salt are used.
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
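
For context, a minimal usage sketch of the constructor changed above, assuming the vendored code.google.com/p/go.crypto/blowfish import path; with the new check, a zero-length salt now falls back to plain NewCipher:

    import "code.google.com/p/go.crypto/blowfish"

    // encryptOneBlock is a hypothetical helper, not part of the package.
    func encryptOneBlock(key, salt []byte, block [8]byte) ([8]byte, error) {
            c, err := blowfish.NewSaltedCipher(key, salt) // len(salt) == 0 now behaves like NewCipher
            if err != nil {
                    return block, err
            }
            var out [8]byte
            c.Encrypt(out[:], block[:]) // Blowfish works on 8-byte blocks
            return out, nil
    }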


@@ -7,6 +7,8 @@ import (
"io"
"io/ioutil"
"testing"
"github.com/calmh/xdr"
)
type XDRBenchStruct struct {
@@ -58,6 +60,16 @@ func BenchmarkThisEncode(b *testing.B) {
}
}
func BenchmarkThisEncoder(b *testing.B) {
w := xdr.NewWriter(ioutil.Discard)
for i := 0; i < b.N; i++ {
_, err := s.encodeXDR(w)
if err != nil {
b.Fatal(err)
}
}
}
type repeatReader struct {
data []byte
}
@@ -86,3 +98,16 @@ func BenchmarkThisDecode(b *testing.B) {
rr.Reset(e)
}
}
func BenchmarkThisDecoder(b *testing.B) {
rr := &repeatReader{e}
r := xdr.NewReader(rr)
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.decodeXDR(r)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}


@@ -7,6 +7,8 @@ package xdr
import (
"errors"
"io"
"reflect"
"unsafe"
)
var ErrElementSizeExceeded = errors.New("element size exceeded")
@@ -15,7 +17,6 @@ type Reader struct {
r io.Reader
err error
b [8]byte
sb []byte
}
func NewReader(r io.Reader) *Reader {
@@ -35,23 +36,17 @@ func (r *Reader) ReadRaw(bs []byte) (int, error) {
}
func (r *Reader) ReadString() string {
if r.sb == nil {
r.sb = make([]byte, 64)
} else {
r.sb = r.sb[:cap(r.sb)]
}
r.sb = r.ReadBytesInto(r.sb)
return string(r.sb)
return r.ReadStringMax(0)
}
func (r *Reader) ReadStringMax(max int) string {
if r.sb == nil {
r.sb = make([]byte, 64)
} else {
r.sb = r.sb[:cap(r.sb)]
buf := r.ReadBytesMaxInto(max, nil)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh := reflect.StringHeader{
Data: bh.Data,
Len: bh.Len,
}
r.sb = r.ReadBytesMaxInto(max, r.sb)
return string(r.sb)
return *((*string)(unsafe.Pointer(&sh)))
}
func (r *Reader) ReadBytes() []byte {
@@ -80,10 +75,10 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
return nil
}
if l+pad(l) > len(dst) {
dst = make([]byte, l+pad(l))
if fullLen := l + pad(l); fullLen > len(dst) {
dst = make([]byte, fullLen)
} else {
dst = dst[:l+pad(l)]
dst = dst[:fullLen]
}
var n int


@@ -22,7 +22,7 @@ func (r *Reader) ReadUint8() uint8 {
}
if debug {
dl.Printf("rd uint8=%d (0x%08x)", r.b[0], r.b[0])
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
}
return r.b[0]
}
@@ -43,7 +43,7 @@ func (r *Reader) ReadUint16() uint16 {
v := uint16(r.b[1]) | uint16(r.b[0])<<8
if debug {
dl.Printf("rd uint16=%d (0x%08x)", v, v)
dl.Printf("rd uint16=%d (0x%04x)", v, v)
}
return v
}


@@ -3,7 +3,11 @@
package xdr
import "io"
import (
"io"
"reflect"
"unsafe"
)
var padBytes = []byte{0, 0, 0}
@@ -38,7 +42,13 @@ func (w *Writer) WriteRaw(bs []byte) (int, error) {
}
func (w *Writer) WriteString(s string) (int, error) {
return w.WriteBytes([]byte(s))
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}
func (w *Writer) WriteBytes(bs []byte) (int, error) {


@@ -170,7 +170,7 @@ func (p *dbBench) writes(perBatch int) {
b.SetBytes(116)
}
func (p *dbBench) drop() {
func (p *dbBench) gc() {
p.keys, p.values = nil, nil
runtime.GC()
}
@@ -249,6 +249,9 @@ func (p *dbBench) newIter() iterator.Iterator {
}
func (p *dbBench) close() {
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
p.b.Log("Block pool stats: ", bp)
}
p.db.Close()
p.stor.Close()
os.RemoveAll(benchDB)
@@ -331,7 +334,7 @@ func BenchmarkDBRead(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.drop()
p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -362,7 +365,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
p := openDBBench(b, true)
p.populate(b.N)
p.fill()
p.drop()
p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -379,7 +382,7 @@ func BenchmarkDBReadTable(b *testing.B) {
p.populate(b.N)
p.fill()
p.reopen()
p.drop()
p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -395,7 +398,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.drop()
p.gc()
iter := p.newIter()
b.ResetTimer()
@@ -413,7 +416,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
p.populate(b.N)
p.fill()
p.reopen()
p.drop()
p.gc()
iter := p.newIter()
b.ResetTimer()


@@ -11,115 +11,149 @@ import (
"sync/atomic"
)
// SetFunc used by Namespace.Get method to create a cache object. SetFunc
// may return ok false, in that case the cache object will not be created.
type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin)
// SetFunc is the function that will be called by Namespace.Get to create
// a cache object, if charge is less than one than the cache object will
// not be registered to cache tree, if value is nil then the cache object
// will not be created.
type SetFunc func() (charge int, value interface{})
// SetFin will be called when corresponding cache object are released.
type SetFin func()
// DelFin is the function that will be called as the result of a delete operation.
// Exist == true is indication that the object is exist, and pending == true is
// indication of deletion already happen but haven't done yet (wait for all handles
// to be released). And exist == false means the object doesn't exist.
type DelFin func(exist, pending bool)
// DelFin will be called when corresponding cache object are released.
// DelFin will be called after SetFin. The exist is true if the corresponding
// cache object is actually exist in the cache tree.
type DelFin func(exist bool)
// PurgeFin will be called when corresponding cache object are released.
// PurgeFin will be called after SetFin. If PurgeFin present DelFin will
// not be executed but passed to the PurgeFin, it is up to the caller
// to call it or not.
type PurgeFin func(ns, key uint64, delfin DelFin)
// PurgeFin is the function that will be called as the result of a purge operation.
type PurgeFin func(ns, key uint64)
// Cache is a cache tree. A cache instance must be goroutine-safe.
type Cache interface {
// SetCapacity sets cache capacity.
// SetCapacity sets cache tree capacity.
SetCapacity(capacity int)
// GetNamespace gets or creates a cache namespace for the given id.
// Capacity returns cache tree capacity.
Capacity() int
// Used returns used cache tree capacity.
Used() int
// Size returns entire alive cache objects size.
Size() int
// NumObjects returns number of alive objects.
NumObjects() int
// GetNamespace gets cache namespace with the given id.
// GetNamespace is never return nil.
GetNamespace(id uint64) Namespace
// Purge purges all cache namespaces, read Namespace.Purge method documentation.
// PurgeNamespace purges cache namespace with the given id from this cache tree.
// Also read Namespace.Purge.
PurgeNamespace(id uint64, fin PurgeFin)
// ZapNamespace detaches cache namespace with the given id from this cache tree.
// Also read Namespace.Zap.
ZapNamespace(id uint64)
// Purge purges all cache namespace from this cache tree.
// This is behave the same as calling Namespace.Purge method on all cache namespace.
Purge(fin PurgeFin)
// Zap zaps all cache namespaces, read Namespace.Zap method documentation.
Zap(closed bool)
// Zap detaches all cache namespace from this cache tree.
// This is behave the same as calling Namespace.Zap method on all cache namespace.
Zap()
}
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
type Namespace interface {
// Get gets cache object for the given key. The given SetFunc (if not nil) will
// be called if the given key does not exist.
// If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get
// will return ok false.
Get(key uint64, setf SetFunc) (obj Object, ok bool)
// Get deletes cache object for the given key. If exist the cache object will
// be deleted later when all of its handles have been released (i.e. no one use
// it anymore) and the given DelFin (if not nil) will finally be executed. If
// such cache object does not exist the given DelFin will be executed anyway.
// Get gets cache object with the given key.
// If cache object is not found and setf is not nil, Get will atomically creates
// the cache object by calling setf. Otherwise Get will returns nil.
//
// Delete returns true if such cache object exist.
// The returned cache handle should be released after use by calling Release
// method.
Get(key uint64, setf SetFunc) Handle
// Delete removes cache object with the given key from cache tree.
// A deleted cache object will be released as soon as all of its handles have
// been released.
// Delete only happen once, subsequent delete will consider cache object doesn't
// exist, even if the cache object ins't released yet.
//
// If not nil, fin will be called if the cache object doesn't exist or when
// finally be released.
//
// Delete returns true if such cache object exist and never been deleted.
Delete(key uint64, fin DelFin) bool
// Purge deletes all cache objects, read Delete method documentation.
// Purge removes all cache objects within this namespace from cache tree.
// This is the same as doing delete on all cache objects.
//
// If not nil, fin will be called on all cache objects when its finally be
// released.
Purge(fin PurgeFin)
// Zap detaches the namespace from the cache tree and delete all its cache
// objects. The cache objects deletion and finalizers execution are happen
// immediately, even if its existing handles haven't yet been released.
// A zapped namespace can't never be filled again.
// If closed is false then the Get function will always call the given SetFunc
// if it is not nil, but resultant of the SetFunc will not be cached.
Zap(closed bool)
// Zap detaches namespace from cache tree and release all its cache objects.
// A zapped namespace can never be filled again.
// Calling Get on zapped namespace will always return nil.
Zap()
}
// Object is a cache object.
type Object interface {
// Release releases the cache object. Other methods should not be called
// after the cache object has been released.
// Handle is a cache handle.
type Handle interface {
// Release releases this cache handle. This method can be safely called mutiple
// times.
Release()
// Value returns value of the cache object.
// Value returns value of this cache handle.
// Value will returns nil after this cache handle have be released.
Value() interface{}
}
const (
DelNotExist = iota
DelExist
DelPendig
)
// Namespace state.
type nsState int
const (
nsEffective nsState = iota
nsZapped
nsClosed
)
// Node state.
type nodeState int
const (
nodeEffective nodeState = iota
nodeZero nodeState = iota
nodeEffective
nodeEvicted
nodeRemoved
nodeDeleted
)
// Fake object.
type fakeObject struct {
// Fake handle.
type fakeHandle struct {
value interface{}
fin func()
once uint32
}
func (o *fakeObject) Value() interface{} {
if atomic.LoadUint32(&o.once) == 0 {
return o.value
func (h *fakeHandle) Value() interface{} {
if atomic.LoadUint32(&h.once) == 0 {
return h.value
}
return nil
}
func (o *fakeObject) Release() {
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
func (h *fakeHandle) Release() {
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
return
}
if o.fin != nil {
o.fin()
o.fin = nil
if h.fin != nil {
h.fin()
h.fin = nil
}
}


@@ -7,15 +7,35 @@
package cache
import (
"fmt"
"math/rand"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object {
obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) {
return true, value, charge, fin
type releaserFunc struct {
fn func()
value interface{}
}
func (r releaserFunc) Release() {
if r.fn != nil {
r.fn()
}
}
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
return ns.Get(key, func() (int, interface{}) {
if relf != nil {
return charge, releaserFunc{relf, value}
} else {
return charge, value
}
})
return obj
}
func TestCache_HitMiss(t *testing.T) {
@@ -43,29 +63,31 @@ func TestCache_HitMiss(t *testing.T) {
setfin++
}).Release()
for j, y := range cases {
r, ok := ns.Get(y.key, nil)
h := ns.Get(y.key, nil)
if j <= i {
// should hit
if !ok {
if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
} else if r.Value().(string) != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
} else {
if x := h.Value().(releaserFunc).value.(string); x != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
}
}
} else {
// should miss
if ok {
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string))
if h != nil {
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
if ok {
r.Release()
if h != nil {
h.Release()
}
}
}
for i, x := range cases {
finalizerOk := false
ns.Delete(x.key, func(exist bool) {
ns.Delete(x.key, func(exist, pending bool) {
finalizerOk = true
})
@@ -74,22 +96,24 @@ func TestCache_HitMiss(t *testing.T) {
}
for j, y := range cases {
r, ok := ns.Get(y.key, nil)
h := ns.Get(y.key, nil)
if j > i {
// should hit
if !ok {
if h == nil {
t.Errorf("case '%d' iteration '%d' is miss", i, j)
} else if r.Value().(string) != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
} else {
if x := h.Value().(releaserFunc).value.(string); x != y.value {
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
}
}
} else {
// should miss
if ok {
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string))
if h != nil {
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
}
}
if ok {
r.Release()
if h != nil {
h.Release()
}
}
}
@@ -107,42 +131,42 @@ func TestLRUCache_Eviction(t *testing.T) {
set(ns, 3, 3, 1, nil).Release()
set(ns, 4, 4, 1, nil).Release()
set(ns, 5, 5, 1, nil).Release()
if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2
r.Release()
if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
h.Release()
}
set(ns, 9, 9, 10, nil).Release() // 5,2,9
for _, x := range []uint64{9, 2, 5, 1} {
r, ok := ns.Get(x, nil)
if !ok {
t.Errorf("miss for key '%d'", x)
for _, key := range []uint64{9, 2, 5, 1} {
h := ns.Get(key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if r.Value().(int) != int(x) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
r.Release()
h.Release()
}
}
o1.Release()
for _, x := range []uint64{1, 2, 5} {
r, ok := ns.Get(x, nil)
if !ok {
t.Errorf("miss for key '%d'", x)
for _, key := range []uint64{1, 2, 5} {
h := ns.Get(key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if r.Value().(int) != int(x) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
r.Release()
h.Release()
}
}
for _, x := range []uint64{3, 4, 9} {
r, ok := ns.Get(x, nil)
if ok {
t.Errorf("hit for key '%d'", x)
if r.Value().(int) != int(x) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
for _, key := range []uint64{3, 4, 9} {
h := ns.Get(key, nil)
if h != nil {
t.Errorf("hit for key '%d'", key)
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
r.Release()
h.Release()
}
}
}
@@ -153,16 +177,15 @@ func TestLRUCache_SetGet(t *testing.T) {
for i := 0; i < 200; i++ {
n := uint64(rand.Intn(99999) % 20)
set(ns, n, n, 1, nil).Release()
if p, ok := ns.Get(n, nil); ok {
if p.Value() == nil {
if h := ns.Get(n, nil); h != nil {
if h.Value() == nil {
t.Errorf("key '%d' contains nil value", n)
} else {
got := p.Value().(uint64)
if got != n {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got)
if x := h.Value().(uint64); x != n {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
}
}
p.Release()
h.Release()
} else {
t.Errorf("key '%d' doesn't exist", n)
}
@@ -176,31 +199,369 @@ func TestLRUCache_Purge(t *testing.T) {
o2 := set(ns1, 2, 2, 1, nil)
ns1.Purge(nil)
set(ns1, 3, 3, 1, nil).Release()
for _, x := range []uint64{1, 2, 3} {
r, ok := ns1.Get(x, nil)
if !ok {
t.Errorf("miss for key '%d'", x)
for _, key := range []uint64{1, 2, 3} {
h := ns1.Get(key, nil)
if h == nil {
t.Errorf("miss for key '%d'", key)
} else {
if r.Value().(int) != int(x) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
r.Release()
h.Release()
}
}
o1.Release()
o2.Release()
for _, x := range []uint64{1, 2} {
r, ok := ns1.Get(x, nil)
if ok {
t.Errorf("hit for key '%d'", x)
if r.Value().(int) != int(x) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
for _, key := range []uint64{1, 2} {
h := ns1.Get(key, nil)
if h != nil {
t.Errorf("hit for key '%d'", key)
if x := h.Value().(int); x != int(key) {
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
}
r.Release()
h.Release()
}
}
}
type testingCacheObjectCounter struct {
created uint32
released uint32
}
func (c *testingCacheObjectCounter) createOne() {
atomic.AddUint32(&c.created, 1)
}
func (c *testingCacheObjectCounter) releaseOne() {
atomic.AddUint32(&c.released, 1)
}
type testingCacheObject struct {
t *testing.T
cnt *testingCacheObjectCounter
ns, key uint64
releaseCalled uint32
}
func (x *testingCacheObject) Release() {
if atomic.CompareAndSwapUint32(&x.releaseCalled, 0, 1) {
x.cnt.releaseOne()
} else {
x.t.Errorf("duplicate setfin NS#%d KEY#%s", x.ns, x.key)
}
}
func TestLRUCache_Finalizer(t *testing.T) {
const (
capacity = 100
goroutines = 100
iterations = 10000
keymax = 8000
)
runtime.GOMAXPROCS(runtime.NumCPU())
defer runtime.GOMAXPROCS(1)
wg := &sync.WaitGroup{}
cnt := &testingCacheObjectCounter{}
c := NewLRUCache(capacity)
type instance struct {
seed int64
rnd *rand.Rand
ns uint64
effective int32
handles []Handle
handlesMap map[uint64]int
delete bool
purge bool
zap bool
wantDel int32
delfinCalledAll int32
delfinCalledEff int32
purgefinCalled int32
}
instanceGet := func(p *instance, ns Namespace, key uint64) {
h := ns.Get(key, func() (charge int, value interface{}) {
to := &testingCacheObject{
t: t, cnt: cnt,
ns: p.ns,
key: key,
}
atomic.AddInt32(&p.effective, 1)
cnt.createOne()
return 1, releaserFunc{func() {
to.Release()
atomic.AddInt32(&p.effective, -1)
}, to}
})
p.handles = append(p.handles, h)
p.handlesMap[key] = p.handlesMap[key] + 1
}
instanceRelease := func(p *instance, ns Namespace, i int) {
h := p.handles[i]
key := h.Value().(releaserFunc).value.(*testingCacheObject).key
if n := p.handlesMap[key]; n == 0 {
t.Fatal("key ref == 0")
} else if n > 1 {
p.handlesMap[key] = n - 1
} else {
delete(p.handlesMap, key)
}
h.Release()
p.handles = append(p.handles[:i], p.handles[i+1:]...)
p.handles[len(p.handles) : len(p.handles)+1][0] = nil
}
seeds := make([]int64, goroutines)
instances := make([]instance, goroutines)
for i := range instances {
p := &instances[i]
p.handlesMap = make(map[uint64]int)
if seeds[i] == 0 {
seeds[i] = time.Now().UnixNano()
}
p.seed = seeds[i]
p.rnd = rand.New(rand.NewSource(p.seed))
p.ns = uint64(i)
p.delete = i%6 == 0
p.purge = i%8 == 0
p.zap = i%12 == 0 || i%3 == 0
}
seedsStr := make([]string, len(seeds))
for i, seed := range seeds {
seedsStr[i] = fmt.Sprint(seed)
}
t.Logf("seeds := []int64{%s}", strings.Join(seedsStr, ", "))
// Get and release.
for i := range instances {
p := &instances[i]
wg.Add(1)
go func(p *instance) {
defer wg.Done()
ns := c.GetNamespace(p.ns)
for i := 0; i < iterations; i++ {
if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
instanceGet(p, ns, uint64(p.rnd.Intn(keymax)))
} else {
instanceRelease(p, ns, p.rnd.Intn(len(p.handles)))
}
}
}(p)
}
wg.Wait()
if used, cap := c.Used(), c.Capacity(); used > cap {
t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
}
// Check effective objects.
for i := range instances {
p := &instances[i]
if int(p.effective) < len(p.handlesMap) {
t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
}
}
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// Delete and purge.
for i := range instances {
p := &instances[i]
p.wantDel = p.effective
wg.Add(1)
go func(p *instance) {
defer wg.Done()
ns := c.GetNamespace(p.ns)
if p.delete {
for key := uint64(0); key < keymax; key++ {
_, wantExist := p.handlesMap[key]
gotExist := ns.Delete(key, func(exist, pending bool) {
atomic.AddInt32(&p.delfinCalledAll, 1)
if exist {
atomic.AddInt32(&p.delfinCalledEff, 1)
}
})
if !gotExist && wantExist {
t.Errorf("delete on NS#%d KEY#%d not found", p.ns, key)
}
}
var delfinCalled int
for key := uint64(0); key < keymax; key++ {
func(key uint64) {
gotExist := ns.Delete(key, func(exist, pending bool) {
if exist && !pending {
t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.ns, key)
}
delfinCalled++
})
if gotExist {
t.Errorf("delete on NS#%d KEY#%d found", p.ns, key)
}
}(key)
}
if delfinCalled != keymax {
t.Errorf("(2) #%d not all delete fin called, diff=%d", p.ns, keymax-delfinCalled)
}
}
if p.purge {
ns.Purge(func(ns, key uint64) {
atomic.AddInt32(&p.purgefinCalled, 1)
})
}
}(p)
}
wg.Wait()
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// Release.
for i := range instances {
p := &instances[i]
if !p.zap {
wg.Add(1)
go func(p *instance) {
defer wg.Done()
ns := c.GetNamespace(p.ns)
for i := len(p.handles) - 1; i >= 0; i-- {
instanceRelease(p, ns, i)
}
}(p)
}
}
wg.Wait()
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
// Zap.
for i := range instances {
p := &instances[i]
if p.zap {
wg.Add(1)
go func(p *instance) {
defer wg.Done()
ns := c.GetNamespace(p.ns)
ns.Zap()
p.handles = nil
p.handlesMap = nil
}(p)
}
}
wg.Wait()
if want := int(cnt.created - cnt.released); c.Size() != want {
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
}
if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
}
c.Purge(nil)
for i := range instances {
p := &instances[i]
if p.delete {
if p.delfinCalledAll != keymax {
t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.ns, p.purge, p.zap, keymax-p.delfinCalledAll)
}
if p.delfinCalledEff != p.wantDel {
t.Errorf("#%d not all effective delete fin called, diff=%d", p.ns, p.wantDel-p.delfinCalledEff)
}
if p.purge && p.purgefinCalled > 0 {
t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.ns, p.delete, p.zap, p.purgefinCalled)
}
} else {
if p.purge {
if p.purgefinCalled != p.wantDel {
t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.ns, p.delete, p.zap, p.wantDel-p.purgefinCalled)
}
}
}
}
if cnt.created != cnt.released {
t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released)
}
}
func BenchmarkLRUCache_Set(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
}
func BenchmarkLRUCache_Get(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
ns.Get(i, nil)
}
}
func BenchmarkLRUCache_Get2(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, "", 1, nil)
}
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
ns.Get(i, func() (charge int, value interface{}) {
return 0, nil
})
}
}
func BenchmarkLRUCache_Release(b *testing.B) {
c := NewLRUCache(0)
ns := c.GetNamespace(0)
handles := make([]Handle, b.N)
for i := uint64(0); i < uint64(b.N); i++ {
handles[i] = set(ns, i, "", 1, nil)
}
b.ResetTimer()
for _, h := range handles {
h.Release()
}
}
func BenchmarkLRUCache_SetRelease(b *testing.B) {
capacity := b.N / 100
if capacity <= 0 {
@@ -210,7 +571,7 @@ func BenchmarkLRUCache_SetRelease(b *testing.B) {
ns := c.GetNamespace(0)
b.ResetTimer()
for i := uint64(0); i < uint64(b.N); i++ {
set(ns, i, nil, 1, nil).Release()
set(ns, i, "", 1, nil).Release()
}
}
@@ -227,10 +588,10 @@ func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
nb := b.N - na
for i := uint64(0); i < uint64(na); i++ {
set(ns, i, nil, 1, nil).Release()
set(ns, i, "", 1, nil).Release()
}
for i := uint64(0); i < uint64(nb); i++ {
set(ns, i, nil, 1, nil).Release()
set(ns, i, "", 1, nil).Release()
}
}

View File

@@ -1,246 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cache
import (
"sync"
"sync/atomic"
)
type emptyCache struct {
sync.Mutex
table map[uint64]*emptyNS
}
// NewEmptyCache creates a new initialized empty cache.
func NewEmptyCache() Cache {
return &emptyCache{
table: make(map[uint64]*emptyNS),
}
}
func (c *emptyCache) GetNamespace(id uint64) Namespace {
c.Lock()
defer c.Unlock()
if ns, ok := c.table[id]; ok {
return ns
}
ns := &emptyNS{
cache: c,
id: id,
table: make(map[uint64]*emptyNode),
}
c.table[id] = ns
return ns
}
func (c *emptyCache) Purge(fin PurgeFin) {
c.Lock()
for _, ns := range c.table {
ns.purgeNB(fin)
}
c.Unlock()
}
func (c *emptyCache) Zap(closed bool) {
c.Lock()
for _, ns := range c.table {
ns.zapNB(closed)
}
c.table = make(map[uint64]*emptyNS)
c.Unlock()
}
func (*emptyCache) SetCapacity(capacity int) {}
type emptyNS struct {
cache *emptyCache
id uint64
table map[uint64]*emptyNode
state nsState
}
func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) {
ns.cache.Lock()
switch ns.state {
case nsZapped:
ns.cache.Unlock()
if setf == nil {
return
}
var value interface{}
var fin func()
ok, value, _, fin = setf()
if ok {
o = &fakeObject{
value: value,
fin: fin,
}
}
return
case nsClosed:
ns.cache.Unlock()
return
}
n, ok := ns.table[key]
if ok {
n.ref++
} else {
if setf == nil {
ns.cache.Unlock()
return
}
var value interface{}
var fin func()
ok, value, _, fin = setf()
if !ok {
ns.cache.Unlock()
return
}
n = &emptyNode{
ns: ns,
key: key,
value: value,
setfin: fin,
ref: 1,
}
ns.table[key] = n
}
ns.cache.Unlock()
o = &emptyObject{node: n}
return
}
func (ns *emptyNS) Delete(key uint64, fin DelFin) bool {
ns.cache.Lock()
if ns.state != nsEffective {
ns.cache.Unlock()
if fin != nil {
fin(false)
}
return false
}
n, ok := ns.table[key]
if !ok {
ns.cache.Unlock()
if fin != nil {
fin(false)
}
return false
}
n.delfin = fin
ns.cache.Unlock()
return true
}
func (ns *emptyNS) purgeNB(fin PurgeFin) {
if ns.state != nsEffective {
return
}
for _, n := range ns.table {
n.purgefin = fin
}
}
func (ns *emptyNS) Purge(fin PurgeFin) {
ns.cache.Lock()
ns.purgeNB(fin)
ns.cache.Unlock()
}
func (ns *emptyNS) zapNB(closed bool) {
if ns.state != nsEffective {
return
}
for _, n := range ns.table {
n.execFin()
}
if closed {
ns.state = nsClosed
} else {
ns.state = nsZapped
}
ns.table = nil
}
func (ns *emptyNS) Zap(closed bool) {
ns.cache.Lock()
ns.zapNB(closed)
delete(ns.cache.table, ns.id)
ns.cache.Unlock()
}
type emptyNode struct {
ns *emptyNS
key uint64
value interface{}
ref int
setfin SetFin
delfin DelFin
purgefin PurgeFin
}
func (n *emptyNode) execFin() {
if n.setfin != nil {
n.setfin()
n.setfin = nil
}
if n.purgefin != nil {
n.purgefin(n.ns.id, n.key, n.delfin)
n.delfin = nil
n.purgefin = nil
} else if n.delfin != nil {
n.delfin(true)
n.delfin = nil
}
}
func (n *emptyNode) evict() {
n.ns.cache.Lock()
n.ref--
if n.ref == 0 {
if n.ns.state == nsEffective {
// Remove elem.
delete(n.ns.table, n.key)
// Execute finalizer.
n.execFin()
}
} else if n.ref < 0 {
panic("leveldb/cache: emptyNode: negative node reference")
}
n.ns.cache.Unlock()
}
type emptyObject struct {
node *emptyNode
once uint32
}
func (o *emptyObject) Value() interface{} {
if atomic.LoadUint32(&o.once) == 0 {
return o.node.value
}
return nil
}
func (o *emptyObject) Release() {
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
return
}
o.node.evict()
o.node = nil
}

View File

@@ -9,16 +9,24 @@ package cache
import (
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/util"
)
// The LLRB implementation was taken from https://github.com/petar/GoLLRB,
// which contains the following header:
//
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// lruCache represent a LRU cache state.
type lruCache struct {
sync.Mutex
recent lruNode
table map[uint64]*lruNs
capacity int
size int
mu sync.Mutex
recent lruNode
table map[uint64]*lruNs
capacity int
used, size, alive int
}
// NewLRUCache creates a new initialized LRU cache with the given capacity.
@@ -32,245 +40,408 @@ func NewLRUCache(capacity int) Cache {
return c
}
func (c *lruCache) Capacity() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.capacity
}
func (c *lruCache) Used() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.used
}
func (c *lruCache) Size() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.size
}
func (c *lruCache) NumObjects() int {
c.mu.Lock()
defer c.mu.Unlock()
return c.alive
}
// SetCapacity sets the cache capacity.
func (c *lruCache) SetCapacity(capacity int) {
c.Lock()
c.mu.Lock()
c.capacity = capacity
c.evict()
c.Unlock()
c.mu.Unlock()
}
// GetNamespace returns the namespace object for the given id.
func (c *lruCache) GetNamespace(id uint64) Namespace {
c.Lock()
defer c.Unlock()
c.mu.Lock()
defer c.mu.Unlock()
if p, ok := c.table[id]; ok {
return p
if ns, ok := c.table[id]; ok {
return ns
}
p := &lruNs{
lru: c,
id: id,
table: make(map[uint64]*lruNode),
ns := &lruNs{lru: c, id: id}
c.table[id] = ns
return ns
}
func (c *lruCache) ZapNamespace(id uint64) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.zapNB()
delete(c.table, id)
}
c.table[id] = p
return p
c.mu.Unlock()
}
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
c.mu.Lock()
if ns, exist := c.table[id]; exist {
ns.purgeNB(fin)
}
c.mu.Unlock()
}
// Purge purges the entire cache.
func (c *lruCache) Purge(fin PurgeFin) {
c.Lock()
c.mu.Lock()
for _, ns := range c.table {
ns.purgeNB(fin)
}
c.Unlock()
c.mu.Unlock()
}
func (c *lruCache) Zap(closed bool) {
c.Lock()
func (c *lruCache) Zap() {
c.mu.Lock()
for _, ns := range c.table {
ns.zapNB(closed)
ns.zapNB()
}
c.table = make(map[uint64]*lruNs)
c.Unlock()
c.mu.Unlock()
}
func (c *lruCache) evict() {
top := &c.recent
for n := c.recent.rPrev; c.size > c.capacity && n != top; {
for n := c.recent.rPrev; c.used > c.capacity && n != top; {
n.state = nodeEvicted
n.rRemove()
n.evictNB()
c.size -= n.charge
n.derefNB()
c.used -= n.charge
n = c.recent.rPrev
}
}
type lruNs struct {
lru *lruCache
id uint64
table map[uint64]*lruNode
state nsState
lru *lruCache
id uint64
rbRoot *lruNode
state nsState
}
func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) {
lru := ns.lru
lru.Lock()
switch ns.state {
case nsZapped:
lru.Unlock()
if setf == nil {
return
}
var value interface{}
var fin func()
ok, value, _, fin = setf()
if ok {
o = &fakeObject{
value: value,
fin: fin,
}
}
return
case nsClosed:
lru.Unlock()
return
func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
if h == nil {
n = &lruNode{ns: ns, key: key}
return n, n
}
n, ok := ns.table[key]
if ok {
switch n.state {
case nodeEvicted:
// Insert to recent list.
n.state = nodeEffective
n.ref++
lru.size += n.charge
lru.evict()
fallthrough
case nodeEffective:
// Bump to front
n.rRemove()
n.rInsert(&lru.recent)
if key < h.key {
hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
if hn != nil {
h.rbLeft = hn
} else {
return nil, n
}
} else if key > h.key {
hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
if hn != nil {
h.rbRight = hn
} else {
return nil, n
}
n.ref++
} else {
if setf == nil {
lru.Unlock()
return
}
var value interface{}
var charge int
var fin func()
ok, value, charge, fin = setf()
if !ok {
lru.Unlock()
return
}
n = &lruNode{
ns: ns,
key: key,
value: value,
charge: charge,
setfin: fin,
ref: 2,
}
ns.table[key] = n
n.rInsert(&lru.recent)
lru.size += charge
lru.evict()
return nil, h
}
lru.Unlock()
o = &lruObject{node: n}
return
if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
h = rbRotLeft(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
rbFlip(h)
}
return h, n
}
func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
if hn != nil {
ns.rbRoot = hn
ns.rbRoot.rbBlack = true
}
return n
}
func (ns *lruNs) rbGetNode(key uint64) *lruNode {
h := ns.rbRoot
for h != nil {
switch {
case key < h.key:
h = h.rbLeft
case key > h.key:
h = h.rbRight
default:
return h
}
}
return nil
}
func (ns *lruNs) getNode(key uint64) *lruNode {
return ns.rbGetNode(key)
}
func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
if h == nil {
return nil
}
if key < h.key {
if h.rbLeft == nil { // key not present. Nothing to delete
return h
}
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
h = rbMoveLeft(h)
}
h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
} else {
if rbIsRed(h.rbLeft) {
h = rbRotRight(h)
}
// If @key equals @h.key and no right children at @h
if h.key == key && h.rbRight == nil {
return nil
}
if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
h = rbMoveRight(h)
}
// If @key equals @h.key, and (from above) 'h.Right != nil'
if h.key == key {
var x *lruNode
h.rbRight, x = rbDeleteMin(h.rbRight)
if x == nil {
panic("logic")
}
x.rbLeft, h.rbLeft = h.rbLeft, nil
x.rbRight, h.rbRight = h.rbRight, nil
x.rbBlack = h.rbBlack
h = x
} else { // Else, @key is bigger than @h.key
h.rbRight = ns.rbDeleteNode(h.rbRight, key)
}
}
return rbFixup(h)
}
func (ns *lruNs) deleteNode(key uint64) {
ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
if ns.rbRoot != nil {
ns.rbRoot.rbBlack = true
}
}
func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
if h == nil {
return true
}
if h.key >= pivot {
if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
return false
}
if !iter(h) {
return false
}
}
return ns.rbIterateNodes(h.rbRight, pivot, iter)
}
func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
ns.rbIterateNodes(ns.rbRoot, 0, iter)
}
func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
ns.lru.mu.Lock()
defer ns.lru.mu.Unlock()
if ns.state != nsEffective {
return nil
}
var n *lruNode
if setf == nil {
n = ns.getNode(key)
if n == nil {
return nil
}
} else {
n = ns.getOrCreateNode(key)
}
switch n.state {
case nodeZero:
charge, value := setf()
if value == nil {
ns.deleteNode(key)
return nil
}
if charge < 0 {
charge = 0
}
n.value = value
n.charge = charge
n.state = nodeEvicted
ns.lru.size += charge
ns.lru.alive++
fallthrough
case nodeEvicted:
if n.charge == 0 {
break
}
// Insert to recent list.
n.state = nodeEffective
n.ref++
ns.lru.used += n.charge
ns.lru.evict()
fallthrough
case nodeEffective:
// Bump to front.
n.rRemove()
n.rInsert(&ns.lru.recent)
}
n.ref++
return &lruHandle{node: n}
}
func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
lru := ns.lru
lru.Lock()
ns.lru.mu.Lock()
defer ns.lru.mu.Unlock()
if ns.state != nsEffective {
lru.Unlock()
if fin != nil {
fin(false)
fin(false, false)
}
return false
}
n, ok := ns.table[key]
if !ok {
lru.Unlock()
n := ns.getNode(key)
if n == nil {
if fin != nil {
fin(false)
fin(false, false)
}
return false
}
n.delfin = fin
switch n.state {
case nodeRemoved:
lru.Unlock()
return false
case nodeEffective:
lru.size -= n.charge
ns.lru.used -= n.charge
n.state = nodeDeleted
n.delfin = fin
n.rRemove()
n.evictNB()
n.derefNB()
case nodeEvicted:
n.state = nodeDeleted
n.delfin = fin
case nodeDeleted:
if fin != nil {
fin(true, true)
}
return false
default:
panic("invalid state")
}
n.state = nodeRemoved
lru.Unlock()
return true
}
func (ns *lruNs) purgeNB(fin PurgeFin) {
lru := ns.lru
if ns.state != nsEffective {
return
}
for _, n := range ns.table {
n.purgefin = fin
if n.state == nodeEffective {
lru.size -= n.charge
n.rRemove()
n.evictNB()
if ns.state == nsEffective {
var nodes []*lruNode
ns.iterateNodes(func(n *lruNode) bool {
nodes = append(nodes, n)
return true
})
for _, n := range nodes {
switch n.state {
case nodeEffective:
ns.lru.used -= n.charge
n.state = nodeDeleted
n.purgefin = fin
n.rRemove()
n.derefNB()
case nodeEvicted:
n.state = nodeDeleted
n.purgefin = fin
case nodeDeleted:
default:
panic("invalid state")
}
}
n.state = nodeRemoved
}
}
func (ns *lruNs) Purge(fin PurgeFin) {
ns.lru.Lock()
ns.lru.mu.Lock()
ns.purgeNB(fin)
ns.lru.Unlock()
ns.lru.mu.Unlock()
}
func (ns *lruNs) zapNB(closed bool) {
lru := ns.lru
if ns.state != nsEffective {
return
}
if closed {
ns.state = nsClosed
} else {
func (ns *lruNs) zapNB() {
if ns.state == nsEffective {
ns.state = nsZapped
ns.iterateNodes(func(n *lruNode) bool {
if n.state == nodeEffective {
ns.lru.used -= n.charge
n.rRemove()
}
ns.lru.size -= n.charge
n.state = nodeDeleted
n.fin()
return true
})
ns.rbRoot = nil
}
for _, n := range ns.table {
if n.state == nodeEffective {
lru.size -= n.charge
n.rRemove()
}
n.state = nodeRemoved
n.execFin()
}
ns.table = nil
}
func (ns *lruNs) Zap(closed bool) {
ns.lru.Lock()
ns.zapNB(closed)
func (ns *lruNs) Zap() {
ns.lru.mu.Lock()
ns.zapNB()
delete(ns.lru.table, ns.id)
ns.lru.Unlock()
ns.lru.mu.Unlock()
}
type lruNode struct {
ns *lruNs
rNext, rPrev *lruNode
rNext, rPrev *lruNode
rbLeft, rbRight *lruNode
rbBlack bool
key uint64
value interface{}
charge int
ref int
state nodeState
setfin SetFin
delfin DelFin
purgefin PurgeFin
}
@@ -284,7 +455,6 @@ func (n *lruNode) rInsert(at *lruNode) {
}
func (n *lruNode) rRemove() bool {
// only remove if not already removed
if n.rPrev == nil {
return false
}
@@ -297,58 +467,147 @@ func (n *lruNode) rRemove() bool {
return true
}
func (n *lruNode) execFin() {
if n.setfin != nil {
n.setfin()
n.setfin = nil
func (n *lruNode) fin() {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
if n.purgefin != nil {
n.purgefin(n.ns.id, n.key, n.delfin)
n.purgefin(n.ns.id, n.key)
n.delfin = nil
n.purgefin = nil
} else if n.delfin != nil {
n.delfin(true)
n.delfin(true, false)
n.delfin = nil
}
}
func (n *lruNode) evictNB() {
func (n *lruNode) derefNB() {
n.ref--
if n.ref == 0 {
if n.ns.state == nsEffective {
// remove elem
delete(n.ns.table, n.key)
// execute finalizer
n.execFin()
// Remove element.
n.ns.deleteNode(n.key)
n.ns.lru.size -= n.charge
n.ns.lru.alive--
n.fin()
}
n.value = nil
} else if n.ref < 0 {
panic("leveldb/cache: lruCache: negative node reference")
}
}
func (n *lruNode) evict() {
n.ns.lru.Lock()
n.evictNB()
n.ns.lru.Unlock()
func (n *lruNode) deref() {
n.ns.lru.mu.Lock()
n.derefNB()
n.ns.lru.mu.Unlock()
}
type lruObject struct {
type lruHandle struct {
node *lruNode
once uint32
}
func (o *lruObject) Value() interface{} {
if atomic.LoadUint32(&o.once) == 0 {
return o.node.value
func (h *lruHandle) Value() interface{} {
if atomic.LoadUint32(&h.once) == 0 {
return h.node.value
}
return nil
}
func (o *lruObject) Release() {
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
func (h *lruHandle) Release() {
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
return
}
o.node.evict()
o.node = nil
h.node.deref()
h.node = nil
}
func rbIsRed(h *lruNode) bool {
if h == nil {
return false
}
return !h.rbBlack
}
func rbRotLeft(h *lruNode) *lruNode {
x := h.rbRight
if x.rbBlack {
panic("rotating a black link")
}
h.rbRight = x.rbLeft
x.rbLeft = h
x.rbBlack = h.rbBlack
h.rbBlack = false
return x
}
func rbRotRight(h *lruNode) *lruNode {
x := h.rbLeft
if x.rbBlack {
panic("rotating a black link")
}
h.rbLeft = x.rbRight
x.rbRight = h
x.rbBlack = h.rbBlack
h.rbBlack = false
return x
}
func rbFlip(h *lruNode) {
h.rbBlack = !h.rbBlack
h.rbLeft.rbBlack = !h.rbLeft.rbBlack
h.rbRight.rbBlack = !h.rbRight.rbBlack
}
func rbMoveLeft(h *lruNode) *lruNode {
rbFlip(h)
if rbIsRed(h.rbRight.rbLeft) {
h.rbRight = rbRotRight(h.rbRight)
h = rbRotLeft(h)
rbFlip(h)
}
return h
}
func rbMoveRight(h *lruNode) *lruNode {
rbFlip(h)
if rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
rbFlip(h)
}
return h
}
func rbFixup(h *lruNode) *lruNode {
if rbIsRed(h.rbRight) {
h = rbRotLeft(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
h = rbRotRight(h)
}
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
rbFlip(h)
}
return h
}
func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
if h == nil {
return nil, nil
}
if h.rbLeft == nil {
return nil, h
}
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
h = rbMoveLeft(h)
}
h.rbLeft, n = rbDeleteMin(h.rbLeft)
return rbFixup(h), n
}
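
A minimal usage sketch of the reworked cache API, based only on the signatures visible in this diff (NewLRUCache, GetNamespace, Namespace.Get/Delete and the Handle returned by Get); it is an illustration under those assumptions, not documented library usage.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewLRUCache(100) // capacity is measured in charge units
	ns := c.GetNamespace(0)

	// Get either finds the cached entry or calls the setter to create it;
	// the setter returns the charge and the value to cache.
	h := ns.Get(42, func() (charge int, value interface{}) {
		return 1, "expensive value"
	})
	if h != nil {
		fmt.Println(h.Value())
		h.Release() // release the handle; the entry itself stays cached
	}

	// Delete defers its finalizer until the last outstanding handle is released.
	ns.Delete(42, func(exist, pending bool) {
		fmt.Println("delete fin, exist:", exist, "pending:", pending)
	})
}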

View File

@@ -14,6 +14,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -35,8 +36,8 @@ type DB struct {
// MemDB.
memMu sync.RWMutex
mem *memdb.DB
frozenMem *memdb.DB
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFile storage.File
@@ -47,6 +48,9 @@ type DB struct {
snapsMu sync.Mutex
snapsRoot snapshotElement
// Stats.
aliveSnaps, aliveIters int32
// Write.
writeC chan *Batch
writeMergedC chan bool
@@ -79,6 +83,8 @@ func openDB(s *session) (*DB, error) {
s: s,
// Initial sequence
seq: s.stSeq,
// MemDB
memPool: make(chan *memdb.DB, 1),
// Write
writeC: make(chan *Batch),
writeMergedC: make(chan bool),
@@ -120,6 +126,7 @@ func openDB(s *session) (*DB, error) {
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
go db.mpoolDrain()
s.logf("db@open done T·%v", time.Since(start))
@@ -257,6 +264,7 @@ func recoverTable(s *session, o *opt.Options) error {
var mSeq uint64
var good, corrupted int
rec := new(sessionRecord)
bpool := util.NewBufferPool(o.GetBlockSize() + 5)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp()
writer, err := tmp.Create()
@@ -314,7 +322,7 @@ func recoverTable(s *session, o *opt.Options) error {
var tSeq uint64
var tgood, tcorrupted, blockerr int
var imin, imax []byte
tr := table.NewReader(reader, size, nil, o)
tr := table.NewReader(reader, size, nil, bpool, o)
iter := tr.NewIterator(nil, nil)
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
s.logf("table@recovery found error @%d %q", file.Num(), err)
@@ -481,10 +489,11 @@ func (db *DB) recoverJournal() error {
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if strict {
if err == io.ErrUnexpectedEOF {
continue
} else {
return err
}
continue
}
if err := batch.decode(buf.Bytes()); err != nil {
return err
@@ -558,19 +567,20 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
ikey := newIKey(key, seq, tSeek)
em, fm := db.getMems()
for _, m := range [...]*memdb.DB{em, fm} {
for _, m := range [...]*memDB{em, fm} {
if m == nil {
continue
}
defer m.decref()
mk, mv, me := m.Find(ikey)
mk, mv, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, t, ok := parseIkey(mk)
if ok && db.s.icmp.uCompare(ukey, key) == 0 {
if t == tDel {
return nil, ErrNotFound
}
return mv, nil
return append([]byte{}, mv...), nil
}
} else if me != ErrNotFound {
return nil, me
@@ -590,8 +600,9 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
// Get gets the value for the given key. It returns ErrNotFound if the
// DB does not contain the key.
//
// The caller should not modify the contents of the returned slice, but
// it is safe to modify the contents of the argument after Get returns.
// The returned slice is its own copy; it is safe to modify the contents
// of the returned slice.
// It is safe to modify the contents of the argument after Get returns.
func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
err = db.ok()
if err != nil {
@@ -649,6 +660,16 @@ func (db *DB) GetSnapshot() (*Snapshot, error) {
// Returns statistics of the underlying DB.
// leveldb.sstables
// Returns sstables list for each level.
// leveldb.blockpool
// Returns block pool stats.
// leveldb.cachedblock
// Returns size of cached block.
// leveldb.openedtables
// Returns number of opened tables.
// leveldb.alivesnaps
// Returns number of alive snapshots.
// leveldb.aliveiters
// Returns number of alive iterators.
func (db *DB) GetProperty(name string) (value string, err error) {
err = db.ok()
if err != nil {
@@ -694,6 +715,20 @@ func (db *DB) GetProperty(name string) (value string, err error) {
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
case p == "blockpool":
value = fmt.Sprintf("%v", db.s.tops.bpool)
case p == "cachedblock":
if bc := db.s.o.GetBlockCache(); bc != nil {
value = fmt.Sprintf("%d", bc.Size())
} else {
value = "<nil>"
}
case p == "openedtables":
value = fmt.Sprintf("%d", db.s.tops.cache.Size())
case p == "alivesnaps":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}
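
A small hedged sketch of the two user-visible changes in this hunk: Get now hands back its own copy of the value, and GetProperty gains the alive-snapshot and alive-iterator counters. The path and key are made up for illustration.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		panic(err)
	}
	v, err := db.Get([]byte("k"), nil)
	if err != nil {
		panic(err)
	}
	v[0] = 'V' // safe: Get returned a private copy, not memdb-backed memory

	iters, err := db.GetProperty("leveldb.aliveiters")
	if err != nil {
		panic(err)
	}
	fmt.Println("alive iterators:", iters)
}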

View File

@@ -216,14 +216,15 @@ func (db *DB) memCompaction() {
if mem == nil {
return
}
defer mem.decref()
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.Len(), shortenb(mem.Size()))
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
if mem.Len() == 0 {
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
db.dropFrozenMem()
@@ -241,7 +242,7 @@ func (db *DB) memCompaction() {
db.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.flush(mem, -1)
return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range c.rec.addedTables {
db.logf("mem@flush rollback @%d", r.num)

View File

@@ -9,6 +9,8 @@ package leveldb
import (
"errors"
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -19,6 +21,17 @@ var (
errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
)
type memdbReleaser struct {
once sync.Once
m *memDB
}
func (mr *memdbReleaser) Release() {
mr.once.Do(func() {
mr.m.decref()
})
}
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
em, fm := db.getMems()
v := db.s.version()
@@ -26,9 +39,13 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
i = append(i, em.NewIterator(slice))
emi := em.mdb.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
i = append(i, emi)
if fm != nil {
i = append(i, fm.NewIterator(slice))
fmi := fm.mdb.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
i = append(i, fmi)
}
i = append(i, ti...)
strict := db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
@@ -50,6 +67,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
}
rawIter := db.newRawIterator(islice, ro)
iter := &dbIter{
db: db,
icmp: db.s.icmp,
iter: rawIter,
seq: seq,
@@ -57,6 +75,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
key: make([]byte, 0),
value: make([]byte, 0),
}
atomic.AddInt32(&db.aliveIters, 1)
runtime.SetFinalizer(iter, (*dbIter).Release)
return iter
}
@@ -73,6 +92,7 @@ const (
// dbIter represent an interator states over a database session.
type dbIter struct {
db *DB
icmp *iComparer
iter iterator.Iterator
seq uint64
@@ -287,6 +307,7 @@ func (i *dbIter) Release() {
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
}
i.dir = dirReleased
@@ -294,6 +315,8 @@ func (i *dbIter) Release() {
i.value = nil
i.iter.Release()
i.iter = nil
atomic.AddInt32(&i.db.aliveIters, -1)
i.db = nil
}
}

View File

@@ -9,6 +9,7 @@ package leveldb
import (
"runtime"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
@@ -81,7 +82,7 @@ func (db *DB) minSeq() uint64 {
type Snapshot struct {
db *DB
elem *snapshotElement
mu sync.Mutex
mu sync.RWMutex
released bool
}
@@ -91,6 +92,7 @@ func (db *DB) newSnapshot() *Snapshot {
db: db,
elem: db.acquireSnapshot(),
}
atomic.AddInt32(&db.aliveSnaps, 1)
runtime.SetFinalizer(snap, (*Snapshot).Release)
return snap
}
@@ -105,8 +107,8 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
if err != nil {
return
}
snap.mu.Lock()
defer snap.mu.Unlock()
snap.mu.RLock()
defer snap.mu.RUnlock()
if snap.released {
err = ErrSnapshotReleased
return
@@ -160,6 +162,7 @@ func (snap *Snapshot) Release() {
snap.released = true
snap.db.releaseSnapshot(snap.elem)
atomic.AddInt32(&snap.db.aliveSnaps, -1)
snap.db = nil
snap.elem = nil
}
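
A short usage sketch of what the switch to sync.RWMutex buys: reads through a single snapshot can now proceed concurrently instead of serializing on one mutex. Path and key are illustrative; a not-found result is expected and ignored.

package main

import (
	"sync"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	snap, err := db.GetSnapshot()
	if err != nil {
		panic(err)
	}
	defer snap.Release()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Each reader only takes the snapshot's read lock.
			snap.Get([]byte("some-key"), nil) // leveldb.ErrNotFound is fine here
		}()
	}
	wg.Wait()
}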

View File

@@ -8,11 +8,36 @@ package leveldb
import (
"sync/atomic"
"time"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
)
type memDB struct {
db *DB
mdb *memdb.DB
ref int32
}
func (m *memDB) incref() {
atomic.AddInt32(&m.ref, 1)
}
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back memdb with std capacity.
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
}
m.db = nil
m.mdb = nil
} else if ref < 0 {
panic("negative memdb ref")
}
}
// Get latest sequence number.
func (db *DB) getSeq() uint64 {
return atomic.LoadUint64(&db.seq)
@@ -23,9 +48,44 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()
}()
select {
case db.memPool <- mem:
default:
}
}
func (db *DB) mpoolGet() *memdb.DB {
select {
case mem := <-db.memPool:
return mem
default:
return nil
}
}
func (db *DB) mpoolDrain() {
ticker := time.NewTicker(30 * time.Second)
for {
select {
case <-ticker.C:
select {
case <-db.memPool:
default:
}
case _, _ = <-db.closeC:
close(db.memPool)
return
}
}
}
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
func (db *DB) newMem(n int) (mem *memDB, err error) {
num := db.s.allocFileNum()
file := db.s.getJournalFile(num)
w, err := file.Create()
@@ -37,6 +97,10 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
db.memMu.Lock()
defer db.memMu.Unlock()
if db.frozenMem != nil {
panic("still has frozen mem")
}
if db.journal == nil {
db.journal = journal.NewWriter(w)
} else {
@@ -47,8 +111,16 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
db.journalWriter = w
db.journalFile = file
db.frozenMem = db.mem
db.mem = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
mem = db.mem
mdb := db.mpoolGet()
if mdb == nil || mdb.Capacity() < n {
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
}
mem = &memDB{
db: db,
mdb: mdb,
ref: 2,
}
db.mem = mem
// The seq is only incremented by the writer, and whoever called newMem
// should hold the write lock, so no additional synchronization is needed here.
db.frozenSeq = db.seq
@@ -56,16 +128,27 @@ func (db *DB) newMem(n int) (mem *memdb.DB, err error) {
}
// Get all memdbs.
func (db *DB) getMems() (e *memdb.DB, f *memdb.DB) {
func (db *DB) getMems() (e, f *memDB) {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.mem == nil {
panic("nil effective mem")
}
db.mem.incref()
if db.frozenMem != nil {
db.frozenMem.incref()
}
return db.mem, db.frozenMem
}
// Get effective memdb.
func (db *DB) getEffectiveMem() *memdb.DB {
func (db *DB) getEffectiveMem() *memDB {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.mem == nil {
panic("nil effective mem")
}
db.mem.incref()
return db.mem
}
@@ -77,9 +160,12 @@ func (db *DB) hasFrozenMem() bool {
}
// Get frozen memdb.
func (db *DB) getFrozenMem() *memdb.DB {
func (db *DB) getFrozenMem() *memDB {
db.memMu.RLock()
defer db.memMu.RUnlock()
if db.frozenMem != nil {
db.frozenMem.incref()
}
return db.frozenMem
}
@@ -92,6 +178,7 @@ func (db *DB) dropFrozenMem() {
db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
}
db.frozenJournalFile = nil
db.frozenMem.decref()
db.frozenMem = nil
db.memMu.Unlock()
}
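
The memDB wrapper above combines an atomic reference count with a one-slot channel pool. This standalone sketch shows the same pattern with invented names (refBuf, pool); it is an illustration of the technique, not the library's internal code.

package main

import (
	"fmt"
	"sync/atomic"
)

var pool = make(chan []byte, 1) // one-slot, non-blocking pool

type refBuf struct {
	buf []byte
	ref int32
}

func (b *refBuf) incref() { atomic.AddInt32(&b.ref, 1) }

func (b *refBuf) decref() {
	ref := atomic.AddInt32(&b.ref, -1)
	switch {
	case ref == 0:
		// Last user gone: recycle the buffer if the pool has room, otherwise drop it.
		select {
		case pool <- b.buf[:0]:
		default:
		}
		b.buf = nil
	case ref < 0:
		panic("negative reference count")
	}
}

func main() {
	b := &refBuf{buf: make([]byte, 0, 64), ref: 1}
	b.incref() // second user
	b.decref() // first user done
	b.decref() // last user done; the buffer returns to the pool
	fmt.Println("pooled buffers:", len(pool))
}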

View File

@@ -1210,7 +1210,7 @@ func TestDb_DeletionMarkers2(t *testing.T) {
}
func TestDb_CompactionTableOpenError(t *testing.T) {
h := newDbHarnessWopt(t, &opt.Options{MaxOpenFiles: 0})
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
defer h.close()
im := 10
@@ -1577,7 +1577,11 @@ func TestDb_BloomFilter(t *testing.T) {
return fmt.Sprintf("key%06d", i)
}
n := 10000
const (
n = 10000
indexOverheat = 19898
filterOverheat = 19799
)
// Populate multiple layers
for i := 0; i < n; i++ {
@@ -1601,7 +1605,7 @@ func TestDb_BloomFilter(t *testing.T) {
cnt := int(h.stor.ReadCounter())
t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt)
if min, max := n, n+2*n/100; cnt < min || cnt > max {
if min, max := n+indexOverheat+filterOverheat, n+indexOverheat+filterOverheat+2*n/100; cnt < min || cnt > max {
t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt)
}
@@ -1612,7 +1616,7 @@ func TestDb_BloomFilter(t *testing.T) {
}
cnt = int(h.stor.ReadCounter())
t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt)
if max := 3 * n / 100; cnt > max {
if max := 3*n/100 + indexOverheat + filterOverheat; cnt > max {
t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt)
}

View File

@@ -45,7 +45,7 @@ func (db *DB) jWriter() {
}
}
func (db *DB) rotateMem(n int) (mem *memdb.DB, err error) {
func (db *DB) rotateMem(n int) (mem *memDB, err error) {
// Wait for pending memdb compaction.
err = db.compSendIdle(db.mcompCmdC)
if err != nil {
@@ -63,13 +63,19 @@ func (db *DB) rotateMem(n int) (mem *memdb.DB, err error) {
return
}
func (db *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
delayed := false
flush := func() bool {
flush := func() (retry bool) {
v := db.s.version()
defer v.release()
mem = db.getEffectiveMem()
nn = mem.Free()
defer func() {
if retry {
mem.decref()
mem = nil
}
}()
nn = mem.mdb.Free()
switch {
case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
delayed = true
@@ -84,12 +90,17 @@ func (db *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
}
default:
// Allow memdb to grow if it has no entry.
if mem.Len() == 0 {
if mem.mdb.Len() == 0 {
nn = n
return false
} else {
mem.decref()
mem, err = db.rotateMem(n)
if err == nil {
nn = mem.mdb.Free()
} else {
nn = 0
}
}
mem, err = db.rotateMem(n)
nn = mem.Free()
return false
}
return true
@@ -140,6 +151,7 @@ retry:
if err != nil {
return
}
defer mem.decref()
// Calculate maximum size of the batch.
m := 1 << 20
@@ -178,7 +190,7 @@ drain:
return
case db.journalC <- b:
// Write into memdb
b.memReplay(mem)
b.memReplay(mem.mdb)
}
// Wait for journal writer
select {
@@ -188,7 +200,7 @@ drain:
case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
b.revertMemReplay(mem)
b.revertMemReplay(mem.mdb)
return
}
}
@@ -197,7 +209,7 @@ drain:
if err != nil {
return
}
b.memReplay(mem)
b.memReplay(mem.mdb)
}
// Set last seq number.
@@ -258,7 +270,8 @@ func (db *DB) CompactRange(r util.Range) error {
// Check for overlaps in memdb.
mem := db.getEffectiveMem()
if isMemOverlaps(db.s.icmp, mem, r.Start, r.Limit) {
defer mem.decref()
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0); err != nil {
<-db.writeLockC

View File

@@ -37,6 +37,16 @@
// err = iter.Error()
// ...
//
// Iterate over subset of database content with a particular prefix:
// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
// for iter.Next() {
// // Use key/value.
// ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Seek-then-Iterate:
//
// iter := db.NewIterator(nil, nil)
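//
A runnable version of the prefix-iteration snippet added to the package documentation above; the path and prefix are illustrative.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Iterate only over keys that start with "foo-".
	iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
	for iter.Next() {
		fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		panic(err)
	}
}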

View File

@@ -21,7 +21,7 @@ var _ = testutil.Defer(func() {
BlockRestartInterval: 5,
BlockSize: 50,
Compression: opt.NoCompression,
MaxOpenFiles: 0,
CachedOpenFiles: -1,
Strict: opt.StrictAll,
WriteBuffer: 1000,
}

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build go1.3
package leveldb
import (
"sync/atomic"
"testing"
)
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}

View File

@@ -103,18 +103,18 @@ type flusher interface {
Flush() error
}
// DroppedError is the error type that is passed to the Dropper.Drop method.
type DroppedError struct {
// ErrCorrupted is the error type generated by a corrupted block or chunk.
type ErrCorrupted struct {
Size int
Reason string
}
func (e DroppedError) Error() string {
return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason)
func (e ErrCorrupted) Error() string {
return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
}
// Dropper is the interface that wraps the simple Drop method. The Drop
// method will be called when the journal reader drops a chunk.
// method will be called when the journal reader drops a block or chunk.
type Dropper interface {
Drop(err error)
}
@@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
}
}
var errSkip = errors.New("leveldb/journal: skipped")
func (r *Reader) corrupt(n int, reason string, skip bool) error {
if r.dropper != nil {
r.dropper.Drop(ErrCorrupted{n, reason})
}
if r.strict && !skip {
r.err = ErrCorrupted{n, reason}
return r.err
}
return errSkip
}
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
func (r *Reader) nextChunk(wantFirst, skip bool) error {
func (r *Reader) nextChunk(first bool) error {
for {
if r.j+headerSize <= r.n {
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
chunkType := r.buf[r.j+6]
var err error
if checksum == 0 && length == 0 && chunkType == 0 {
// Drop entire block.
err = DroppedError{r.n - r.j, "zero header"}
m := r.n - r.j
r.i = r.n
r.j = r.n
return r.corrupt(m, "zero header", false)
} else {
m := r.n - r.j
r.i = r.j + headerSize
r.j = r.j + headerSize + int(length)
if r.j > r.n {
// Drop entire block.
err = DroppedError{m, "chunk length overflows block"}
r.i = r.n
r.j = r.n
return r.corrupt(m, "chunk length overflows block", false)
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
// Drop entire block.
err = DroppedError{m, "checksum mismatch"}
r.i = r.n
r.j = r.n
return r.corrupt(m, "checksum mismatch", false)
}
}
if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
if skip {
// The chunks are intentionally skipped.
if chunkType == lastChunkType {
skip = false
}
continue
} else {
// Drop the chunk.
err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
}
if first && chunkType != fullChunkType && chunkType != firstChunkType {
m := r.j - r.i
r.i = r.j
// Report the error, but skip it.
return r.corrupt(m+headerSize, "orphan chunk", true)
}
if err == nil {
r.last = chunkType == fullChunkType || chunkType == lastChunkType
} else {
if r.dropper != nil {
r.dropper.Drop(err)
}
if r.strict {
r.err = err
}
r.last = chunkType == fullChunkType || chunkType == lastChunkType
return nil
}
// The last block.
if r.n < blockSize && r.n > 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
// Read block.
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
if r.n < blockSize && r.n > 0 {
// This is the last block.
if r.j != r.n {
r.err = io.ErrUnexpectedEOF
} else {
r.err = io.EOF
}
return r.err
}
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.ErrUnexpectedEOF {
r.err = err
return r.err
}
if n == 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
@@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
// Next returns a reader for the next journal. It returns io.EOF if there are no
// more journals. The reader returned becomes stale after the next Next call,
// and should no longer be used.
// and should no longer be used. If strict is false, the reader will return an
// io.ErrUnexpectedEOF error when it finds a corrupted journal.
func (r *Reader) Next() (io.Reader, error) {
r.seq++
if r.err != nil {
return nil, r.err
}
skip := !r.last
r.i = r.j
for {
r.i = r.j
if r.nextChunk(true, skip) != nil {
// So that 'orphan chunk' drop will be reported.
skip = false
} else {
if err := r.nextChunk(true); err == nil {
break
}
if r.err != nil {
return nil, r.err
} else if err != errSkip {
return nil, err
}
}
return &singleReader{r, r.seq, nil}, nil
}
// Reset resets the journal reader, allows reuse of the journal reader.
// Reset resets the journal reader, allowing the journal reader to be reused. Reset returns
// the last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
r.seq++
err := r.err
@@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
if r.last {
return 0, io.EOF
}
if x.err = r.nextChunk(false, false); x.err != nil {
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
@@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
if r.last {
return 0, io.EOF
}
if x.err = r.nextChunk(false, false); x.err != nil {
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
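
A hedged sketch of the corruption-handling flow described above: with strict=false the reader reports damage through the Dropper (as ErrCorrupted) and keeps going rather than failing hard. The write/corrupt/read sequence is invented for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

type logDropper struct{}

func (logDropper) Drop(err error) { fmt.Println("dropped:", err) }

func main() {
	buf := new(bytes.Buffer)
	w := journal.NewWriter(buf)
	ww, err := w.Next()
	if err != nil {
		panic(err)
	}
	ww.Write([]byte("hello journal"))
	w.Close()

	b := buf.Bytes()
	b[10] ^= 0xff // flip a payload byte so the chunk checksum no longer matches

	r := journal.NewReader(bytes.NewReader(b), logDropper{}, false /* strict */, true /* checksum */)
	for {
		rr, err := r.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		if _, err := io.Copy(ioutil.Discard, rr); err != nil {
			// A record that was cut off mid-way surfaces as io.ErrUnexpectedEOF.
			fmt.Println("record read error:", err)
		}
	}
}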

View File

@@ -12,6 +12,7 @@ package journal
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
@@ -326,3 +327,492 @@ func TestStaleWriter(t *testing.T) {
t.Fatalf("stale write #1: unexpected error: %v", err)
}
}
func TestCorrupt_MissingLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// Cut the last block.
b := buf.Bytes()[:blockSize]
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read.
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if n != blockSize-1024 {
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
}
// Second read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #0.
for i := 0; i < 1024; i++ {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (third record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #1.
for i := 0; i < 1024; i++ {
b[blockSize+i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
// Third read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #3.
for i := len(b) - 1; i > len(b)-1024; i-- {
b[i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize - headerSize); n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
// Third read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
// Fourth read (fourth record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #3: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (second record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting record #1.
x := blockSize/2 + headerSize
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read (first record).
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read (third record).
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #1: %v", err)
}
if want := int64(blockSize-headerSize) + 1; n != want {
t.Fatalf("read #1: got %d bytes want %d", n, want)
}
if _, err := r.Next(); err != io.EOF {
t.Fatalf("last next: unexpected error: %v", err)
}
}

View File

@@ -24,16 +24,22 @@ const (
DefaultBlockRestartInterval = 16
DefaultBlockSize = 4 * KiB
DefaultCompressionType = SnappyCompression
DefaultMaxOpenFiles = 1000
DefaultCachedOpenFiles = 500
DefaultWriteBuffer = 4 * MiB
)
type noCache struct{}
func (noCache) SetCapacity(capacity int) {}
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
func (noCache) Purge(fin cache.PurgeFin) {}
func (noCache) Zap(closed bool) {}
func (noCache) SetCapacity(capacity int) {}
func (noCache) Capacity() int { return 0 }
func (noCache) Used() int { return 0 }
func (noCache) Size() int { return 0 }
func (noCache) NumObjects() int { return 0 }
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
func (noCache) ZapNamespace(id uint64) {}
func (noCache) Purge(fin cache.PurgeFin) {}
func (noCache) Zap() {}
var NoCache cache.Cache = noCache{}
@@ -119,6 +125,13 @@ type Options struct {
// The default value is 4KiB.
BlockSize int
// CachedOpenFiles defines the number of open files to keep around when not
// in use; the count also includes files that are still in use.
// Set this to a negative value to disable caching.
//
// The default value is 500.
CachedOpenFiles int
// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship. The same comparison algorithm must be used for reads
// and writes over the lifetime of the DB.
@@ -159,13 +172,6 @@ type Options struct {
// The default value is nil.
Filter filter.Filter
// MaxOpenFiles defines the maximum number of open files to keep around
// (cached). This is not a hard limit; actual open files may exceed
// the defined value.
//
// The default value is 1000.
MaxOpenFiles int
// Strict defines the DB strict level.
Strict Strict
@@ -207,6 +213,15 @@ func (o *Options) GetBlockSize() int {
return o.BlockSize
}
func (o *Options) GetCachedOpenFiles() int {
if o == nil || o.CachedOpenFiles == 0 {
return DefaultCachedOpenFiles
} else if o.CachedOpenFiles < 0 {
return 0
}
return o.CachedOpenFiles
}
func (o *Options) GetComparer() comparer.Comparer {
if o == nil || o.Comparer == nil {
return comparer.DefaultComparer
@@ -242,13 +257,6 @@ func (o *Options) GetFilter() filter.Filter {
return o.Filter
}
func (o *Options) GetMaxOpenFiles() int {
if o == nil || o.MaxOpenFiles <= 0 {
return DefaultMaxOpenFiles
}
return o.MaxOpenFiles
}
func (o *Options) GetStrict(strict Strict) bool {
if o == nil || o.Strict == 0 {
return DefaultStrict&strict != 0
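
A brief example of the renamed knob, assuming the Options field exactly as declared in this diff: CachedOpenFiles replaces MaxOpenFiles, and a negative value turns the table-file cache off.

package main

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	o := &opt.Options{
		CachedOpenFiles: 200, // keep up to ~200 not-in-use table readers; -1 disables caching
	}
	db, err := leveldb.OpenFile("/tmp/example-db", o)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}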

View File

@@ -58,7 +58,7 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
storLock: storLock,
}
s.setOptions(o)
s.tops = newTableOps(s, s.o.GetMaxOpenFiles())
s.tops = newTableOps(s, s.o.GetCachedOpenFiles())
s.setVersion(&version{s: s})
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed")
return

View File

@@ -22,7 +22,7 @@ type dropper struct {
}
func (d dropper) Drop(err error) {
if e, ok := err.(journal.DroppedError); ok {
if e, ok := err.(journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
} else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)

View File

@@ -275,6 +275,7 @@ type tOps struct {
s *session
cache cache.Cache
cacheNS cache.Namespace
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
@@ -296,7 +297,7 @@ func (t *tOps) create() (*tWriter, error) {
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
return f, n, err
return
}
defer func() {
@@ -321,33 +322,24 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
return
}
// Opens table. It returns a cache object, which should
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (c cache.Object, err error) {
func (t *tOps) open(f *tFile) (ch cache.Handle, err error) {
num := f.file.Num()
c, ok := t.cacheNS.Get(num, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
ch = t.cacheNS.Get(num, func() (charge int, value interface{}) {
var r storage.Reader
r, err = f.file.Open()
if err != nil {
return
return 0, nil
}
o := t.s.o
var cacheNS cache.Namespace
if bc := o.GetBlockCache(); bc != nil {
cacheNS = bc.GetNamespace(num)
var bcacheNS cache.Namespace
if bc := t.s.o.GetBlockCache(); bc != nil {
bcacheNS = bc.GetNamespace(num)
}
ok = true
value = table.NewReader(r, int64(f.size), cacheNS, o)
charge = 1
fin = func() {
r.Close()
}
return
return 1, table.NewReader(r, int64(f.size), bcacheNS, t.bpool, t.s.o)
})
if !ok && err == nil {
if ch == nil && err == nil {
err = ErrClosed
}
return
@@ -356,34 +348,33 @@ func (t *tOps) open(f *tFile) (c cache.Object, err error) {
// Finds key/value pair whose key is greater than or equal to the
// given key.
func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
c, err := t.open(f)
ch, err := t.open(f)
if err != nil {
return nil, nil, err
}
defer c.Release()
return c.Value().(*table.Reader).Find(key, ro)
defer ch.Release()
return ch.Value().(*table.Reader).Find(key, ro)
}
// Returns approximate offset of the given key.
func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
c, err := t.open(f)
ch, err := t.open(f)
if err != nil {
return
}
_offset, err := c.Value().(*table.Reader).OffsetOf(key)
offset = uint64(_offset)
c.Release()
return
defer ch.Release()
offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
return uint64(offset_), err
}
// Creates an iterator from the given table.
func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
c, err := t.open(f)
ch, err := t.open(f)
if err != nil {
return iterator.NewEmptyIterator(err)
}
iter := c.Value().(*table.Reader).NewIterator(slice, ro)
iter.SetReleaser(c)
iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
iter.SetReleaser(ch)
return iter
}
@@ -391,14 +382,16 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
// no one uses the table.
func (t *tOps) remove(f *tFile) {
num := f.file.Num()
t.cacheNS.Delete(num, func(exist bool) {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
} else {
t.s.logf("table@remove removed @%d", num)
}
if bc := t.s.o.GetBlockCache(); bc != nil {
bc.GetNamespace(num).Zap(false)
t.cacheNS.Delete(num, func(exist, pending bool) {
if !pending {
if err := f.file.Remove(); err != nil {
t.s.logf("table@remove removing @%d %q", num, err)
} else {
t.s.logf("table@remove removed @%d", num)
}
if bc := t.s.o.GetBlockCache(); bc != nil {
bc.ZapNamespace(num)
}
}
})
}
@@ -406,14 +399,19 @@ func (t *tOps) remove(f *tFile) {
// Closes the table ops instance. It will close all tables,
// regardless of whether they are still in use.
func (t *tOps) close() {
t.cache.Zap(true)
t.cache.Zap()
t.bpool.Close()
}
// Creates new initialized table ops instance.
func newTableOps(s *session, cacheCap int) *tOps {
c := cache.NewLRUCache(cacheCap)
ns := c.GetNamespace(0)
return &tOps{s, c, ns}
return &tOps{
s: s,
cache: c,
cacheNS: c.GetNamespace(0),
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
}
}
// tWriter wraps the table writer. It keeps track of the file descriptor

View File

@@ -40,7 +40,7 @@ var _ = testutil.Defer(func() {
data := bw.buf.Bytes()
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
return &block{
cmp: comparer.DefaultComparer,
tr: &Reader{cmp: comparer.DefaultComparer},
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,

View File

@@ -37,7 +37,7 @@ func max(x, y int) int {
}
type block struct {
cmp comparer.BasicComparer
tr *Reader
data []byte
restartsLen int
restartsOffset int
@@ -46,31 +46,25 @@ type block struct {
}
func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
n := b.restartsOffset
data := b.data
cmp := b.cmp
index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
offset := int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):]))
offset += 1 // shared always zero, since this is a restart point
v1, n1 := binary.Uvarint(data[offset:]) // key length
_, n2 := binary.Uvarint(data[offset+n1:]) // value length
offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
offset += 1 // shared always zero, since this is a restart point
v1, n1 := binary.Uvarint(b.data[offset:]) // key length
_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
m := offset + n1 + n2
return cmp.Compare(data[m:m+int(v1)], key) > 0
return b.tr.cmp.Compare(b.data[m:m+int(v1)], key) > 0
}) + rstart - 1
if index < rstart {
// The smallest key is greater-than key sought.
index = rstart
}
offset = int(binary.LittleEndian.Uint32(data[n+4*index:]))
offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
return
}
func (b *block) restartIndex(rstart, rlimit, offset int) int {
n := b.restartsOffset
data := b.data
return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
return int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) > offset
return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
}) + rstart - 1
}
@@ -139,6 +133,14 @@ func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releas
return bi
}
func (b *block) Release() {
if b.tr.bpool != nil {
b.tr.bpool.Put(b.data)
}
b.tr = nil
b.data = nil
}
type dir int
const (
@@ -261,7 +263,7 @@ func (i *blockIter) Seek(key []byte) bool {
i.dir = dirForward
}
for i.Next() {
if i.block.cmp.Compare(i.key, key) >= 0 {
if i.block.tr.cmp.Compare(i.key, key) >= 0 {
return true
}
}
@@ -437,18 +439,21 @@ func (i *blockIter) Value() []byte {
}
func (i *blockIter) Release() {
i.prevNode = nil
i.prevKeys = nil
i.key = nil
i.value = nil
i.dir = dirReleased
if i.cache != nil {
i.cache.Release()
i.cache = nil
}
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
if i.dir > dirReleased {
i.block = nil
i.prevNode = nil
i.prevKeys = nil
i.key = nil
i.value = nil
i.dir = dirReleased
if i.cache != nil {
i.cache.Release()
i.cache = nil
}
if i.releaser != nil {
i.releaser.Release()
i.releaser = nil
}
}
}
@@ -467,7 +472,7 @@ func (i *blockIter) Error() error {
}
type filterBlock struct {
filter filter.Filter
tr *Reader
data []byte
oOffset int
baseLg uint
@@ -481,7 +486,7 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
n := int(binary.LittleEndian.Uint32(o))
m := int(binary.LittleEndian.Uint32(o[4:]))
if n < m && m <= b.oOffset {
return b.filter.Contains(b.data[n:m], key)
return b.tr.filter.Contains(b.data[n:m], key)
} else if n == m {
return false
}
@@ -489,10 +494,17 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
return true
}
func (b *filterBlock) Release() {
if b.tr.bpool != nil {
b.tr.bpool.Put(b.data)
}
b.tr = nil
b.data = nil
}
type indexIter struct {
blockIter
tableReader *Reader
slice *util.Range
*blockIter
slice *util.Range
// Options
checksum bool
fillCache bool
@@ -511,7 +523,7 @@ func (i *indexIter) Get() iterator.Iterator {
if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
slice = i.slice
}
return i.tableReader.getDataIter(dataBH, slice, i.checksum, i.fillCache)
return i.blockIter.block.tr.getDataIter(dataBH, slice, i.checksum, i.fillCache)
}
// Reader is a table reader.
@@ -519,15 +531,15 @@ type Reader struct {
reader io.ReaderAt
cache cache.Namespace
err error
bpool *util.BufferPool
// Options
cmp comparer.Comparer
filter filter.Filter
checksum bool
strictIter bool
dataEnd int64
indexBlock *block
filterBlock *filterBlock
dataEnd int64
indexBH, filterBH blockHandle
}
func verifyChecksum(data []byte) bool {
@@ -538,12 +550,13 @@ func verifyChecksum(data []byte) bool {
}
func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
data := make([]byte, bh.length+blockTrailerLen)
data := r.bpool.Get(int(bh.length + blockTrailerLen))
if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
return nil, err
}
if checksum || r.checksum {
if !verifyChecksum(data) {
r.bpool.Put(data)
return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
}
}
@@ -551,12 +564,18 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
case blockTypeNoCompression:
data = data[:bh.length]
case blockTypeSnappyCompression:
var err error
data, err = snappy.Decode(nil, data[:bh.length])
decLen, err := snappy.DecodedLen(data[:bh.length])
if err != nil {
return nil, err
}
tmp := data
data, err = snappy.Decode(r.bpool.Get(decLen), tmp[:bh.length])
r.bpool.Put(tmp)
if err != nil {
return nil, err
}
default:
r.bpool.Put(data)
return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
}
return data, nil
@@ -569,7 +588,7 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
}
restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
b := &block{
cmp: r.cmp,
tr: r,
data: data,
restartsLen: restartsLen,
restartsOffset: len(data) - (restartsLen+1)*4,
@@ -578,7 +597,44 @@ func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
return b, nil
}
func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterBlock, error) {
func (r *Reader) readBlockCached(bh blockHandle, checksum, fillCache bool) (*block, util.Releaser, error) {
if r.cache != nil {
var err error
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return 0, nil
}
var b *block
b, err = r.readBlock(bh, checksum)
if err != nil {
return 0, nil
}
return cap(b.data), b
})
if ch != nil {
b, ok := ch.Value().(*block)
if !ok {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type")
}
if !b.checksum && (r.checksum || checksum) {
if !verifyChecksum(b.data) {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
}
b.checksum = true
}
return b, ch, err
} else if err != nil {
return nil, nil, err
}
}
b, err := r.readBlock(bh, checksum)
return b, b, err
}
func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
data, err := r.readRawBlock(bh, true)
if err != nil {
return nil, err
@@ -593,7 +649,7 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
}
b := &filterBlock{
filter: filter,
tr: r,
data: data,
oOffset: oOffset,
baseLg: uint(data[n-1]),
@@ -602,44 +658,42 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
return b, nil
}
func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
if r.cache != nil {
// Get/set block cache.
var err error
cache, ok := r.cache.Get(dataBH.offset, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
if !fillCache {
return
return 0, nil
}
var dataBlock *block
dataBlock, err = r.readBlock(dataBH, checksum)
if err == nil {
ok = true
value = dataBlock
charge = int(dataBH.length)
var b *filterBlock
b, err = r.readFilterBlock(bh)
if err != nil {
return 0, nil
}
return
return cap(b.data), b
})
if err != nil {
return iterator.NewEmptyIterator(err)
}
if ok {
dataBlock := cache.Value().(*block)
if !dataBlock.checksum && (r.checksum || checksum) {
if !verifyChecksum(dataBlock.data) {
return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid block (checksum mismatch)"))
}
dataBlock.checksum = true
if ch != nil {
b, ok := ch.Value().(*filterBlock)
if !ok {
ch.Release()
return nil, nil, errors.New("leveldb/table: Reader: inconsistent block type")
}
iter := dataBlock.newIterator(slice, false, cache)
return iter
return b, ch, err
} else if err != nil {
return nil, nil, err
}
}
dataBlock, err := r.readBlock(dataBH, checksum)
b, err := r.readFilterBlock(bh)
return b, b, err
}
func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
b, rel, err := r.readBlockCached(dataBH, checksum, fillCache)
if err != nil {
return iterator.NewEmptyIterator(err)
}
iter := dataBlock.newIterator(slice, false, nil)
return iter
return b.newIterator(slice, false, rel)
}
// NewIterator creates an iterator from the table.
@@ -653,18 +707,21 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
// when not used.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
if r.err != nil {
return iterator.NewEmptyIterator(r.err)
}
fillCache := !ro.GetDontFillCache()
b, rel, err := r.readBlockCached(r.indexBH, true, fillCache)
if err != nil {
return iterator.NewEmptyIterator(err)
}
index := &indexIter{
blockIter: *r.indexBlock.newIterator(slice, true, nil),
tableReader: r,
slice: slice,
checksum: ro.GetStrict(opt.StrictBlockChecksum),
fillCache: !ro.GetDontFillCache(),
blockIter: b.newIterator(slice, true, rel),
slice: slice,
checksum: ro.GetStrict(opt.StrictBlockChecksum),
fillCache: !ro.GetDontFillCache(),
}
return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false)
}
@@ -681,7 +738,13 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
return
}
index := r.indexBlock.newIterator(nil, true, nil)
indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
if err != nil {
return
}
defer rel.Release()
index := indexBlock.newIterator(nil, true, nil)
defer index.Release()
if !index.Seek(key) {
err = index.Error()
@@ -695,9 +758,15 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
return
}
if r.filterBlock != nil && !r.filterBlock.contains(dataBH.offset, key) {
err = ErrNotFound
return
if r.filter != nil {
filterBlock, rel, ferr := r.readFilterBlockCached(r.filterBH, true)
if ferr == nil {
if !filterBlock.contains(dataBH.offset, key) {
rel.Release()
return nil, nil, ErrNotFound
}
rel.Release()
}
}
data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache())
defer data.Release()
@@ -708,8 +777,11 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
}
return
}
// Don't use block buffer, no need to copy the buffer.
rkey = data.Key()
value = data.Value()
// Use block buffer, and since the buffer will be recycled, the buffer
// needs to be copied.
value = append([]byte{}, data.Value()...)
return
}
@@ -741,7 +813,13 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
return
}
index := r.indexBlock.newIterator(nil, true, nil)
indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
if err != nil {
return
}
defer rel.Release()
index := indexBlock.newIterator(nil, true, nil)
defer index.Release()
if index.Seek(key) {
dataBH, n := decodeBlockHandle(index.Value())
@@ -759,14 +837,29 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
return
}
// Release implements util.Releaser.
// It also closes the file if it is an io.Closer.
func (r *Reader) Release() {
if closer, ok := r.reader.(io.Closer); ok {
closer.Close()
}
r.reader = nil
r.cache = nil
r.bpool = nil
}
// NewReader creates a new initialized table reader for the file.
// The cache is optional and can be nil.
// The cache and bpool are optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options) *Reader {
func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, bpool *util.BufferPool, o *opt.Options) *Reader {
if bpool == nil {
bpool = util.NewBufferPool(o.GetBlockSize() + blockTrailerLen)
}
r := &Reader{
reader: f,
cache: cache,
bpool: bpool,
cmp: o.GetComparer(),
checksum: o.GetStrict(opt.StrictBlockChecksum),
strictIter: o.GetStrict(opt.StrictIterator),
@@ -794,16 +887,11 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options)
return r
}
// Decode the index block handle.
indexBH, n := decodeBlockHandle(footer[n:])
r.indexBH, n = decodeBlockHandle(footer[n:])
if n == 0 {
r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)")
return r
}
// Read index block.
r.indexBlock, r.err = r.readBlock(indexBH, true)
if r.err != nil {
return r
}
// Read metaindex block.
metaBlock, err := r.readBlock(metaBH, true)
if err != nil {
@@ -819,32 +907,28 @@ func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options)
continue
}
fn := key[7:]
var filter filter.Filter
if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
filter = f0
r.filter = f0
} else {
for _, f0 := range o.GetAltFilters() {
if f0.Name() == fn {
filter = f0
r.filter = f0
break
}
}
}
if filter != nil {
if r.filter != nil {
filterBH, n := decodeBlockHandle(metaIter.Value())
if n == 0 {
continue
}
r.filterBH = filterBH
// Update data end.
r.dataEnd = int64(filterBH.offset)
filterBlock, err := r.readFilterBlock(filterBH, filter)
if err != nil {
continue
}
r.filterBlock = filterBlock
break
}
}
metaIter.Release()
metaBlock.Release()
return r
}

View File

@@ -59,7 +59,7 @@ var _ = testutil.Defer(func() {
It("Should be able to approximate offset of a key correctly", func() {
Expect(err).ShouldNot(HaveOccurred())
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
CheckOffset := func(key string, expect, threshold int) {
offset, err := tr.OffsetOf([]byte(key))
Expect(err).ShouldNot(HaveOccurred())
@@ -95,7 +95,7 @@ var _ = testutil.Defer(func() {
tw.Close()
// Opening the table.
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, o)
return tableWrapper{tr}
}
Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
@@ -111,7 +111,9 @@ var _ = testutil.Defer(func() {
testutil.AllKeyValueTesting(nil, Build)
Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
It("should have the correct number of blocks", func() {
Expect(r.indexBlock.restartsLen).Should(Equal(9))
indexBlock, err := r.readBlock(r.indexBH, true)
Expect(err).To(BeNil())
Expect(indexBlock.restartsLen).Should(Equal(9))
})
}))
})

View File

@@ -0,0 +1,205 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"fmt"
"sync/atomic"
"time"
)
type buffer struct {
b []byte
miss int
}
// BufferPool is a 'buffer pool'.
type BufferPool struct {
pool [6]chan []byte
size [5]uint32
sizeMiss [5]uint32
sizeHalf [5]uint32
baseline [4]int
baselinex0 int
baselinex1 int
baseline0 int
baseline1 int
baseline2 int
close chan struct{}
get uint32
put uint32
half uint32
less uint32
equal uint32
greater uint32
miss uint32
}
func (p *BufferPool) poolNum(n int) int {
if n <= p.baseline0 && n > p.baseline0/2 {
return 0
}
for i, x := range p.baseline {
if n <= x {
return i + 1
}
}
return len(p.baseline) + 1
}
// Get returns a buffer of length n.
func (p *BufferPool) Get(n int) []byte {
atomic.AddUint32(&p.get, 1)
poolNum := p.poolNum(n)
pool := p.pool[poolNum]
if poolNum == 0 {
// Fast path.
select {
case b := <-pool:
switch {
case cap(b) > n:
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
select {
case pool <- b:
default:
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
atomic.AddUint32(&p.greater, 1)
}
default:
atomic.AddUint32(&p.miss, 1)
}
return make([]byte, n, p.baseline0)
} else {
sizePtr := &p.size[poolNum-1]
select {
case b := <-pool:
switch {
case cap(b) > n:
if cap(b)-n >= n {
atomic.AddUint32(&p.half, 1)
sizeHalfPtr := &p.sizeHalf[poolNum-1]
if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
atomic.StoreUint32(sizeHalfPtr, 0)
} else {
select {
case pool <- b:
default:
}
}
return make([]byte, n)
} else {
atomic.AddUint32(&p.less, 1)
return b[:n]
}
case cap(b) == n:
atomic.AddUint32(&p.equal, 1)
return b[:n]
default:
atomic.AddUint32(&p.greater, 1)
if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
select {
case pool <- b:
default:
}
}
}
default:
atomic.AddUint32(&p.miss, 1)
}
if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
if size == 0 {
atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
} else {
sizeMissPtr := &p.sizeMiss[poolNum-1]
if atomic.AddUint32(sizeMissPtr, 1) == 20 {
atomic.StoreUint32(sizePtr, uint32(n))
atomic.StoreUint32(sizeMissPtr, 0)
}
}
return make([]byte, n)
} else {
return make([]byte, n, size)
}
}
}
// Put adds the given buffer to the pool.
func (p *BufferPool) Put(b []byte) {
atomic.AddUint32(&p.put, 1)
pool := p.pool[p.poolNum(cap(b))]
select {
case pool <- b:
default:
}
}
func (p *BufferPool) Close() {
select {
case p.close <- struct{}{}:
default:
}
}
func (p *BufferPool) String() string {
return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
}
func (p *BufferPool) drain() {
ticker := time.NewTicker(2 * time.Second)
for {
select {
case <-ticker.C:
for _, ch := range p.pool {
select {
case <-ch:
default:
}
}
case <-p.close:
for _, ch := range p.pool {
close(ch)
}
return
}
}
}
// NewBufferPool creates a new initialized 'buffer pool'.
func NewBufferPool(baseline int) *BufferPool {
if baseline <= 0 {
panic("baseline can't be <= 0")
}
p := &BufferPool{
baseline0: baseline,
baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
close: make(chan struct{}, 1),
}
for i, cap := range []int{2, 2, 4, 4, 2, 1} {
p.pool[i] = make(chan []byte, cap)
}
go p.drain()
return p
}
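For reference, a minimal usage sketch of the buffer pool above, assuming the upstream import path github.com/syndtr/goleveldb/leveldb/util; the baseline mirrors how newTableOps sizes its pool (block size plus the 5-byte block trailer):

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    // Baseline mirrors newTableOps: 4 KiB block size + 5-byte trailer.
    pool := util.NewBufferPool(4096 + 5)
    defer pool.Close()

    // Get returns a []byte of exactly the requested length, reusing a
    // pooled buffer when one with a suitable capacity is available.
    buf := pool.Get(1024)
    copy(buf, "raw block contents")

    // Put recycles the buffer; the caller must not keep references to it.
    pool.Put(buf)

    // The String method exposes the pool's hit/miss counters for debugging.
    fmt.Println(pool)
}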

View File

@@ -0,0 +1,21 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build go1.3
package util
import (
"sync"
)
type Pool struct {
sync.Pool
}
func NewPool(cap int) *Pool {
return &Pool{}
}

View File

@@ -0,0 +1,33 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.3
package util
type Pool struct {
pool chan interface{}
}
func (p *Pool) Get() interface{} {
select {
case x := <-p.pool:
return x
default:
return nil
}
}
func (p *Pool) Put(x interface{}) {
select {
case p.pool <- x:
default:
}
}
func NewPool(cap int) *Pool {
return &Pool{pool: make(chan interface{}, cap)}
}
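A brief sketch of the channel-backed fallback's non-blocking behavior (this file builds only without the go1.3 tag; the sync.Pool-backed variant above replaces it on Go 1.3 and later):

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    p := util.NewPool(2) // backed by a channel of capacity 2

    p.Put("a")
    p.Put("b")
    p.Put("c") // silently dropped: the pool already holds two values

    fmt.Println(p.Get()) // "a"
    fmt.Println(p.Get()) // "b"
    fmt.Println(p.Get()) // <nil>: Get never blocks on an empty pool
}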

View File

@@ -14,3 +14,19 @@ type Range struct {
// Limit of the key range, not included in the range.
Limit []byte
}
// BytesPrefix returns a key range that satisfies the given prefix.
// This is only applicable for the standard 'bytes comparer'.
func BytesPrefix(prefix []byte) *Range {
var limit []byte
for i := len(prefix) - 1; i >= 0; i-- {
c := prefix[i]
if c < 0xff {
limit = make([]byte, i+1)
copy(limit, prefix)
limit[i] = c + 1
break
}
}
return &Range{prefix, limit}
}
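A small sketch of what BytesPrefix computes; the resulting Range can be handed to anything that accepts a *util.Range key range, such as the table reader's NewIterator above. The Start field comes from the existing Range definition, which is not shown in this hunk:

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    // All keys starting with "user/" fall in [Start, Limit).
    r := util.BytesPrefix([]byte("user/"))
    fmt.Printf("%q\n", r.Start) // "user/"
    fmt.Printf("%q\n", r.Limit) // "user0": last byte incremented ('/'+1 == '0')
}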

View File

@@ -1,10 +1,9 @@
syncthing
=========
[![Build Status](https://img.shields.io/travis/syncthing/syncthing.svg?style=flat)](https://travis-ci.org/syncthing/syncthing)
[![Coverage Status](https://img.shields.io/coveralls/syncthing/syncthing.svg?style=flat)](https://coveralls.io/r/syncthing/syncthing?branch=master)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/syncthing/syncthing)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT)
[![Latest Build](http://img.shields.io/jenkins/s/http/build.syncthing.net/syncthing.svg?style=flat-square)](http://build.syncthing.net/job/syncthing/lastBuild/)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](http://godoc.org/github.com/syncthing/syncthing)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)](http://opensource.org/licenses/MIT)
This is the `syncthing` project. The following are the project goals:
@@ -26,14 +25,22 @@ for incompatible changes.
Getting Started
---------------
Take a look at the [getting started guide](http://discourse.syncthing.net/t/getting-started/46).
Take a look at the [getting started guide](http://discourse.syncthing.net/t/46).
Building
--------
Building Syncthing from source is easy, and there's a
[guide](http://discourse.syncthing.net/t/44)
that describes it for both Unix and Windows.
Signed Releases
---------------
As of v0.7.0 and onwards, git tags and release binaries are GPG signed with
the key BCE524C7 (http://nym.se/gpg.txt). The signature is included in the
normal release bundle as `syncthing.asc` or `syncthing.exe.asc`.
the key BCE524C7 (http://nym.se/gpg.txt). For release binaries, MD5 and
SHA1 checksums are calculated and signed, available in the
md5sum.txt.asc and sha1sum.txt.asc files.
Documentation
=============

23
auto/auto_test.go Normal file
View File

@@ -0,0 +1,23 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package auto_test
import (
"bytes"
"testing"
"github.com/syncthing/syncthing/auto"
)
func TestAssets(t *testing.T) {
assets := auto.Assets()
idx, ok := assets["index.html"]
if !ok {
t.Fatal("No index.html in compiled in assets")
}
if !bytes.Contains(idx, []byte("<html")) {
t.Fatal("No html in index.html")
}
}

View File

@@ -1,2 +1,6 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package auto contains auto generated files for web assets.
package auto

View File

File diff suppressed because one or more lines are too long

View File

@@ -11,52 +11,17 @@ type recv struct {
src net.Addr
}
type dst struct {
intf string
conn *net.UDPConn
type Interface interface {
Send(data []byte)
Recv() ([]byte, net.Addr)
}
type Beacon struct {
conn *net.UDPConn
port int
conns []dst
inbox chan []byte
outbox chan recv
}
func New(port int) (*Beacon, error) {
conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: port})
if err != nil {
return nil, err
}
b := &Beacon{
conn: conn,
port: port,
inbox: make(chan []byte),
outbox: make(chan recv, 16),
}
go b.reader()
go b.writer()
return b, nil
}
func (b *Beacon) Send(data []byte) {
b.inbox <- data
}
func (b *Beacon) Recv() ([]byte, net.Addr) {
recv := <-b.outbox
return recv.data, recv.src
}
func (b *Beacon) reader() {
func genericReader(conn *net.UDPConn, outbox chan<- recv) {
bs := make([]byte, 65536)
for {
n, addr, err := b.conn.ReadFrom(bs)
n, addr, err := conn.ReadFrom(bs)
if err != nil {
l.Warnln("Beacon read:", err)
l.Warnln("multicast read:", err)
return
}
if debug {
@@ -66,7 +31,7 @@ func (b *Beacon) reader() {
c := make([]byte, n)
copy(c, bs)
select {
case b.outbox <- recv{c, addr}:
case outbox <- recv{c, addr}:
default:
if debug {
l.Debugln("dropping message")
@@ -74,59 +39,3 @@ func (b *Beacon) reader() {
}
}
}
func (b *Beacon) writer() {
for bs := range b.inbox {
addrs, err := net.InterfaceAddrs()
if err != nil {
l.Warnln("Beacon: interface addresses:", err)
continue
}
var dsts []net.IP
for _, addr := range addrs {
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() && iaddr.IP.To4() != nil {
baddr := bcast(iaddr)
dsts = append(dsts, baddr.IP)
}
}
if len(dsts) == 0 {
// Fall back to the general IPv4 broadcast address
dsts = append(dsts, net.IP{0xff, 0xff, 0xff, 0xff})
}
if debug {
l.Debugln("addresses:", dsts)
}
for _, ip := range dsts {
dst := &net.UDPAddr{IP: ip, Port: b.port}
_, err := b.conn.WriteTo(bs, dst)
if err != nil {
if debug {
l.Debugln(err)
}
} else if debug {
l.Debugf("sent %d bytes to %s", len(bs), dst)
}
}
}
}
func bcast(ip *net.IPNet) *net.IPNet {
var bc = &net.IPNet{}
bc.IP = make([]byte, len(ip.IP))
copy(bc.IP, ip.IP)
bc.Mask = ip.Mask
offset := len(bc.IP) - len(bc.Mask)
for i := range bc.IP {
if i-offset >= 0 {
bc.IP[i] = ip.IP[i] | ^ip.Mask[i-offset]
}
}
return bc
}

97
beacon/broadcast.go Normal file
View File

@@ -0,0 +1,97 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package beacon
import "net"
type Broadcast struct {
conn *net.UDPConn
port int
inbox chan []byte
outbox chan recv
}
func NewBroadcast(port int) (*Broadcast, error) {
conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: port})
if err != nil {
return nil, err
}
b := &Broadcast{
conn: conn,
port: port,
inbox: make(chan []byte),
outbox: make(chan recv, 16),
}
go genericReader(b.conn, b.outbox)
go b.writer()
return b, nil
}
func (b *Broadcast) Send(data []byte) {
b.inbox <- data
}
func (b *Broadcast) Recv() ([]byte, net.Addr) {
recv := <-b.outbox
return recv.data, recv.src
}
func (b *Broadcast) writer() {
for bs := range b.inbox {
addrs, err := net.InterfaceAddrs()
if err != nil {
l.Warnln("Broadcast: interface addresses:", err)
continue
}
var dsts []net.IP
for _, addr := range addrs {
if iaddr, ok := addr.(*net.IPNet); ok && iaddr.IP.IsGlobalUnicast() && iaddr.IP.To4() != nil {
baddr := bcast(iaddr)
dsts = append(dsts, baddr.IP)
}
}
if len(dsts) == 0 {
// Fall back to the general IPv4 broadcast address
dsts = append(dsts, net.IP{0xff, 0xff, 0xff, 0xff})
}
if debug {
l.Debugln("addresses:", dsts)
}
for _, ip := range dsts {
dst := &net.UDPAddr{IP: ip, Port: b.port}
_, err := b.conn.WriteTo(bs, dst)
if err != nil {
if debug {
l.Debugln(err)
}
} else if debug {
l.Debugf("sent %d bytes to %s", len(bs), dst)
}
}
}
}
func bcast(ip *net.IPNet) *net.IPNet {
var bc = &net.IPNet{}
bc.IP = make([]byte, len(ip.IP))
copy(bc.IP, ip.IP)
bc.Mask = ip.Mask
offset := len(bc.IP) - len(bc.Mask)
for i := range bc.IP {
if i-offset >= 0 {
bc.IP[i] = ip.IP[i] | ^ip.Mask[i-offset]
}
}
return bc
}

69
beacon/multicast.go Normal file
View File

@@ -0,0 +1,69 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package beacon
import "net"
type Multicast struct {
conn *net.UDPConn
addr *net.UDPAddr
inbox chan []byte
outbox chan recv
}
func NewMulticast(addr string) (*Multicast, error) {
gaddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
conn, err := net.ListenMulticastUDP("udp", nil, gaddr)
if err != nil {
return nil, err
}
b := &Multicast{
conn: conn,
addr: gaddr,
inbox: make(chan []byte),
outbox: make(chan recv, 16),
}
go genericReader(b.conn, b.outbox)
go b.writer()
return b, nil
}
func (b *Multicast) Send(data []byte) {
b.inbox <- data
}
func (b *Multicast) Recv() ([]byte, net.Addr) {
recv := <-b.outbox
return recv.data, recv.src
}
func (b *Multicast) writer() {
for bs := range b.inbox {
intfs, err := net.Interfaces()
if err != nil {
l.Warnln("multicast interfaces:", err)
continue
}
for _, intf := range intfs {
if intf.Flags&net.FlagUp != 0 && intf.Flags&net.FlagMulticast != 0 {
addr := *b.addr
addr.Zone = intf.Name
_, err = b.conn.WriteTo(bs, &addr)
if err != nil {
if debug {
l.Debugln(err, "on write to", addr)
}
} else if debug {
l.Debugf("sent %d bytes to %s", len(bs), addr.String())
}
}
}
}
}
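Both new implementations satisfy the Interface introduced in the refactored beacon.go. A hedged sketch of using them interchangeably follows; the import path mirrors the syncthing/auto import used elsewhere in this changeset, and the port number is only an example:

package main

import (
    "log"

    "github.com/syncthing/syncthing/beacon"
)

// Compile-time assertions that both types implement beacon.Interface.
var (
    _ beacon.Interface = (*beacon.Broadcast)(nil)
    _ beacon.Interface = (*beacon.Multicast)(nil)
)

// announce works against the interface, so broadcast and multicast
// beacons are interchangeable from the caller's point of view.
func announce(b beacon.Interface) {
    b.Send([]byte("hello"))
    data, src := b.Recv() // blocks until a packet arrives
    log.Printf("%d bytes from %s", len(data), src)
}

func main() {
    bc, err := beacon.NewBroadcast(21025) // example port only
    if err != nil {
        log.Fatal(err)
    }
    announce(bc)
}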

500
build.go Normal file
View File

@@ -0,0 +1,500 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var (
versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
goarch string
goos string
noupgrade bool
)
const minGoVersion = 1.3
func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
if os.Getenv("GOPATH") == "" {
cwd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
gopath := filepath.Clean(filepath.Join(cwd, "../../../../"))
log.Println("GOPATH is", gopath)
os.Setenv("GOPATH", gopath)
}
os.Setenv("PATH", fmt.Sprintf("%s%cbin%c%s", os.Getenv("GOPATH"), os.PathSeparator, os.PathListSeparator, os.Getenv("PATH")))
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.BoolVar(&noupgrade, "no-upgrade", false, "Disable upgrade functionality")
flag.Parse()
switch goarch {
case "386", "amd64", "armv5", "armv6", "armv7":
break
case "arm":
log.Println("Invalid goarch \"arm\". Use one of \"armv5\", \"armv6\", \"armv7\".")
log.Fatalln("Note that producing a correct \"armv5\" binary requires a rebuilt stdlib.")
default:
log.Printf("Unknown goarch %q; proceed with caution!", goarch)
}
checkRequiredGoVersion()
if check() != nil {
setup()
}
if flag.NArg() == 0 {
install("./cmd/...")
return
}
switch flag.Arg(0) {
case "install":
pkg := "./cmd/..."
if flag.NArg() > 2 {
pkg = flag.Arg(1)
}
install(pkg)
case "build":
pkg := "./cmd/syncthing"
if flag.NArg() > 2 {
pkg = flag.Arg(1)
}
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
}
build(pkg, tags)
case "test":
pkg := "./..."
if flag.NArg() > 2 {
pkg = flag.Arg(1)
}
test(pkg)
case "assets":
assets()
case "xdr":
xdr()
case "translate":
translate()
case "transifex":
transifex()
case "deps":
deps()
case "tar":
buildTar()
case "zip":
buildZip()
case "clean":
clean()
default:
log.Fatalf("Unknown command %q", flag.Arg(0))
}
}
func check() error {
_, err := exec.LookPath("godep")
return err
}
func checkRequiredGoVersion() {
ver := run("go", "version")
re := regexp.MustCompile(`go version go(\d+\.\d+)`)
if m := re.FindSubmatch(ver); len(m) == 2 {
vs := string(m[1])
// This is a standard go build. Verify that it's new enough.
f, err := strconv.ParseFloat(vs, 64)
if err != nil {
log.Printf("*** Could not parse Go version out of %q.\n*** This isn't known to work, proceed at your own risk.", vs)
return
}
if f < minGoVersion {
log.Fatalf("*** Go version %.01f is less than required %.01f.\n*** This is known not to work, not proceeding.", f, minGoVersion)
}
} else {
log.Printf("*** Unknown Go version %q.\n*** This isn't known to work, proceed at your own risk.", ver)
}
}
func setup() {
runPrint("go", "get", "-v", "code.google.com/p/go.tools/cmd/cover")
runPrint("go", "get", "-v", "code.google.com/p/go.tools/cmd/vet")
runPrint("go", "get", "-v", "code.google.com/p/go.net/html")
runPrint("go", "get", "-v", "github.com/tools/godep")
}
func test(pkg string) {
runPrint("godep", "go", "test", "-short", "-timeout", "10s", pkg)
}
func install(pkg string) {
os.Setenv("GOBIN", "./bin")
setBuildEnv()
runPrint("godep", "go", "install", "-ldflags", ldflags(), pkg)
}
func build(pkg string, tags []string) {
rmr("syncthing", "syncthing.exe")
args := []string{"go", "build", "-ldflags", ldflags()}
if len(tags) > 0 {
args = append(args, "-tags", strings.Join(tags, ","))
}
args = append(args, pkg)
setBuildEnv()
runPrint("godep", args...)
}
func buildTar() {
name := archiveName()
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build("./cmd/syncthing", tags)
filename := name + ".tar.gz"
tarGz(filename, []archiveFile{
{"README.md", name + "/README.txt"},
{"LICENSE", name + "/LICENSE.txt"},
{"CONTRIBUTORS", name + "/CONTRIBUTORS.txt"},
{"syncthing", name + "/syncthing"},
})
log.Println(filename)
}
func buildZip() {
name := archiveName()
var tags []string
if noupgrade {
tags = []string{"noupgrade"}
name += "-noupgrade"
}
build("./cmd/syncthing", tags)
filename := name + ".zip"
zipFile(filename, []archiveFile{
{"README.md", name + "/README.txt"},
{"LICENSE", name + "/LICENSE.txt"},
{"CONTRIBUTORS", name + "/CONTRIBUTORS.txt"},
{"syncthing.exe", name + "/syncthing.exe"},
})
log.Println(filename)
}
func setBuildEnv() {
os.Setenv("GOOS", goos)
if strings.HasPrefix(goarch, "arm") {
os.Setenv("GOARCH", "arm")
os.Setenv("GOARM", goarch[4:])
} else {
os.Setenv("GOARCH", goarch)
}
if goarch == "386" {
os.Setenv("GO386", "387")
}
}
func assets() {
runPipe("auto/gui.files.go", "godep", "go", "run", "cmd/genassets/main.go", "gui")
}
func xdr() {
for _, f := range []string{"discover/packets", "files/leveldb", "protocol/message"} {
runPipe(f+"_xdr.go", "go", "run", "./Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go", "--", f+".go")
}
}
func translate() {
os.Chdir("gui/lang")
runPipe("lang-en-new.json", "go", "run", "../../cmd/translate/main.go", "lang-en.json", "../index.html")
os.Remove("lang-en.json")
err := os.Rename("lang-en-new.json", "lang-en.json")
if err != nil {
log.Fatal(err)
}
os.Chdir("../..")
}
func transifex() {
os.Chdir("gui/lang")
runPrint("go", "run", "../../cmd/transifexdl/main.go")
os.Chdir("../..")
assets()
}
func deps() {
rmr("Godeps")
runPrint("godep", "save", "./cmd/...")
}
func clean() {
rmr("bin", "Godeps/_workspace/pkg", "Godeps/_workspace/bin")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}
func ldflags() string {
var b bytes.Buffer
b.WriteString("-w")
b.WriteString(fmt.Sprintf(" -X main.Version %s", version()))
b.WriteString(fmt.Sprintf(" -X main.BuildStamp %d", buildStamp()))
b.WriteString(fmt.Sprintf(" -X main.BuildUser %s", buildUser()))
b.WriteString(fmt.Sprintf(" -X main.BuildHost %s", buildHost()))
b.WriteString(fmt.Sprintf(" -X main.BuildEnv %s", buildEnvironment()))
if strings.HasPrefix(goarch, "arm") {
b.WriteString(fmt.Sprintf(" -X main.GoArchExtra %s", goarch[3:]))
}
return b.String()
}
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
os.RemoveAll(path)
}
}
func version() string {
v := run("git", "describe", "--always", "--dirty")
v = versionRe.ReplaceAllFunc(v, func(s []byte) []byte {
s[0] = '+'
return s
})
return string(v)
}
func buildStamp() int64 {
bs := run("git", "show", "-s", "--format=%ct")
s, _ := strconv.ParseInt(string(bs), 10, 64)
return s
}
func buildUser() string {
u, err := user.Current()
if err != nil {
return "unknown-user"
}
return strings.Replace(u.Username, " ", "-", -1)
}
func buildHost() string {
h, err := os.Hostname()
if err != nil {
return "unknown-host"
}
return h
}
func buildEnvironment() string {
if v := os.Getenv("ENVIRONMENT"); len(v) > 0 {
return v
}
return "default"
}
func buildArch() string {
os := goos
if os == "darwin" {
os = "macosx"
}
return fmt.Sprintf("%s-%s", os, goarch)
}
func archiveName() string {
return fmt.Sprintf("syncthing-%s-%s", buildArch(), version())
}
func run(cmd string, args ...string) []byte {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
if err != nil {
log.Println(cmd, strings.Join(args, " "))
log.Println(string(bs))
log.Fatal(err)
}
return bytes.TrimSpace(bs)
}
func runPrint(cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "))
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = os.Stdout
ecmd.Stderr = os.Stderr
err := ecmd.Run()
if err != nil {
log.Fatal(err)
}
}
func runPipe(file, cmd string, args ...string) {
log.Println(cmd, strings.Join(args, " "), ">", file)
fd, err := os.Create(file)
if err != nil {
log.Fatal(err)
}
ecmd := exec.Command(cmd, args...)
ecmd.Stdout = fd
ecmd.Stderr = os.Stderr
err = ecmd.Run()
if err != nil {
log.Fatal(err)
}
fd.Close()
}
type archiveFile struct {
src string
dst string
}
func tarGz(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
gw := gzip.NewWriter(fd)
tw := tar.NewWriter(gw)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
h := &tar.Header{
Name: f.dst,
Size: info.Size(),
Mode: int64(info.Mode()),
ModTime: info.ModTime(),
}
err = tw.WriteHeader(h)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(tw, sf)
if err != nil {
log.Fatal(err)
}
sf.Close()
}
err = tw.Close()
if err != nil {
log.Fatal(err)
}
err = gw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}
func zipFile(out string, files []archiveFile) {
fd, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
zw := zip.NewWriter(fd)
for _, f := range files {
sf, err := os.Open(f.src)
if err != nil {
log.Fatal(err)
}
info, err := sf.Stat()
if err != nil {
log.Fatal(err)
}
fh, err := zip.FileInfoHeader(info)
if err != nil {
log.Fatal(err)
}
fh.Name = f.dst
fh.Method = zip.Deflate
if strings.HasSuffix(f.dst, ".txt") {
// Text file. Read it and convert line endings.
bs, err := ioutil.ReadAll(sf)
if err != nil {
log.Fatal(err)
}
bs = bytes.Replace(bs, []byte{'\n'}, []byte{'\n', '\r'}, -1)
fh.UncompressedSize = uint32(len(bs))
fh.UncompressedSize64 = uint64(len(bs))
of, err := zw.CreateHeader(fh)
if err != nil {
log.Fatal(err)
}
of.Write(bs)
} else {
// Binary file. Copy verbatim.
of, err := zw.CreateHeader(fh)
if err != nil {
log.Fatal(err)
}
_, err = io.Copy(of, sf)
if err != nil {
log.Fatal(err)
}
}
}
err = zw.Close()
if err != nil {
log.Fatal(err)
}
err = fd.Close()
if err != nil {
log.Fatal(err)
}
}

282
build.sh
View File

@@ -1,239 +1,105 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
export COPYFILE_DISABLE=true
export GO386=387 # Don't use SSE on 32 bit builds
distFiles=(README.md LICENSE CONTRIBUTORS) # apart from the binary itself
# replace "...-12-g123abc" with "...+12-g123abc" to remain semver compatible-ish
version=$(git describe --always --dirty)
version=$(echo "$version" | sed 's/-\([0-9]\{1,3\}-g[0-9a-f]\{5,10\}\)/+\1/')
date=$(git show -s --format=%ct)
user=$(whoami)
host=$(hostname)
host=${host%%.*}
bldenv=${ENVIRONMENT:-default}
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host -X main.BuildEnv $bldenv"
check() {
if ! command -v godep >/dev/null ; then
echo "Error: no godep. Try \"$0 setup\"."
exit 1
fi
}
build() {
check
godep go build $* -ldflags "$ldflags" ./cmd/syncthing
}
assets() {
check
godep go run cmd/genassets/main.go gui > auto/gui.files.go
}
test-cov() {
echo "mode: set" > coverage.out
fail=0
for dir in $(go list ./...) ; do
godep go test -coverprofile=profile.out $dir || fail=1
if [ -f profile.out ] ; then
grep -v "mode: set" profile.out >> coverage.out
rm profile.out
fi
done
exit $fail
}
test() {
check
go vet ./...
godep go test -cpu=1,2,4 $* ./...
}
sign() {
if git describe --exact-match 2>/dev/null >/dev/null ; then
# HEAD is a tag
id=BCE524C7
if gpg --list-keys "$id" >/dev/null 2>&1 ; then
gpg -ab -u "$id" "$1"
fi
fi
}
tarDist() {
name="$1"
rm -rf "$name"
mkdir -p "$name"
cp syncthing "${distFiles[@]}" "$name"
sign "$name/syncthing"
tar zcvf "$name.tar.gz" "$name"
rm -rf "$name"
}
zipDist() {
name="$1"
rm -rf "$name"
mkdir -p "$name"
for f in "${distFiles[@]}" ; do
GOARCH="" GOOS="" go run cmd/todos/main.go < "$f" > "$name/$f.txt"
done
cp syncthing.exe "$name"
sign "$name/syncthing.exe"
zip -r "$name.zip" "$name"
rm -rf "$name"
}
deps() {
check
godep save ./cmd/...
}
setup() {
go get -v code.google.com/p/go.tools/cmd/cover
go get -v code.google.com/p/go.tools/cmd/vet
go get -v github.com/mattn/goveralls
go get -v github.com/tools/godep
}
xdr() {
for f in discover/packets files/leveldb protocol/message ; do
go run "$(godep path)/src/github.com/calmh/xdr/cmd/genxdr/main.go" -- "${f}.go" > "${f}_xdr.go"
done
}
translate() {
pushd gui
go run ../cmd/translate/main.go lang-en.json < index.html > lang-en-new.json
mv lang-en-new.json lang-en.json
popd
}
transifex() {
pushd gui
go run ../cmd/transifexdl/main.go
popd
assets
}
case "$1" in
"")
shift
export GOBIN=$(pwd)/bin
godep go install $* -ldflags "$ldflags" ./cmd/...
case "${1:-default}" in
default)
go run build.go
;;
race)
build -race
;;
guidev)
echo "Syncthing is already built for GUI developments. Try:"
echo " STGUIASSETS=~/someDir/gui syncthing"
clean)
go run build.go "$1"
;;
test)
test -short
;;
ulimit -t 60 &>/dev/null || true
ulimit -d 512000 &>/dev/null || true
ulimit -m 512000 &>/dev/null || true
test-cov)
test-cov
go run build.go "$1"
;;
tar)
rm -f *.tar.gz *.zip
test -short || exit 1
assets
build
eval $(go env)
name="syncthing-${GOOS/darwin/macosx}-$GOARCH-$version"
tarDist "$name"
;;
all)
rm -f *.tar.gz *.zip
test -short || exit 1
assets
for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 solaris-amd64 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}
build
name="syncthing-${os/darwin/macosx}-$version"
case $GOOS in
windows)
zipDist "$name"
rm -f syncthing.exe
;;
*)
tarDist "$name"
rm -f syncthing
;;
esac
done
export GOOS=linux
export GOARCH=arm
origldflags="$ldflags"
export GOARM=7
ldflags="$origldflags -X main.GoArchExtra v7"
build
tarDist "syncthing-linux-armv7-$version"
export GOARM=6
ldflags="$origldflags -X main.GoArchExtra v6"
build
tarDist "syncthing-linux-armv6-$version"
export GOARM=5
ldflags="$origldflags -X main.GoArchExtra v5"
build
tarDist "syncthing-linux-armv5-$version"
;;
upload)
tag=$(git describe)
shopt -s nullglob
for f in *.tar.gz *.zip *.asc ; do
relup syncthing/syncthing "$tag" "$f"
done
go run build.go "$1"
;;
deps)
deps
go run build.go "$1"
;;
assets)
assets
;;
setup)
setup
go run build.go "$1"
;;
xdr)
xdr
go run build.go "$1"
;;
translate)
translate
go run build.go "$1"
;;
transifex)
transifex
go run build.go "$1"
;;
noupgrade)
go run build.go -no-upgrade tar
;;
all)
go run build.go -goos linux -goarch amd64 tar
go run build.go -goos linux -goarch 386 tar
go run build.go -goos linux -goarch armv5 tar
go run build.go -goos linux -goarch armv6 tar
go run build.go -goos linux -goarch armv7 tar
go run build.go -goos freebsd -goarch amd64 tar
go run build.go -goos freebsd -goarch 386 tar
go run build.go -goos darwin -goarch amd64 tar
go run build.go -goos windows -goarch amd64 zip
go run build.go -goos windows -goarch 386 zip
;;
setup)
echo "Don't worry, just build."
;;
test-cov)
ulimit -t 60 &>/dev/null || true
ulimit -d 512000 &>/dev/null || true
ulimit -m 512000 &>/dev/null || true
go get github.com/axw/gocov/gocov
go get github.com/AlekSi/gocov-xml
echo "mode: set" > coverage.out
fail=0
# For every package in the repo
for dir in $(go list ./...) ; do
# run the tests
godep go test -coverprofile=profile.out $dir
if [ -f profile.out ] ; then
# and if there was test output, append it to coverage.out
grep -v "mode: set" profile.out >> coverage.out
rm profile.out
fi
done
gocov convert coverage.out | gocov-xml > coverage.xml
# This is usually run from within Jenkins. If it is, we need to
# tweak the paths in coverage.xml so cobertura finds the
# source.
if [[ "${WORKSPACE:-default}" != "default" ]] ; then
sed "s#$WORKSPACE##g" < coverage.xml > coverage.xml.new && mv coverage.xml.new coverage.xml
fi
;;
*)
echo "Unknown build parameter $1"
echo "Unknown build command $1"
;;
esac

29
check-contrib.sh Executable file
View File

@@ -0,0 +1,29 @@
#!/bin/bash
missing-contribs() {
for email in $(git log --format=%ae master | grep -v jakob@nym.se | sort | uniq) ; do
grep -q "$email" CONTRIBUTORS || echo $email
done
}
no-docs-typos() {
# Commits that are known to not change code
grep -v f2459ef3319b2f060dbcdacd0c35a1788a94b8bd |\
grep -v b61f418bf2d1f7d5a9d7088a20a2a448e5e66801 |\
grep -v f0621207e3953711f9ab86d99724f1d0faac45b1 |\
grep -v f1120d7aa936c0658429edef0037792520b46334
}
print-missing-contribs() {
for email in $(missing-contribs) ; do
git log --author="$email" --format="%H %ae %s" | no-docs-typos
done
}
print-missing-copyright() {
find . -name \*.go | xargs grep -L 'Copyright (C)' | grep -v Godeps
}
print-missing-contribs
print-missing-copyright

View File

@@ -9,8 +9,8 @@ package main
import (
"bytes"
"compress/gzip"
"encoding/base64"
"flag"
"fmt"
"go/format"
"io"
"os"
@@ -23,27 +23,27 @@ var tpl = template.Must(template.New("assets").Parse(`package auto
import (
"bytes"
"compress/gzip"
"encoding/hex"
"encoding/base64"
"io/ioutil"
)
var Assets = make(map[string][]byte)
func init() {
func Assets() map[string][]byte {
var assets = make(map[string][]byte, {{.assets | len}})
var bs []byte
var gr *gzip.Reader
{{range $asset := .assets}}
bs, _ = hex.DecodeString("{{$asset.HexData}}")
bs, _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
gr, _ = gzip.NewReader(bytes.NewBuffer(bs))
bs, _ = ioutil.ReadAll(gr)
Assets["{{$asset.Name}}"] = bs
assets["{{$asset.Name}}"] = bs
{{end}}
return assets
}
`))
type asset struct {
Name string
HexData string
Name string
Data string
}
var assets []asset
@@ -69,8 +69,8 @@ func walkerFor(basePath string) filepath.WalkFunc {
name, _ = filepath.Rel(basePath, name)
assets = append(assets, asset{
Name: filepath.ToSlash(name),
HexData: fmt.Sprintf("%x", buf.Bytes()),
Name: filepath.ToSlash(name),
Data: base64.StdEncoding.EncodeToString(buf.Bytes()),
})
}

View File

@@ -32,7 +32,8 @@ func main() {
if *node == "" {
log.Printf("*** Global index for repo %q", *repo)
fs.WithGlobal(func(f protocol.FileInfo) bool {
fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
fmt.Println("\t", fs.Availability(f.Name))
return true
@@ -43,7 +44,8 @@ func main() {
log.Fatal(err)
}
log.Printf("*** Have index for repo %q node %q", *repo, n)
fs.WithHave(n, func(f protocol.FileInfo) bool {
fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
return true
})

View File

@@ -5,13 +5,10 @@
package main
import (
"bytes"
"encoding/base64"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"math/rand"
"mime"
"net"
"net/http"
@@ -24,7 +21,6 @@ import (
"sync"
"time"
"crypto/tls"
"code.google.com/p/go.crypto/bcrypt"
"github.com/syncthing/syncthing/auto"
"github.com/syncthing/syncthing/config"
@@ -45,8 +41,6 @@ var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
static func(http.ResponseWriter, *http.Request, *log.Logger)
apiKey string
modt = time.Now().UTC().Format(http.TimeFormat)
eventSub *events.BufferedSubscription
)
@@ -90,9 +84,6 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
}
}
apiKey = cfg.APIKey
loadCsrfTokens()
// The GET handlers
getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/completion", withModel(m, restGetCompletion))
@@ -111,6 +102,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
getRestMux.HandleFunc("/rest/system", restGetSystem)
getRestMux.HandleFunc("/rest/upgrade", restGetUpgrade)
getRestMux.HandleFunc("/rest/version", restGetVersion)
getRestMux.HandleFunc("/rest/stats/node", withModel(m, restGetNodeStats))
// Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", withModel(m, restGetPeerCompletion))
@@ -126,6 +118,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
@@ -141,11 +134,14 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
// Wrap everything in CSRF protection. The /rest prefix should be
// protected, other requests will grant cookies.
handler := csrfMiddleware("/rest", mux)
handler := csrfMiddleware("/rest", cfg.APIKey, mux)
// Add our version as a header to responses
handler = withVersionMiddleware(handler)
// Wrap everything in basic auth, if user/password is set.
if len(cfg.User) > 0 {
handler = basicAuthMiddleware(cfg.User, cfg.Password, handler)
handler = basicAuthAndSessionMiddleware(cfg, handler)
}
go http.Serve(listener, handler)
@@ -172,6 +168,13 @@ func noCacheMiddleware(h http.Handler) http.Handler {
})
}
func withVersionMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Syncthing-Version", Version)
h.ServeHTTP(w, r)
})
}
func withModel(m *model.Model, h func(m *model.Model, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
h(m, w, r)
@@ -245,7 +248,7 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
m.Override(repo)
go m.Override(repo)
}
func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
@@ -264,6 +267,12 @@ func restGetConnections(m *model.Model, w http.ResponseWriter, r *http.Request)
json.NewEncoder(w).Encode(res)
}
func restGetNodeStats(m *model.Model, w http.ResponseWriter, r *http.Request) {
var res = m.NodeStatistics()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
func restGetConfig(w http.ResponseWriter, r *http.Request) {
encCfg := cfg
if encCfg.GUI.Password != "" {
@@ -277,7 +286,9 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
var newCfg config.Configuration
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln(err)
l.Warnln("decoding posted config:", err)
http.Error(w, err.Error(), 500)
return
} else {
if newCfg.GUI.Password == "" {
// Leave it empty
@@ -286,7 +297,9 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
} else {
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
if err != nil {
l.Warnln(err)
l.Warnln("bcrypting password:", err)
http.Error(w, err.Error(), 500)
return
} else {
newCfg.GUI.Password = string(hash)
}
@@ -341,8 +354,9 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
// Activate and save
newCfg.Location = cfg.Location
newCfg.Save()
cfg = newCfg
saveConfig()
}
}
@@ -454,12 +468,18 @@ func restGetEvents(w http.ResponseWriter, r *http.Request) {
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
// Flush before blocking, to indicate that we've received the request
// and that it should not be retried.
f := w.(http.Flusher)
f.Flush()
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(evs)
}
@@ -498,9 +518,8 @@ func restGetLang(w http.ResponseWriter, r *http.Request) {
lang := r.Header.Get("Accept-Language")
var langs []string
for _, l := range strings.Split(lang, ",") {
if len(l) >= 2 {
langs = append(langs, l[:2])
}
parts := strings.SplitN(l, ";", 2)
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(langs)
@@ -509,7 +528,7 @@ func restGetLang(w http.ResponseWriter, r *http.Request) {
func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
rel, err := upgrade.LatestRelease(strings.Contains(Version, "-beta"))
if err != nil {
l.Warnln(err)
l.Warnln("getting latest release:", err)
http.Error(w, err.Error(), 500)
return
}
@@ -517,7 +536,7 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln(err)
l.Warnln("upgrading:", err)
http.Error(w, err.Error(), 500)
return
}
@@ -526,6 +545,16 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
}
}
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
repo := qs.Get("repo")
sub := qs.Get("sub")
err := m.ScanRepoSub(repo, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
@@ -564,57 +593,9 @@ func restGetPeerCompletion(m *model.Model, w http.ResponseWriter, r *http.Reques
json.NewEncoder(w).Encode(comp)
}
func basicAuthMiddleware(username string, passhash string, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if validAPIKey(r.Header.Get("X-API-Key")) {
next.ServeHTTP(w, r)
return
}
error := func() {
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
http.Error(w, "Not Authorized", http.StatusUnauthorized)
}
hdr := r.Header.Get("Authorization")
if !strings.HasPrefix(hdr, "Basic ") {
error()
return
}
hdr = hdr[6:]
bs, err := base64.StdEncoding.DecodeString(hdr)
if err != nil {
error()
return
}
fields := bytes.SplitN(bs, []byte(":"), 2)
if len(fields) != 2 {
error()
return
}
if string(fields[0]) != username {
error()
return
}
if err := bcrypt.CompareHashAndPassword([]byte(passhash), fields[1]); err != nil {
error()
return
}
next.ServeHTTP(w, r)
})
}
func validAPIKey(k string) bool {
return len(apiKey) > 0 && k == apiKey
}
func embeddedStatic(assetDir string) http.Handler {
assets := auto.Assets()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
file := r.URL.Path
@@ -635,13 +616,13 @@ func embeddedStatic(assetDir string) http.Handler {
}
}
bs, ok := auto.Assets[file]
bs, ok := assets[file]
if !ok {
http.NotFound(w, r)
return
}
mtype := mime.TypeByExtension(filepath.Ext(r.URL.Path))
mtype := mimeTypeForFile(file)
if len(mtype) != 0 {
w.Header().Set("Content-Type", mtype)
}
@@ -651,3 +632,28 @@ func embeddedStatic(assetDir string) http.Handler {
w.Write(bs)
})
}
func mimeTypeForFile(file string) string {
// We use a built-in table of the common types since the system
// TypeByExtension might be unreliable. But if we don't know, we delegate
// to the system.
ext := filepath.Ext(file)
switch ext {
case ".htm", ".html":
return "text/html"
case ".css":
return "text/css"
case ".js":
return "application/javascript"
case ".json":
return "application/json"
case ".png":
return "image/png"
case ".ttf":
return "application/x-font-ttf"
case ".woff":
return "application/x-font-woff"
default:
return mime.TypeByExtension(ext)
}
}

90
cmd/syncthing/gui_auth.go Executable file
View File

@@ -0,0 +1,90 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/base64"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/syncthing/syncthing/config"
)
var (
sessions = make(map[string]bool)
sessionsMut sync.Mutex
)
func basicAuthAndSessionMiddleware(cfg config.GUIConfiguration, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if cfg.APIKey != "" && r.Header.Get("X-API-Key") == cfg.APIKey {
next.ServeHTTP(w, r)
return
}
cookie, err := r.Cookie("sessionid")
if err == nil && cookie != nil {
sessionsMut.Lock()
_, ok := sessions[cookie.Value]
sessionsMut.Unlock()
if ok {
next.ServeHTTP(w, r)
return
}
}
error := func() {
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
w.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
http.Error(w, "Not Authorized", http.StatusUnauthorized)
}
hdr := r.Header.Get("Authorization")
if !strings.HasPrefix(hdr, "Basic ") {
error()
return
}
hdr = hdr[6:]
bs, err := base64.StdEncoding.DecodeString(hdr)
if err != nil {
error()
return
}
fields := bytes.SplitN(bs, []byte(":"), 2)
if len(fields) != 2 {
error()
return
}
if string(fields[0]) != cfg.User {
error()
return
}
if err := bcrypt.CompareHashAndPassword([]byte(cfg.Password), fields[1]); err != nil {
error()
return
}
sessionid := randomString(32)
sessionsMut.Lock()
sessions[sessionid] = true
sessionsMut.Unlock()
http.SetCookie(w, &http.Cookie{
Name: "sessionid",
Value: sessionid,
MaxAge: 0,
})
next.ServeHTTP(w, r)
})
}
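A minimal, hypothetical wiring sketch for this middleware (handler name, address and credentials are illustrative; the bcrypt call mirrors what the config loader does):
hash, _ := bcrypt.GenerateFromPassword([]byte("secret"), 0)
guiCfg := config.GUIConfiguration{
	User:     "admin",
	Password: string(hash),
	APIKey:   "abc123",
}
// Requests must now carry X-API-Key: abc123, a known sessionid cookie, or
// Basic credentials admin:secret; anything else gets a delayed 401 with a
// WWW-Authenticate challenge.
handler := basicAuthAndSessionMiddleware(guiCfg, someRESTHandler) // someRESTHandler: any http.Handler, hypothetical
http.ListenAndServe("127.0.0.1:8080", handler)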

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
@@ -21,10 +25,11 @@ var csrfMut sync.Mutex
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
func csrfMiddleware(prefix string, next http.Handler) http.Handler {
func csrfMiddleware(prefix, apiKey string, next http.Handler) http.Handler {
loadCsrfTokens()
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Allow requests carrying a valid API key
if validAPIKey(r.Header.Get("X-API-Key")) {
if apiKey != "" && r.Header.Get("X-API-Key") == apiKey {
next.ServeHTTP(w, r)
return
}
@@ -72,13 +77,7 @@ func validCsrfToken(token string) bool {
}
func newCsrfToken() string {
bs := make([]byte, 30)
_, err := rand.Reader.Read(bs)
if err != nil {
l.Fatalln(err)
}
token := base64.StdEncoding.EncodeToString(bs)
token := randomString(30)
csrfMut.Lock()
csrfTokens = append(csrfTokens, token)
@@ -130,3 +129,13 @@ func loadCsrfTokens() {
csrfTokens = append(csrfTokens, s.Text())
}
}
func randomString(len int) string {
bs := make([]byte, len)
_, err := rand.Reader.Read(bs)
if err != nil {
l.Fatalln(err)
}
return base64.StdEncoding.EncodeToString(bs)
}

View File

@@ -79,7 +79,7 @@ func trackCPUUsage() {
for _ = range time.NewTicker(time.Second).C {
err := solarisPrusage(pid, &rusage)
if err != nil {
l.Warnln(err)
l.Warnln("getting prusage:", err)
continue
}
curTime := time.Now().UnixNano()

47
cmd/syncthing/heapprof.go Normal file

@@ -0,0 +1,47 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"os"
"runtime"
"runtime/pprof"
"syscall"
"time"
)
func init() {
if innerProcess && os.Getenv("STHEAPPROFILE") != "" {
l.Debugln("Starting heap profiling")
go saveHeapProfiles()
}
}
func saveHeapProfiles() {
runtime.MemProfileRate = 1
var memstats, prevMemstats runtime.MemStats
t0 := time.Now()
for t := range time.NewTicker(250 * time.Millisecond).C {
startms := int(t.Sub(t0).Seconds() * 1000)
runtime.ReadMemStats(&memstats)
if memstats.HeapInuse > prevMemstats.HeapInuse {
fd, err := os.Create(fmt.Sprintf("heap-%05d-%07d.pprof", syscall.Getpid(), startms))
if err != nil {
panic(err)
}
err = pprof.WriteHeapProfile(fd)
if err != nil {
panic(err)
}
err = fd.Close()
if err != nil {
panic(err)
}
prevMemstats = memstats
}
}
}
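A hedged usage note: with this file in place, starting the inner process with STHEAPPROFILE set to any non-empty value makes it write heap-<pid>-<ms>.pprof files to the working directory whenever heap usage exceeds the previously recorded value; a resulting file can then be examined with, for example, go tool pprof syncthing heap-01234-0001000.pprof (file name illustrative).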

View File

@@ -0,0 +1,24 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"io"
"github.com/juju/ratelimit"
)
type limitedReader struct {
r io.Reader
bucket *ratelimit.Bucket
}
func (r *limitedReader) Read(buf []byte) (int, error) {
n, err := r.r.Read(buf)
if r.bucket != nil {
r.bucket.Wait(int64(n))
}
return n, err
}
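A hedged sketch of how this reader pairs with a token bucket on an incoming connection (conn is assumed to be a net.Conn; the rate is illustrative and mirrors the readRateLimit wiring in main.go, with limitedWriter as the pre-existing write-side counterpart):
// Allow roughly 100 KB/s inbound with a five second burst allowance.
bucket := ratelimit.NewBucketWithRate(float64(100*1000), int64(5*100*1000))
var rd io.Reader = conn
if bucket != nil {
	rd = &limitedReader{r: conn, bucket: bucket}
}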

View File

@@ -16,7 +16,6 @@ import (
"net/http"
_ "net/http/pprof"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
@@ -26,17 +25,19 @@ import (
"strings"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/juju/ratelimit"
"github.com/syncthing/syncthing/config"
"github.com/syncthing/syncthing/discover"
"github.com/syncthing/syncthing/events"
"github.com/syncthing/syncthing/files"
"github.com/syncthing/syncthing/logger"
"github.com/syncthing/syncthing/model"
"github.com/syncthing/syncthing/osutil"
"github.com/syncthing/syncthing/protocol"
"github.com/syncthing/syncthing/upgrade"
"github.com/syncthing/syncthing/upnp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
var (
@@ -50,7 +51,15 @@ var (
GoArchExtra string // "", "v5", "v6", "v7"
)
const (
exitSuccess = 0
exitError = 1
exitNoUpgradeAvailable = 2
exitRestarting = 3
)
var l = logger.DefaultLogger
var innerProcess = os.Getenv("STNORESTART") != ""
func init() {
if Version != "unknown-dev" {
@@ -73,15 +82,16 @@ func init() {
}
var (
cfg config.Configuration
myID protocol.NodeID
confDir string
logFlags int = log.Ltime
rateBucket *ratelimit.Bucket
stop = make(chan bool)
discoverer *discover.Discoverer
lockConn *net.TCPListener
lockPort int
cfg config.Configuration
myID protocol.NodeID
confDir string
logFlags int = log.Ltime
writeRateLimit *ratelimit.Bucket
readRateLimit *ratelimit.Bucket
stop = make(chan int)
discoverer *discover.Discoverer
externalPort int
cert tls.Certificate
)
const (
@@ -100,13 +110,19 @@ show time only (2).
The following environment variables are interpreted by syncthing:
STGUIADDRESS Override GUI listen address set in config. Expects protocol type
followed by hostname or an IP address, followed by a port, such
as "https://127.0.0.1:8888".
STGUIAUTH Override GUI authentication credentials set in config. Expects
a colon separated username and password, such as "admin:secret".
STGUIAPIKEY Override GUI API key set in config.
STNORESTART Do not attempt to restart when requested to, instead just exit.
Set this variable when running under a service manager such as
runit, launchd, etc.
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STTRACE A comma separated string of facilities to trace. The valid
facility strings:
- "beacon" (the beacon package)
@@ -116,36 +132,57 @@ The following enviroment variables are interpreted by syncthing:
- "net" (the main package; connections & network messages)
- "model" (the model package)
- "scanner" (the scanner package)
- "stats" (the stats package)
- "upnp" (the upnp package)
- "xdr" (the xdr package)
- "all" (all of the above)
STCPUPROFILE Write CPU profile to the specified file.
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets.
STDEADLOCKTIMEOUT Alter deadlock detection timeout (seconds; default 1200).`
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STCPUPROFILE Write a CPU profile to cpu-$pid.pprof on exit.
STHEAPPROFILE Write heap profiles to heap-$pid-$timestamp.pprof each time
heap usage increases.
STPERFSTATS Write running performance statistics to perf-$pid.csv. Not
supported on Windows.
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores.`
)
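An illustrative invocation combining a couple of these variables (values are examples only):
STTRACE=net,model STCPUPROFILE=1 syncthing
This traces connection handling and the model package for the run, and writes cpu-<pid>.pprof on exit.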
func init() {
rand.Seed(time.Now().UnixNano())
}
// Command line options
var (
reset bool
showVersion bool
doUpgrade bool
doUpgradeCheck bool
noBrowser bool
generateDir string
guiAddress string
guiAuthentication string
guiAPIKey string
)
func main() {
var reset bool
var showVersion bool
var doUpgrade bool
var doUpgradeCheck bool
var generateDir string
var noBrowser bool
flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
flag.BoolVar(&noBrowser, "no-browser", false, "Do not start browser")
flag.IntVar(&logFlags, "logflags", logFlags, "Set log flags")
flag.StringVar(&generateDir, "generate", "", "Generate key in specified dir")
flag.StringVar(&guiAddress, "gui-address", "", "Override GUI address")
flag.StringVar(&guiAuthentication, "gui-authentication", "", "Override GUI authentication. Expects 'username:password'")
flag.StringVar(&guiAPIKey, "gui-apikey", "", "Override GUI API key")
flag.IntVar(&logFlags, "logflags", logFlags, "Set log flags")
flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
flag.Parse()
@@ -189,7 +226,7 @@ func main() {
if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
l.Infof("No upgrade available (current %q >= latest %q).", Version, rel.Tag)
os.Exit(2)
os.Exit(exitNoUpgradeAvailable)
}
l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag)
@@ -206,12 +243,27 @@ func main() {
}
}
var err error
lockPort, err = getLockPort()
if err != nil {
l.Fatalln("Opening lock port:", err)
if reset {
resetRepositories()
return
}
confDir = expandTilde(confDir)
if info, err := os.Stat(confDir); err == nil && !info.IsDir() {
l.Fatalln("Config directory", confDir, "is not a directory")
}
if os.Getenv("STNORESTART") != "" {
syncthingMain()
} else {
monitorMain()
}
}
func syncthingMain() {
var err error
if len(os.Getenv("GOGC")) == 0 {
debug.SetGCPercent(25)
}
@@ -220,11 +272,9 @@ func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
}
confDir = expandTilde(confDir)
events.Default.Log(events.Starting, map[string]string{"home": confDir})
if _, err := os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
if _, err = os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
// We are supposed to use the default configuration directory. It
// doesn't exist. In the past our default has been ~/.syncthing, so if
// that directory exists we move it to the new default location and
@@ -248,7 +298,7 @@ func main() {
// Ensure that our home directory exists and that we have a certificate and key.
ensureDir(confDir, 0700)
cert, err := loadCert(confDir, "")
cert, err = loadCert(confDir, "")
if err != nil {
newCertificate(confDir, "")
cert, err = loadCert(confDir, "")
@@ -264,38 +314,39 @@ func main() {
// Prepare to be able to save configuration
cfgFile := filepath.Join(confDir, "config.xml")
go saveConfigLoop(cfgFile)
var myName string
// Load the configuration file, if it exists.
// If it does not, create a template.
cf, err := os.Open(cfgFile)
cfg, err = config.Load(cfgFile, myID)
if err == nil {
// Read config.xml
cfg, err = config.Load(cf, myID)
if err != nil {
l.Fatalln(err)
myCfg := cfg.GetNodeConfiguration(myID)
if myCfg == nil || myCfg.Name == "" {
myName, _ = os.Hostname()
} else {
myName = myCfg.Name
}
cf.Close()
} else {
l.Infoln("No config file; starting with empty defaults")
name, _ := os.Hostname()
myName, _ = os.Hostname()
defaultRepo := filepath.Join(getHomeDir(), "Sync")
ensureDir(defaultRepo, 0755)
cfg, err = config.Load(nil, myID)
cfg = config.New(cfgFile, myID)
cfg.Repositories = []config.RepositoryConfiguration{
{
ID: "default",
Directory: defaultRepo,
Nodes: []config.NodeConfiguration{{NodeID: myID}},
ID: "default",
Directory: defaultRepo,
RescanIntervalS: 60,
Nodes: []config.RepositoryNodeConfiguration{{NodeID: myID}},
},
}
cfg.Nodes = []config.NodeConfiguration{
{
NodeID: myID,
Addresses: []string{"dynamic"},
Name: name,
Name: myName,
},
}
@@ -307,19 +358,10 @@ func main() {
l.FatalErr(err)
cfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}
saveConfig()
cfg.Save()
l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
}
if reset {
resetRepositories()
return
}
if len(os.Getenv("STRESTART")) > 0 {
waitForParentExit()
}
if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
go func() {
l.Debugln("Starting profiler on", profiler)
@@ -344,21 +386,34 @@ func main() {
MinVersion: tls.VersionTLS12,
}
// If the write rate should be limited, set up a rate limiter for it.
// If the read or write rate should be limited, set up a rate limiter for it.
// This will be used on connections created in the connect and listen routines.
if cfg.Options.MaxSendKbps > 0 {
rateBucket = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
}
if cfg.Options.MaxRecvKbps > 0 {
readRateLimit = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxRecvKbps), int64(5*1000*cfg.Options.MaxRecvKbps))
}
// If this is the first time the user runs v0.9, archive the old indexes and config.
archiveLegacyConfig()
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
if err != nil {
l.Fatalln("leveldb.OpenFile():", err)
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
}
m := model.NewModel(confDir, &cfg, "syncthing", Version, db)
// Remove database entries for repos that no longer exist in the config
repoMap := cfg.RepoMap()
for _, repo := range files.ListRepos(db) {
if _, ok := repoMap[repo]; !ok {
l.Infof("Cleaning data for dropped repo %q", repo)
files.DropRepo(db, repo)
}
}
m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)
nextRepo:
for i, repo := range cfg.Repositories {
@@ -375,6 +430,7 @@ nextRepo:
// that all files have been deleted which might not be the case,
// so mark it as invalid instead.
if err != nil || !fi.IsDir() {
l.Warnf("Stopping repository %q - directory missing, but has files in index", repo.ID)
cfg.Repositories[i].Invalid = "repo directory missing"
continue nextRepo
}
@@ -387,6 +443,7 @@ nextRepo:
if err != nil {
// If there was another error or we could not create the
// directory, the repository is invalid.
l.Warnf("Stopping repository %q - %v", err)
cfg.Repositories[i].Invalid = err.Error()
continue nextRepo
}
@@ -395,10 +452,13 @@ nextRepo:
}
// GUI
if cfg.GUI.Enabled && cfg.GUI.Address != "" {
addr, err := net.ResolveTCPAddr("tcp", cfg.GUI.Address)
guiCfg := overrideGUIConfig(cfg.GUI, guiAddress, guiAuthentication, guiAPIKey)
if guiCfg.Enabled && guiCfg.Address != "" {
addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
if err != nil {
l.Fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
} else {
var hostOpen, hostShow string
switch {
@@ -414,12 +474,12 @@ nextRepo:
}
var proto = "http"
if cfg.GUI.UseTLS {
if guiCfg.UseTLS {
proto = "https"
}
l.Infof("Starting web GUI on %s://%s:%d/", proto, hostShow, addr.Port)
err := startGUI(cfg.GUI, os.Getenv("STGUIASSETS"), m)
l.Infof("Starting web GUI on %s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
err := startGUI(guiCfg, os.Getenv("STGUIASSETS"), m)
if err != nil {
l.Fatalln("Cannot start GUI:", err)
}
@@ -433,7 +493,13 @@ nextRepo:
// start needing a bunch of files which are nowhere to be found. This
// needs to be changed when we correctly do persistent indexes.
for _, repoCfg := range cfg.Repositories {
if repoCfg.Invalid != "" {
continue
}
for _, node := range repoCfg.NodeIDs() {
if node == myID {
continue
}
m.Index(node, repoCfg.ID, nil)
}
}
@@ -468,13 +534,18 @@ nextRepo:
}
}
// The default port we announce, possibly modified by setupUPnP next.
addr, err := net.ResolveTCPAddr("tcp", cfg.Options.ListenAddress[0])
if err != nil {
l.Fatalln("Bad listen address:", err)
}
externalPort = addr.Port
// UPnP
var externalPort = 0
if cfg.Options.UPnPEnabled {
// We seed the random number generator with the node ID to get a
// repeatable sequence of random external ports.
externalPort = setupUPnP(rand.NewSource(certSeed(cert.Certificate[0])))
setupUPnP()
}
// Routine to connect out to configured nodes
@@ -498,7 +569,7 @@ nextRepo:
}
if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
f, err := os.Create(cpuprof)
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
if err != nil {
log.Fatal(err)
}
@@ -527,12 +598,17 @@ nextRepo:
}()
}
if cfg.Options.RestartOnWakeup {
go standbyMonitor()
}
events.Default.Log(events.StartupComplete, nil)
go generateEvents()
<-stop
code := <-stop
l.Okln("Exiting")
os.Exit(code)
}
func generateEvents() {
@@ -542,27 +618,7 @@ func generateEvents() {
}
}
func waitForParentExit() {
l.Infoln("Waiting for parent to exit...")
lockPortStr := os.Getenv("STRESTART")
lockPort, err := strconv.Atoi(lockPortStr)
if err != nil {
l.Warnln("Invalid lock port %q: %v", lockPortStr, err)
}
// Wait for the listen address to become free, indicating that the parent has exited.
for {
ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", lockPort))
if err == nil {
ln.Close()
break
}
time.Sleep(250 * time.Millisecond)
}
l.Infoln("Continuing")
}
func setupUPnP(r rand.Source) int {
var externalPort = 0
func setupUPnP() {
if len(cfg.Options.ListenAddress) == 1 {
_, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
if err != nil {
@@ -572,17 +628,11 @@ func setupUPnP(r rand.Source) int {
port, _ := strconv.Atoi(portStr)
igd, err := upnp.Discover()
if err == nil {
for i := 0; i < 10; i++ {
r := 1024 + int(r.Int63()%(65535-1024))
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
if err == nil {
externalPort = r
l.Infoln("Created UPnP port mapping - external port", externalPort)
break
}
}
externalPort = setupExternalPort(igd, port)
if externalPort == 0 {
l.Warnln("Failed to create UPnP port mapping")
} else {
l.Infoln("Created UPnP port mapping - external port", externalPort)
}
} else {
l.Infof("No UPnP gateway detected")
@@ -590,11 +640,60 @@ func setupUPnP(r rand.Source) int {
l.Debugf("UPnP: %v", err)
}
}
if cfg.Options.UPnPRenewal > 0 {
go renewUPnP(port)
}
}
} else {
l.Warnln("Multiple listening addresses; not attempting UPnP port mapping")
}
return externalPort
}
func setupExternalPort(igd *upnp.IGD, port int) int {
// We seed the random number generator with the node ID to get a
// repeatable sequence of random external ports.
rnd := rand.NewSource(certSeed(cert.Certificate[0]))
for i := 0; i < 10; i++ {
r := 1024 + int(rnd.Int63()%(65535-1024))
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", cfg.Options.UPnPLease*60)
if err == nil {
return r
}
}
return 0
}
func renewUPnP(port int) {
for {
time.Sleep(time.Duration(cfg.Options.UPnPRenewal) * time.Minute)
igd, err := upnp.Discover()
if err != nil {
continue
}
// Just renew the same port that we already have
if externalPort != 0 {
err = igd.AddPortMapping(upnp.TCP, externalPort, port, "syncthing", cfg.Options.UPnPLease*60)
if err == nil {
l.Infoln("Renewed UPnP port mapping - external port", externalPort)
continue
}
}
// Something strange has happened. We didn't have an external port before?
// Or perhaps the gateway has changed?
// Retry the same port sequence from the beginning.
r := setupExternalPort(igd, port)
if r != 0 {
externalPort = r
l.Infoln("Updated UPnP port mapping - external port", externalPort)
discoverer.StopGlobal()
discoverer.StartGlobal(cfg.Options.GlobalAnnServer, uint16(r))
continue
}
l.Warnln("Failed to update UPnP port mapping - external port", externalPort)
}
}
func resetRepositories() {
@@ -639,7 +738,7 @@ func archiveLegacyConfig() {
l.Warnf("Cannot archive config:", err)
return
}
defer src.Close()
defer dst.Close()
l.Infoln("Archiving config.xml")
io.Copy(dst, src)
@@ -648,74 +747,12 @@ func archiveLegacyConfig() {
func restart() {
l.Infoln("Restarting")
if os.Getenv("SMF_FMRI") != "" || os.Getenv("STNORESTART") != "" {
// Solaris SMF
l.Infoln("Service manager detected; exit instead of restart")
stop <- true
return
}
env := os.Environ()
newEnv := make([]string, 0, len(env))
for _, s := range env {
if !strings.HasPrefix(s, "STRESTART=") {
newEnv = append(newEnv, s)
}
}
newEnv = append(newEnv, fmt.Sprintf("STRESTART=%d", lockPort))
pgm, err := exec.LookPath(os.Args[0])
if err != nil {
l.Warnln("Cannot restart:", err)
return
}
proc, err := os.StartProcess(pgm, os.Args, &os.ProcAttr{
Env: newEnv,
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
})
if err != nil {
l.Fatalln(err)
}
proc.Release()
stop <- true
stop <- exitRestarting
}
func shutdown() {
stop <- true
}
var saveConfigCh = make(chan struct{})
func saveConfigLoop(cfgFile string) {
for _ = range saveConfigCh {
fd, err := os.Create(cfgFile + ".tmp")
if err != nil {
l.Warnln("Saving config:", err)
continue
}
err = config.Save(fd, cfg)
if err != nil {
l.Warnln("Saving config:", err)
fd.Close()
continue
}
err = fd.Close()
if err != nil {
l.Warnln("Saving config:", err)
continue
}
err = osutil.Rename(cfgFile+".tmp", cfgFile)
if err != nil {
l.Warnln("Saving config:", err)
}
}
}
func saveConfig() {
saveConfigCh <- struct{}{}
l.Infoln("Shutting down")
stop <- exitSuccess
}
func listenConnect(myID protocol.NodeID, m *model.Model, tlsCfg *tls.Config) {
@@ -771,15 +808,20 @@ next:
continue next
}
// If rate limiting is set, we wrap the write side of the
// connection in a limiter.
// If rate limiting is set, we wrap the connection in a
// limiter.
var wr io.Writer = conn
if rateBucket != nil {
wr = &limitedWriter{conn, rateBucket}
if writeRateLimit != nil {
wr = &limitedWriter{conn, writeRateLimit}
}
var rd io.Reader = conn
if readRateLimit != nil {
rd = &limitedReader{conn, readRateLimit}
}
name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
protoConn := protocol.NewConnection(remoteID, conn, wr, m, name, nodeCfg.Compression)
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, nodeCfg.Compression)
l.Infof("Established secure connection to %s at %s", remoteID, name)
if debugNet {
@@ -795,6 +837,10 @@ next:
}
}
events.Default.Log(events.NodeRejected, map[string]string{
"node": remoteID.String(),
"address": conn.RemoteAddr().String(),
})
l.Infof("Connection from %s with unknown node ID %s; ignoring", conn.RemoteAddr(), remoteID)
conn.Close()
}
@@ -934,19 +980,15 @@ func setTCPOptions(conn *net.TCPConn) {
}
func discovery(extPort int) *discover.Discoverer {
disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress, cfg.Options.LocalAnnPort)
if err != nil {
l.Warnf("No discovery possible (%v)", err)
return nil
}
disc := discover.NewDiscoverer(myID, cfg.Options.ListenAddress)
if cfg.Options.LocalAnnEnabled {
l.Infoln("Sending local discovery announcements")
disc.StartLocal()
l.Infoln("Starting local discovery announcements")
disc.StartLocal(cfg.Options.LocalAnnPort, cfg.Options.LocalAnnMCAddr)
}
if cfg.Options.GlobalAnnEnabled {
l.Infoln("Sending global discovery announcements")
l.Infoln("Starting global discovery announcements")
disc.StartGlobal(cfg.Options.GlobalAnnServer, uint16(extPort))
}
@@ -1035,12 +1077,71 @@ func getFreePort(host string, ports ...int) (int, error) {
return addr.Port, nil
}
func getLockPort() (int, error) {
var err error
lockConn, err = net.ListenTCP("tcp", &net.TCPAddr{IP: net.IP{127, 0, 0, 1}})
if err != nil {
return 0, err
func overrideGUIConfig(originalCfg config.GUIConfiguration, address, authentication, apikey string) config.GUIConfiguration {
// Make a copy of the config
cfg := originalCfg
if address == "" {
address = os.Getenv("STGUIADDRESS")
}
if address != "" {
cfg.Enabled = true
addressParts := strings.SplitN(address, "://", 2)
switch addressParts[0] {
case "http":
cfg.UseTLS = false
case "https":
cfg.UseTLS = true
default:
l.Fatalln("Unidentified protocol", addressParts[0])
}
cfg.Address = addressParts[1]
}
if authentication == "" {
authentication = os.Getenv("STGUIAUTH")
}
if authentication != "" {
authenticationParts := strings.SplitN(authentication, ":", 2)
hash, err := bcrypt.GenerateFromPassword([]byte(authenticationParts[1]), 0)
if err != nil {
l.Fatalln("Invalid GUI password:", err)
}
cfg.User = authenticationParts[0]
cfg.Password = string(hash)
}
if apikey == "" {
apikey = os.Getenv("STGUIAPIKEY")
}
if apikey != "" {
cfg.APIKey = apikey
}
return cfg
}
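As a hedged usage sketch of the precedence implemented here (command line flag first, then the corresponding environment variable, then config.xml; the values are illustrative):
syncthing -gui-address https://0.0.0.0:8443 -gui-authentication admin:secret -gui-apikey abc123
or, equivalently,
STGUIADDRESS=https://0.0.0.0:8443 STGUIAUTH=admin:secret STGUIAPIKEY=abc123 syncthing
In either case the https:// prefix switches UseTLS on, and the password is bcrypt-hashed before it lands in the effective GUIConfiguration.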
func standbyMonitor() {
restartDelay := 60 * time.Second
now := time.Now()
for {
time.Sleep(10 * time.Second)
if time.Since(now) > 2*time.Minute {
l.Infoln("Paused state detected, possibly woke up from standby. Restarting in", restartDelay)
// We most likely just woke from standby. If we restart
// immediately chances are we won't have networking ready. Give
// things a moment to stabilize.
time.Sleep(restartDelay)
restart()
return
}
now = time.Now()
}
addr := lockConn.Addr().(*net.TCPAddr)
return addr.Port, nil
}

View File

@@ -0,0 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main_test
// Empty test file to generate 0% coverage rather than no coverage

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build solaris
package main

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build freebsd
package main

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

167
cmd/syncthing/monitor.go Normal file

@@ -0,0 +1,167 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"io"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
)
var (
stdoutFirstLines []string // The first 10 lines of stdout
stdoutLastLines []string // The last 50 lines of stdout
stdoutMut sync.Mutex
)
const (
countRestarts = 5
loopThreshold = 15 * time.Second
)
func monitorMain() {
os.Setenv("STNORESTART", "yes")
l.SetPrefix("[monitor] ")
args := os.Args
var restarts [countRestarts]time.Time
sign := make(chan os.Signal, 1)
sigTerm := syscall.Signal(0xf)
signal.Notify(sign, os.Interrupt, sigTerm, os.Kill)
for {
if t := time.Since(restarts[0]); t < loopThreshold {
l.Warnf("%d restarts in %v; not retrying further", countRestarts, t)
os.Exit(exitError)
}
copy(restarts[0:], restarts[1:])
restarts[len(restarts)-1] = time.Now()
cmd := exec.Command(args[0], args[1:]...)
stderr, err := cmd.StderrPipe()
if err != nil {
l.Fatalln(err)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
l.Fatalln(err)
}
l.Infoln("Starting syncthing")
err = cmd.Start()
if err != nil {
l.Fatalln(err)
}
stdoutMut.Lock()
stdoutFirstLines = make([]string, 0, 10)
stdoutLastLines = make([]string, 0, 50)
stdoutMut.Unlock()
go copyStderr(stderr)
go copyStdout(stdout)
exit := make(chan error)
go func() {
exit <- cmd.Wait()
}()
select {
case s := <-sign:
l.Infof("Signal %d received; exiting", s)
cmd.Process.Kill()
<-exit
return
case err = <-exit:
if err == nil {
// Successful exit indicates an intentional shutdown
return
}
}
l.Infoln("Syncthing exited:", err)
time.Sleep(1 * time.Second)
// Let the next child process know that this is not the first time
// it's starting up.
os.Setenv("STRESTART", "yes")
}
}
func copyStderr(stderr io.ReadCloser) {
br := bufio.NewReader(stderr)
var panicFd *os.File
for {
line, err := br.ReadString('\n')
if err != nil {
return
}
if panicFd == nil {
os.Stderr.WriteString(line)
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
panicFd, err = os.Create(filepath.Join(confDir, time.Now().Format("panic-20060102-150405.log")))
if err != nil {
l.Warnln("Create panic log:", err)
continue
}
l.Warnf("Panic detected, writing to \"%s\"", panicFd.Name())
l.Warnln("Please create an issue at https://github.com/syncthing/syncthing/issues/ with the panic log attached")
panicFd.WriteString("Panic at " + time.Now().Format(time.RFC1123) + "\n")
stdoutMut.Lock()
for _, line := range stdoutFirstLines {
panicFd.WriteString(line)
}
panicFd.WriteString("...\n")
for _, line := range stdoutLastLines {
panicFd.WriteString(line)
}
}
}
if panicFd != nil {
panicFd.WriteString(line)
}
}
}
func copyStdout(stderr io.ReadCloser) {
br := bufio.NewReader(stderr)
for {
line, err := br.ReadString('\n')
if err != nil {
return
}
stdoutMut.Lock()
if len(stdoutFirstLines) < cap(stdoutFirstLines) {
stdoutFirstLines = append(stdoutFirstLines, line)
}
if l := len(stdoutLastLines); l == cap(stdoutLastLines) {
stdoutLastLines = stdoutLastLines[:l-1]
}
stdoutLastLines = append(stdoutLastLines, line)
stdoutMut.Unlock()
os.Stdout.WriteString(line)
}
}
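A note on how the two processes cooperate, sketched from this file and the restart() change earlier in the diff: the inner process asks for a restart by exiting with exitRestarting (3), which the monitor observes as a non-nil cmd.Wait() error, so it sleeps a second, sets STRESTART=yes and launches a fresh child; a clean exit with code 0 instead returns from monitorMain and ends both processes.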

View File

@@ -1,4 +1,8 @@
// +build perfstats
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !solaris,!windows
package main
@@ -11,7 +15,9 @@ import (
)
func init() {
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
if innerProcess && os.Getenv("STPERFSTATS") != "" {
go savePerfStats(fmt.Sprintf("perfstats-%d.csv", syscall.Getpid()))
}
}
func savePerfStats(file string) {
@@ -40,6 +46,6 @@ func savePerfStats(file string) {
startms := int(t.Sub(t0).Seconds() * 1000)
fmt.Fprintf(fd, "%d\t%f\t%d\t%d\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys)
fmt.Fprintf(fd, "%d\t%f\t%d\t%d\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys-memstats.HeapReleased)
}
}

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package main
import (

View File

@@ -51,21 +51,21 @@ func main() {
var langs []string
for code, stat := range stats {
shortCode := code[:2]
if !curValidLangs[shortCode] {
code = strings.Replace(code, "_", "-", 1)
if !curValidLangs[code] {
if pct := 100 * stat.Translated / (stat.Translated + stat.Untranslated); pct < 95 {
log.Printf("Skipping language %q (too low completion ratio %d%%)", shortCode, pct)
os.Remove("lang-" + shortCode + ".json")
log.Printf("Skipping language %q (too low completion ratio %d%%)", code, pct)
os.Remove("lang-" + code + ".json")
continue
}
}
langs = append(langs, shortCode)
if shortCode == "en" {
langs = append(langs, code)
if code == "en" {
continue
}
log.Printf("Updating language %q", shortCode)
log.Printf("Updating language %q", code)
resp := req("https://www.transifex.com/api/2/project/syncthing/resource/gui/translation/" + code)
var t translation
@@ -75,7 +75,7 @@ func main() {
}
resp.Body.Close()
fd, err := os.Create("lang-" + shortCode + ".json")
fd, err := os.Create("lang-" + code + ".json")
if err != nil {
log.Fatal(err)
}
@@ -130,7 +130,7 @@ func loadValidLangs() []string {
}
var langs []string
exp := regexp.MustCompile(`\[([a-z",]+)\]`)
exp := regexp.MustCompile(`\[([a-zA-Z",-]+)\]`)
if matches := exp.FindSubmatch(bs); len(matches) == 2 {
langs = strings.Split(string(matches[1]), ",")
for i := range langs {

View File

@@ -81,10 +81,16 @@ func main() {
}
fd.Close()
doc, err := html.Parse(os.Stdin)
fd, err = os.Open(os.Args[2])
if err != nil {
log.Fatal(err)
}
doc, err := html.Parse(fd)
if err != nil {
log.Fatal(err)
}
fd.Close()
generalNode(doc)
bs, err := json.MarshalIndent(trans, "", " ")
if err != nil {

View File

@@ -8,20 +8,22 @@ package config
import (
"encoding/xml"
"fmt"
"io"
"os"
"reflect"
"sort"
"strconv"
"code.google.com/p/go.crypto/bcrypt"
"github.com/syncthing/syncthing/events"
"github.com/syncthing/syncthing/logger"
"github.com/syncthing/syncthing/osutil"
"github.com/syncthing/syncthing/protocol"
)
var l = logger.DefaultLogger
type Configuration struct {
Location string `xml:"-" json:"-"`
Version int `xml:"version,attr" default:"3"`
Repositories []RepositoryConfiguration `xml:"repository"`
Nodes []NodeConfiguration `xml:"node"`
@@ -31,13 +33,14 @@ type Configuration struct {
}
type RepositoryConfiguration struct {
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []NodeConfiguration `xml:"node"`
ReadOnly bool `xml:"ro,attr"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
ID string `xml:"id,attr"`
Directory string `xml:"directory,attr"`
Nodes []RepositoryNodeConfiguration `xml:"node"`
ReadOnly bool `xml:"ro,attr"`
RescanIntervalS int `xml:"rescanIntervalS,attr" default:"60"`
IgnorePerms bool `xml:"ignorePerms,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
Versioning VersioningConfiguration `xml:"versioning"`
nodeIDs []protocol.NodeID
}
@@ -100,26 +103,37 @@ type NodeConfiguration struct {
CertName string `xml:"certName,attr,omitempty"`
}
type RepositoryNodeConfiguration struct {
NodeID protocol.NodeID `xml:"id,attr"`
Deprecated_Name string `xml:"name,attr,omitempty" json:"-"`
Deprecated_Addresses []string `xml:"address,omitempty" json:"-"`
}
type OptionsConfiguration struct {
ListenAddress []string `xml:"listenAddress" default:"0.0.0.0:22000"`
GlobalAnnServer string `xml:"globalAnnounceServer" default:"announce.syncthing.net:22026"`
GlobalAnnEnabled bool `xml:"globalAnnounceEnabled" default:"true"`
LocalAnnEnabled bool `xml:"localAnnounceEnabled" default:"true"`
LocalAnnPort int `xml:"localAnnouncePort" default:"21025"`
LocalAnnMCAddr string `xml:"localAnnounceMCAddr" default:"[ff32::5222]:21026"`
ParallelRequests int `xml:"parallelRequests" default:"16"`
MaxSendKbps int `xml:"maxSendKbps"`
RescanIntervalS int `xml:"rescanIntervalS" default:"60"`
MaxRecvKbps int `xml:"maxRecvKbps"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"10000"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
UPnPLease int `xml:"upnpLeaseMinutes" default:"0"`
UPnPRenewal int `xml:"upnpRenewalMinutes" default:"30"`
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)
RestartOnWakeup bool `xml:"restartOnWakeup" default:"true"`
Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`
Deprecated_URDeclined bool `xml:"urDeclined,omitempty" json:"-"`
Deprecated_ReadOnly bool `xml:"readOnly,omitempty" json:"-"`
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty" json:"-"`
Deprecated_GUIAddress string `xml:"guiAddress,omitempty" json:"-"`
Deprecated_RescanIntervalS int `xml:"rescanIntervalS,omitempty" json:"-"`
Deprecated_UREnabled bool `xml:"urEnabled,omitempty" json:"-"`
Deprecated_URDeclined bool `xml:"urDeclined,omitempty" json:"-"`
Deprecated_ReadOnly bool `xml:"readOnly,omitempty" json:"-"`
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty" json:"-"`
Deprecated_GUIAddress string `xml:"guiAddress,omitempty" json:"-"`
}
type GUIConfiguration struct {
@@ -139,6 +153,15 @@ func (cfg *Configuration) NodeMap() map[protocol.NodeID]NodeConfiguration {
return m
}
func (cfg *Configuration) GetNodeConfiguration(nodeid protocol.NodeID) *NodeConfiguration {
for i, node := range cfg.Nodes {
if node.NodeID == nodeid {
return &cfg.Nodes[i]
}
}
return nil
}
func (cfg *Configuration) RepoMap() map[string]RepositoryConfiguration {
m := make(map[string]RepositoryConfiguration, len(cfg.Repositories))
for _, r := range cfg.Repositories {
@@ -208,14 +231,39 @@ func fillNilSlices(data interface{}) error {
return nil
}
func Save(wr io.Writer, cfg Configuration) error {
e := xml.NewEncoder(wr)
e.Indent("", " ")
err := e.Encode(cfg)
func (cfg *Configuration) Save() error {
fd, err := os.Create(cfg.Location + ".tmp")
if err != nil {
l.Warnln("Saving config:", err)
return err
}
_, err = wr.Write([]byte("\n"))
e := xml.NewEncoder(fd)
e.Indent("", " ")
err = e.Encode(cfg)
if err != nil {
fd.Close()
return err
}
_, err = fd.Write([]byte("\n"))
if err != nil {
l.Warnln("Saving config:", err)
fd.Close()
return err
}
err = fd.Close()
if err != nil {
l.Warnln("Saving config:", err)
return err
}
err = osutil.Rename(cfg.Location+".tmp", cfg.Location)
if err != nil {
l.Warnln("Saving config:", err)
}
events.Default.Log(events.ConfigSaved, cfg)
return err
}
@@ -233,18 +281,7 @@ func uniqueStrings(ss []string) []string {
return us
}
func Load(rd io.Reader, myID protocol.NodeID) (Configuration, error) {
var cfg Configuration
setDefaults(&cfg)
setDefaults(&cfg.Options)
setDefaults(&cfg.GUI)
var err error
if rd != nil {
err = xml.NewDecoder(rd).Decode(&cfg)
}
func (cfg *Configuration) prepare(myID protocol.NodeID) {
fillNilSlices(&cfg.Options)
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
@@ -293,19 +330,24 @@ func Load(rd io.Reader, myID protocol.NodeID) (Configuration, error) {
// Upgrade to v2 configuration if appropriate
if cfg.Version == 1 {
convertV1V2(&cfg)
convertV1V2(cfg)
}
// Upgrade to v3 configuration if appropriate
if cfg.Version == 2 {
convertV2V3(&cfg)
convertV2V3(cfg)
}
// Upgrade to v4 configuration if appropriate
if cfg.Version == 3 {
convertV3V4(cfg)
}
// Hash old cleartext passwords
if len(cfg.GUI.Password) > 0 && cfg.GUI.Password[0] != '$' {
hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
if err != nil {
l.Warnln(err)
l.Warnln("bcrypting password:", err)
} else {
cfg.GUI.Password = string(hash)
}
@@ -319,15 +361,22 @@ func Load(rd io.Reader, myID protocol.NodeID) (Configuration, error) {
}
// Ensure this node is present in all relevant places
me := cfg.GetNodeConfiguration(myID)
if me == nil {
myName, _ := os.Hostname()
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: myID,
Name: myName,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
// Ensure that any loose nodes are not present in the wrong places
// Ensure that there are no duplicate nodes
cfg.Nodes = ensureNodePresent(cfg.Nodes, myID)
sort.Sort(NodeConfigurationList(cfg.Nodes))
for i := range cfg.Repositories {
cfg.Repositories[i].Nodes = ensureNodePresent(cfg.Repositories[i].Nodes, myID)
cfg.Repositories[i].Nodes = ensureExistingNodes(cfg.Repositories[i].Nodes, existingNodes)
cfg.Repositories[i].Nodes = ensureNoDuplicates(cfg.Repositories[i].Nodes)
sort.Sort(NodeConfigurationList(cfg.Repositories[i].Nodes))
sort.Sort(RepositoryNodeConfigurationList(cfg.Repositories[i].Nodes))
}
// An empty address list is equivalent to a single "dynamic" entry
@@ -337,10 +386,68 @@ func Load(rd io.Reader, myID protocol.NodeID) (Configuration, error) {
n.Addresses = []string{"dynamic"}
}
}
}
func New(location string, myID protocol.NodeID) Configuration {
var cfg Configuration
cfg.Location = location
setDefaults(&cfg)
setDefaults(&cfg.Options)
setDefaults(&cfg.GUI)
cfg.prepare(myID)
return cfg
}
func Load(location string, myID protocol.NodeID) (Configuration, error) {
var cfg Configuration
cfg.Location = location
setDefaults(&cfg)
setDefaults(&cfg.Options)
setDefaults(&cfg.GUI)
fd, err := os.Open(location)
if err != nil {
return Configuration{}, err
}
err = xml.NewDecoder(fd).Decode(&cfg)
fd.Close()
cfg.prepare(myID)
return cfg, err
}
func convertV3V4(cfg *Configuration) {
// In previous versions, the rescan interval was a single option shared by all
// repositories. From now on it can be set per repository, so we have to make
// sure that after the upgrade every existing repository has its own rescan
// interval defined.
for i := range cfg.Repositories {
cfg.Repositories[i].RescanIntervalS = cfg.Options.Deprecated_RescanIntervalS
}
cfg.Options.Deprecated_RescanIntervalS = 0
// In previous versions, repositories held full node configurations.
// Since that's the only place where node configs were in V1, we still have
// to define the deprecated fields to be able to upgrade from V1 to V4.
for i, repo := range cfg.Repositories {
for j := range repo.Nodes {
rncfg := cfg.Repositories[i].Nodes[j]
rncfg.Deprecated_Name = ""
rncfg.Deprecated_Addresses = nil
}
}
cfg.Version = 4
}
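The v3.xml and v4.xml fixtures added further down in this diff illustrate the same migration in XML form: the global <rescanIntervalS>600</rescanIntervalS> option from version 3 becomes a rescanIntervalS="600" attribute on each repository element in version 4.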
func convertV2V3(cfg *Configuration) {
// In previous versions, compression was always on. When upgrading, enable
// compression on all existing nodes. New nodes will get compression on by
@@ -362,7 +469,7 @@ func convertV1V2(cfg *Configuration) {
// Collect the list of nodes.
// Replace node configs inside repositories with only a reference to the node ID.
// Set all repositories to read only if the global read only flag is set.
var nodes = map[string]NodeConfiguration{}
var nodes = map[string]RepositoryNodeConfiguration{}
for i, repo := range cfg.Repositories {
cfg.Repositories[i].ReadOnly = cfg.Options.Deprecated_ReadOnly
for j, node := range repo.Nodes {
@@ -370,14 +477,18 @@ func convertV1V2(cfg *Configuration) {
if _, ok := nodes[id]; !ok {
nodes[id] = node
}
cfg.Repositories[i].Nodes[j] = NodeConfiguration{NodeID: node.NodeID}
cfg.Repositories[i].Nodes[j] = RepositoryNodeConfiguration{NodeID: node.NodeID}
}
}
cfg.Options.Deprecated_ReadOnly = false
// Set and sort the list of nodes.
for _, node := range nodes {
cfg.Nodes = append(cfg.Nodes, node)
cfg.Nodes = append(cfg.Nodes, NodeConfiguration{
NodeID: node.NodeID,
Name: node.Deprecated_Name,
Addresses: node.Deprecated_Addresses,
})
}
sort.Sort(NodeConfigurationList(cfg.Nodes))
@@ -402,23 +513,33 @@ func (l NodeConfigurationList) Len() int {
return len(l)
}
func ensureNodePresent(nodes []NodeConfiguration, myID protocol.NodeID) []NodeConfiguration {
type RepositoryNodeConfigurationList []RepositoryNodeConfiguration
func (l RepositoryNodeConfigurationList) Less(a, b int) bool {
return l[a].NodeID.Compare(l[b].NodeID) == -1
}
func (l RepositoryNodeConfigurationList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l RepositoryNodeConfigurationList) Len() int {
return len(l)
}
func ensureNodePresent(nodes []RepositoryNodeConfiguration, myID protocol.NodeID) []RepositoryNodeConfiguration {
for _, node := range nodes {
if node.NodeID.Equals(myID) {
return nodes
}
}
name, _ := os.Hostname()
nodes = append(nodes, NodeConfiguration{
nodes = append(nodes, RepositoryNodeConfiguration{
NodeID: myID,
Name: name,
})
return nodes
}
func ensureExistingNodes(nodes []NodeConfiguration, existingNodes map[protocol.NodeID]bool) []NodeConfiguration {
func ensureExistingNodes(nodes []RepositoryNodeConfiguration, existingNodes map[protocol.NodeID]bool) []RepositoryNodeConfiguration {
count := len(nodes)
i := 0
loop:
@@ -433,7 +554,7 @@ loop:
return nodes[0:count]
}
func ensureNoDuplicates(nodes []NodeConfiguration) []NodeConfiguration {
func ensureNoDuplicates(nodes []RepositoryNodeConfiguration) []RepositoryNodeConfiguration {
count := len(nodes)
i := 0
seenNodes := make(map[protocol.NodeID]bool)

View File

@@ -5,8 +5,6 @@
package config
import (
"bytes"
"io"
"os"
"reflect"
"testing"
@@ -30,19 +28,19 @@ func TestDefaultValues(t *testing.T) {
GlobalAnnEnabled: true,
LocalAnnEnabled: true,
LocalAnnPort: 21025,
LocalAnnMCAddr: "[ff32::5222]:21026",
ParallelRequests: 16,
MaxSendKbps: 0,
RescanIntervalS: 60,
MaxRecvKbps: 0,
ReconnectIntervalS: 60,
MaxChangeKbps: 10000,
StartBrowser: true,
UPnPEnabled: true,
UPnPLease: 0,
UPnPRenewal: 30,
RestartOnWakeup: true,
}
cfg, err := Load(bytes.NewReader(nil), node1)
if err != io.EOF {
t.Error(err)
}
cfg := New("test", node1)
if !reflect.DeepEqual(cfg.Options, expected) {
t.Errorf("Default config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
@@ -50,73 +48,19 @@ func TestDefaultValues(t *testing.T) {
}
func TestNodeConfig(t *testing.T) {
v1data := []byte(`
<configuration version="1">
<repository id="test" directory="~/Sync">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
</repository>
<options>
<readOnly>true</readOnly>
</options>
</configuration>
`)
v2data := []byte(`
<configuration version="2">
<repository id="test" directory="~/Sync" ro="true">
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ"/>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ"/>
<node id="C4YBIESWDUAIGU62GOSRXCRAAJDWVE3TKCPMURZE2LH5QHAF576A"/>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ"/>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ"/>
<node id="C4YBIESWDUAIGU62GOSRXCRAAJDWVE3TKCPMURZE2LH5QHAF576A"/>
</repository>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
</configuration>
`)
v3data := []byte(`
<configuration version="3">
<repository id="test" directory="~/Sync" ro="true" ignorePerms="false">
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" compression="false"></node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" compression="false"></node>
</repository>
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" name="node one" compression="true">
<address>a</address>
</node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" name="node two" compression="true">
<address>b</address>
</node>
</configuration>`)
for i, data := range [][]byte{v1data, v2data, v3data} {
cfg, err := Load(bytes.NewReader(data), node1)
for i, ver := range []string{"v1", "v2", "v3", "v4"} {
cfg, err := Load("testdata/"+ver+".xml", node1)
if err != nil {
t.Error(err)
}
expectedRepos := []RepositoryConfiguration{
{
ID: "test",
Directory: "~/Sync",
Nodes: []NodeConfiguration{{NodeID: node1}, {NodeID: node4}},
ReadOnly: true,
ID: "test",
Directory: "~/Sync",
Nodes: []RepositoryNodeConfiguration{{NodeID: node1}, {NodeID: node4}},
ReadOnly: true,
RescanIntervalS: 600,
},
}
expectedNodes := []NodeConfiguration{
@@ -135,7 +79,7 @@ func TestNodeConfig(t *testing.T) {
}
expectedNodeIDs := []protocol.NodeID{node1, node4}
if cfg.Version != 3 {
if cfg.Version != 4 {
t.Errorf("%d: Incorrect version %d != 3", i, cfg.Version)
}
if !reflect.DeepEqual(cfg.Repositories, expectedRepos) {
@@ -158,14 +102,7 @@ func TestNodeConfig(t *testing.T) {
}
func TestNoListenAddress(t *testing.T) {
data := []byte(`<configuration version="1">
<options>
<listenAddress></listenAddress>
</options>
</configuration>
`)
cfg, err := Load(bytes.NewReader(data), node1)
cfg, err := Load("testdata/nolistenaddress.xml", node1)
if err != nil {
t.Error(err)
}
@@ -177,41 +114,25 @@ func TestNoListenAddress(t *testing.T) {
}
func TestOverriddenValues(t *testing.T) {
data := []byte(`<configuration version="2">
<options>
<listenAddress>:23000</listenAddress>
<allowDelete>false</allowDelete>
<globalAnnounceServer>syncthing.nym.se:22026</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>false</localAnnounceEnabled>
<localAnnouncePort>42123</localAnnouncePort>
<parallelRequests>32</parallelRequests>
<maxSendKbps>1234</maxSendKbps>
<rescanIntervalS>600</rescanIntervalS>
<reconnectionIntervalS>6000</reconnectionIntervalS>
<maxChangeKbps>2345</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
</options>
</configuration>
`)
expected := OptionsConfiguration{
ListenAddress: []string{":23000"},
GlobalAnnServer: "syncthing.nym.se:22026",
GlobalAnnEnabled: false,
LocalAnnEnabled: false,
LocalAnnPort: 42123,
LocalAnnMCAddr: "quux:3232",
ParallelRequests: 32,
MaxSendKbps: 1234,
RescanIntervalS: 600,
MaxRecvKbps: 2341,
ReconnectIntervalS: 6000,
MaxChangeKbps: 2345,
StartBrowser: false,
UPnPEnabled: false,
UPnPLease: 60,
UPnPRenewal: 15,
RestartOnWakeup: false,
}
cfg, err := Load(bytes.NewReader(data), node1)
cfg, err := Load("testdata/overridenvalues.xml", node1)
if err != nil {
t.Error(err)
}
@@ -222,19 +143,6 @@ func TestOverriddenValues(t *testing.T) {
}
func TestNodeAddressesDynamic(t *testing.T) {
data := []byte(`
<configuration version="2">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ">
<address></address>
</node>
<node id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA">
</node>
<node id="LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q">
<address>dynamic</address>
</node>
</configuration>
`)
name, _ := os.Hostname()
expected := []NodeConfiguration{
{
@@ -259,7 +167,7 @@ func TestNodeAddressesDynamic(t *testing.T) {
},
}
cfg, err := Load(bytes.NewReader(data), node4)
cfg, err := Load("testdata/nodeaddressesdynamic.xml", node4)
if err != nil {
t.Error(err)
}
@@ -270,23 +178,6 @@ func TestNodeAddressesDynamic(t *testing.T) {
}
func TestNodeAddressesStatic(t *testing.T) {
data := []byte(`
<configuration version="3">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ">
<address>192.0.2.1</address>
<address>192.0.2.2</address>
</node>
<node id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA">
<address>192.0.2.3:6070</address>
<address>[2001:db8::42]:4242</address>
</node>
<node id="LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q">
<address>[2001:db8::44]:4444</address>
<address>192.0.2.4:6090</address>
</node>
</configuration>
`)
name, _ := os.Hostname()
expected := []NodeConfiguration{
{
@@ -308,7 +199,7 @@ func TestNodeAddressesStatic(t *testing.T) {
},
}
cfg, err := Load(bytes.NewReader(data), node4)
cfg, err := Load("testdata/nodeaddressesstatic.xml", node4)
if err != nil {
t.Error(err)
}
@@ -319,18 +210,7 @@ func TestNodeAddressesStatic(t *testing.T) {
}
func TestVersioningConfig(t *testing.T) {
data := []byte(`
<configuration version="2">
<repository id="test" directory="~/Sync" ro="true">
<versioning type="simple">
<param key="foo" val="bar"/>
<param key="baz" val="quux"/>
</versioning>
</repository>
</configuration>
`)
cfg, err := Load(bytes.NewReader(data), node4)
cfg, err := Load("testdata/versioningconfig.xml", node4)
if err != nil {
t.Error(err)
}
@@ -351,3 +231,67 @@ func TestVersioningConfig(t *testing.T) {
t.Errorf("vc.Params differ;\n E: %#v\n A: %#v", expected, vc.Params)
}
}
func TestNewSaveLoad(t *testing.T) {
path := "testdata/temp.xml"
os.Remove(path)
exists := func(path string) bool {
_, err := os.Stat(path)
return err == nil
}
cfg := New(path, node1)
// To make the equality pass later
cfg.XMLName.Local = "configuration"
if exists(path) {
t.Error(path, "exists")
}
err := cfg.Save()
if err != nil {
t.Error(err)
}
if !exists(path) {
t.Error(path, "does not exist")
}
cfg2, err := Load(path, node1)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg, cfg2) {
t.Errorf("Configs are not equal;\n E: %#v\n A: %#v", cfg, cfg2)
}
cfg.GUI.User = "test"
cfg.Save()
cfg2, err = Load(path, node1)
if err != nil {
t.Error(err)
}
if cfg2.GUI.User != "test" || !reflect.DeepEqual(cfg, cfg2) {
t.Errorf("Configs are not equal;\n E: %#v\n A: %#v", cfg, cfg2)
}
os.Remove(path)
}
func TestPrepare(t *testing.T) {
var cfg Configuration
if cfg.Repositories != nil || cfg.Nodes != nil || cfg.Options.ListenAddress != nil {
t.Error("Expected nil")
}
cfg.prepare(node1)
if cfg.Repositories == nil || cfg.Nodes == nil || cfg.Options.ListenAddress == nil {
t.Error("Unexpected nil")
}
}

10
config/testdata/nodeaddressesdynamic.xml vendored Executable file

@@ -0,0 +1,10 @@
<configuration version="2">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ">
<address></address>
</node>
<node id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA">
</node>
<node id="LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q">
<address>dynamic</address>
</node>
</configuration>

14
config/testdata/nodeaddressesstatic.xml vendored Executable file

@@ -0,0 +1,14 @@
<configuration version="3">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ">
<address>192.0.2.1</address>
<address>192.0.2.2</address>
</node>
<node id="GYRZZQBIRNPV4T7TC52WEQYJ3TFDQW6MWDFLMU4SSSU6EMFBK2VA">
<address>192.0.2.3:6070</address>
<address>[2001:db8::42]:4242</address>
</node>
<node id="LGFPDIT7SKNNJVJZA4FC7QNCRKCE753K72BW5QD2FOZ7FRFEP57Q">
<address>[2001:db8::44]:4444</address>
<address>192.0.2.4:6090</address>
</node>
</configuration>

5
config/testdata/nolistenaddress.xml vendored Executable file

@@ -0,0 +1,5 @@
<configuration version="1">
<options>
<listenAddress></listenAddress>
</options>
</configuration>

20
config/testdata/overridenvalues.xml vendored Executable file

@@ -0,0 +1,20 @@
<configuration version="2">
<options>
<listenAddress>:23000</listenAddress>
<allowDelete>false</allowDelete>
<globalAnnounceServer>syncthing.nym.se:22026</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>false</localAnnounceEnabled>
<localAnnouncePort>42123</localAnnouncePort>
<localAnnounceMCAddr>quux:3232</localAnnounceMCAddr>
<parallelRequests>32</parallelRequests>
<maxSendKbps>1234</maxSendKbps>
<maxRecvKbps>2341</maxRecvKbps>
<reconnectionIntervalS>6000</reconnectionIntervalS>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
<upnpLeaseMinutes>60</upnpLeaseMinutes>
<upnpRenewalMinutes>15</upnpRenewalMinutes>
<restartOnWakeup>false</restartOnWakeup>
</options>
</configuration>

20
config/testdata/v1.xml vendored Executable file

@@ -0,0 +1,20 @@
<configuration version="1">
<repository id="test" directory="~/Sync">
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
</repository>
<options>
<readOnly>true</readOnly>
<rescanIntervalS>600</rescanIntervalS>
</options>
</configuration>

19
config/testdata/v2.xml vendored Executable file

@@ -0,0 +1,19 @@
<configuration version="2">
<repository id="test" directory="~/Sync" ro="true">
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ"/>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ"/>
<node id="C4YBIESWDUAIGU62GOSRXCRAAJDWVE3TKCPMURZE2LH5QHAF576A"/>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ"/>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ"/>
<node id="C4YBIESWDUAIGU62GOSRXCRAAJDWVE3TKCPMURZE2LH5QHAF576A"/>
</repository>
<node id="AIR6LPZ7K4PTTUXQSMUUCPQ5YWOEDFIIQJUG7772YQXXR5YD6AWQ" name="node one">
<address>a</address>
</node>
<node id="P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ" name="node two">
<address>b</address>
</node>
<options>
<rescanIntervalS>600</rescanIntervalS>
</options>
</configuration>

15
config/testdata/v3.xml vendored Executable file

@@ -0,0 +1,15 @@
<configuration version="3">
<repository id="test" directory="~/Sync" ro="true" ignorePerms="false">
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" compression="false"></node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" compression="false"></node>
</repository>
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" name="node one" compression="true">
<address>a</address>
</node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" name="node two" compression="true">
<address>b</address>
</node>
<options>
<rescanIntervalS>600</rescanIntervalS>
</options>
</configuration>

12
config/testdata/v4.xml vendored Executable file

@@ -0,0 +1,12 @@
<configuration version="4">
<repository id="test" directory="~/Sync" ro="true" ignorePerms="false" rescanIntervalS="600">
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR"></node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"></node>
</repository>
<node id="AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR" name="node one" compression="true">
<address>a</address>
</node>
<node id="P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2" name="node two" compression="true">
<address>b</address>
</node>
</configuration>

8
config/testdata/versioningconfig.xml vendored Executable file
View File

@@ -0,0 +1,8 @@
<configuration version="2">
<repository id="test" directory="~/Sync" ro="true">
<versioning type="simple">
<param key="foo" val="bar"/>
<param key="baz" val="quux"/>
</versioning>
</repository>
</configuration>

View File

@@ -36,7 +36,7 @@ The Announcement packet has the following structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic (0x029E4C77) |
| Magic (0x9D79BC39) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Node Structure \
@@ -121,7 +121,7 @@ The Query packet has the following structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Magic Number (0x23D63A9A) |
| Magic Number (0x2CA856F5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Node ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
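The corrected magic values above match the constants used by the discovery code. A minimal sketch of a decode-time check, assuming the Announce struct carries the magic in a Magic field; AnnouncementMagic and ErrIncorrectMagic appear in the discover diff further down, while the QueryMagic name is only illustrative.

const (
	AnnouncementMagic = 0x9D79BC39 // Announcement packet, as documented above
	QueryMagic        = 0x2CA856F5 // Query packet, as documented above
)

// checkAnnounceMagic rejects announcement packets whose magic does not match
// the documented value. Sketch only; not taken verbatim from the source.
func checkAnnounceMagic(pkt Announce) error {
	if pkt.Magic != AnnouncementMagic {
		return ErrIncorrectMagic
	}
	return nil
}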

View File

@@ -8,9 +8,9 @@ import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"strconv"
"sync"
"time"
@@ -24,57 +24,96 @@ type Discoverer struct {
listenAddrs []string
localBcastIntv time.Duration
globalBcastIntv time.Duration
beacon *beacon.Beacon
registry map[protocol.NodeID][]string
errorRetryIntv time.Duration
cacheLifetime time.Duration
broadcastBeacon beacon.Interface
multicastBeacon beacon.Interface
registry map[protocol.NodeID][]cacheEntry
registryLock sync.RWMutex
extServer string
extPort uint16
localBcastTick <-chan time.Time
stopGlobal chan struct{}
globalWG sync.WaitGroup
forcedBcastTick chan time.Time
extAnnounceOK bool
extAnnounceOKmut sync.Mutex
}
type cacheEntry struct {
addr string
seen time.Time
}
var (
ErrIncorrectMagic = errors.New("incorrect magic number")
)
// We tolerate a certain number of errors because we might be running on
// laptops that sleep and wake, have intermittent network connectivity, etc.
// When we hit this many errors in succession, we stop.
const maxErrors = 30
func NewDiscoverer(id protocol.NodeID, addresses []string, localPort int) (*Discoverer, error) {
b, err := beacon.New(localPort)
if err != nil {
return nil, err
}
disc := &Discoverer{
func NewDiscoverer(id protocol.NodeID, addresses []string) *Discoverer {
return &Discoverer{
myID: id,
listenAddrs: addresses,
localBcastIntv: 30 * time.Second,
globalBcastIntv: 1800 * time.Second,
beacon: b,
registry: make(map[protocol.NodeID][]string),
errorRetryIntv: 60 * time.Second,
cacheLifetime: 5 * time.Minute,
registry: make(map[protocol.NodeID][]cacheEntry),
}
go disc.recvAnnouncements()
return disc, nil
}
func (d *Discoverer) StartLocal() {
d.localBcastTick = time.Tick(d.localBcastIntv)
d.forcedBcastTick = make(chan time.Time)
go d.sendLocalAnnouncements()
func (d *Discoverer) StartLocal(localPort int, localMCAddr string) {
if localPort > 0 {
bb, err := beacon.NewBroadcast(localPort)
if err != nil {
if debug {
l.Debugln(err)
}
l.Infoln("Local discovery over IPv4 unavailable")
} else {
d.broadcastBeacon = bb
go d.recvAnnouncements(bb)
}
}
if len(localMCAddr) > 0 {
mb, err := beacon.NewMulticast(localMCAddr)
if err != nil {
if debug {
l.Debugln(err)
}
l.Infoln("Local discovery over IPv6 unavailable")
} else {
d.multicastBeacon = mb
go d.recvAnnouncements(mb)
}
}
if d.broadcastBeacon == nil && d.multicastBeacon == nil {
l.Warnln("Local discovery unavailable")
} else {
d.localBcastTick = time.Tick(d.localBcastIntv)
d.forcedBcastTick = make(chan time.Time)
go d.sendLocalAnnouncements()
}
}
func (d *Discoverer) StartGlobal(server string, extPort uint16) {
// Wait for any previous announcer to stop before starting a new one.
d.globalWG.Wait()
d.extServer = server
d.extPort = extPort
d.stopGlobal = make(chan struct{})
d.globalWG.Add(1)
go d.sendExternalAnnouncements()
}
func (d *Discoverer) StopGlobal() {
if d.stopGlobal != nil {
close(d.stopGlobal)
d.globalWG.Wait()
}
}
func (d *Discoverer) ExtAnnounceOK() bool {
d.extAnnounceOKmut.Lock()
defer d.extAnnounceOKmut.Unlock()
@@ -83,14 +122,28 @@ func (d *Discoverer) ExtAnnounceOK() bool {
func (d *Discoverer) Lookup(node protocol.NodeID) []string {
d.registryLock.Lock()
addr, ok := d.registry[node]
cached := d.filterCached(d.registry[node])
d.registryLock.Unlock()
if ok {
return addr
if len(cached) > 0 {
addrs := make([]string, len(cached))
for i := range cached {
addrs[i] = cached[i].addr
}
return addrs
} else if len(d.extServer) != 0 {
// We might want to cache this, but not permanently so it needs some intelligence
return d.externalLookup(node)
addrs := d.externalLookup(node)
cached = make([]cacheEntry, len(addrs))
for i := range addrs {
cached[i] = cacheEntry{
addr: addrs[i],
seen: time.Now(),
}
}
d.registryLock.Lock()
d.registry[node] = cached
d.registryLock.Unlock()
}
return nil
}
@@ -105,11 +158,11 @@ func (d *Discoverer) Hint(node string, addrs []string) {
})
}
func (d *Discoverer) All() map[protocol.NodeID][]string {
func (d *Discoverer) All() map[protocol.NodeID][]cacheEntry {
d.registryLock.RLock()
nodes := make(map[protocol.NodeID][]string, len(d.registry))
nodes := make(map[protocol.NodeID][]cacheEntry, len(d.registry))
for node, addrs := range d.registry {
addrsCopy := make([]string, len(addrs))
addrsCopy := make([]cacheEntry, len(addrs))
copy(addrsCopy, addrs)
nodes[node] = addrsCopy
}
@@ -149,21 +202,15 @@ func (d *Discoverer) sendLocalAnnouncements() {
Magic: AnnouncementMagic,
This: Node{d.myID[:], addrs},
}
msg := pkt.MarshalXDR()
for {
pkt.Extra = nil
d.registryLock.RLock()
for node, addrs := range d.registry {
if len(pkt.Extra) == 16 {
break
}
anode := Node{node[:], resolveAddrs(addrs)}
pkt.Extra = append(pkt.Extra, anode)
if d.multicastBeacon != nil {
d.multicastBeacon.Send(msg)
}
if d.broadcastBeacon != nil {
d.broadcastBeacon.Send(msg)
}
d.registryLock.RUnlock()
d.beacon.Send(pkt.MarshalXDR())
select {
case <-d.localBcastTick:
@@ -173,20 +220,19 @@ func (d *Discoverer) sendLocalAnnouncements() {
}
func (d *Discoverer) sendExternalAnnouncements() {
// this should go in the Discoverer struct
errorRetryIntv := 60 * time.Second
defer d.globalWG.Done()
remote, err := net.ResolveUDPAddr("udp", d.extServer)
for err != nil {
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
time.Sleep(errorRetryIntv)
l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
time.Sleep(d.errorRetryIntv)
remote, err = net.ResolveUDPAddr("udp", d.extServer)
}
conn, err := net.ListenUDP("udp", nil)
for err != nil {
l.Warnf("Global discovery: %v; trying again in %v", err, errorRetryIntv)
time.Sleep(errorRetryIntv)
l.Warnf("Global discovery: %v; trying again in %v", err, d.errorRetryIntv)
time.Sleep(d.errorRetryIntv)
conn, err = net.ListenUDP("udp", nil)
}
@@ -201,7 +247,10 @@ func (d *Discoverer) sendExternalAnnouncements() {
buf = d.announcementPkt()
}
for {
var bcastTick = time.Tick(d.globalBcastIntv)
var errTick <-chan time.Time
sendOneAnnouncement := func() {
var ok bool
if debug {
@@ -230,19 +279,40 @@ func (d *Discoverer) sendExternalAnnouncements() {
d.extAnnounceOKmut.Unlock()
if ok {
time.Sleep(d.globalBcastIntv)
} else {
time.Sleep(errorRetryIntv)
errTick = nil
} else if errTick != nil {
errTick = time.Tick(d.errorRetryIntv)
}
}
// Announce once, immediately
sendOneAnnouncement()
loop:
for {
select {
case <-d.stopGlobal:
break loop
case <-errTick:
sendOneAnnouncement()
case <-bcastTick:
sendOneAnnouncement()
}
}
if debug {
l.Debugln("discover: stopping global")
}
}
func (d *Discoverer) recvAnnouncements() {
func (d *Discoverer) recvAnnouncements(b beacon.Interface) {
for {
buf, addr := d.beacon.Recv()
buf, addr := b.Recv()
if debug {
l.Debugf("discover: read announcement:\n%s", hex.Dump(buf))
l.Debugf("discover: read announcement from %s:\n%s", addr, hex.Dump(buf))
}
var pkt Announce
@@ -251,20 +321,9 @@ func (d *Discoverer) recvAnnouncements() {
continue
}
if debug {
l.Debugf("discover: parsed announcement: %#v", pkt)
}
var newNode bool
if bytes.Compare(pkt.This.ID, d.myID[:]) != 0 {
newNode = d.registerNode(addr, pkt.This)
for _, node := range pkt.Extra {
if bytes.Compare(node.ID, d.myID[:]) != 0 {
if d.registerNode(nil, node) {
newNode = true
}
}
}
}
if newNode {
@@ -276,41 +335,57 @@ func (d *Discoverer) recvAnnouncements() {
}
func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
var addrs []string
var id protocol.NodeID
copy(id[:], node.ID)
d.registryLock.RLock()
current := d.filterCached(d.registry[id])
d.registryLock.RUnlock()
orig := current
for _, a := range node.Addresses {
var nodeAddr string
if len(a.IP) > 0 {
nodeAddr = fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)
addrs = append(addrs, nodeAddr)
nodeAddr = net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
} else if addr != nil {
ua := addr.(*net.UDPAddr)
ua.Port = int(a.Port)
nodeAddr = ua.String()
addrs = append(addrs, nodeAddr)
}
}
if len(addrs) == 0 {
if debug {
l.Debugln("discover: no valid address for", node.ID)
for i := range current {
if current[i].addr == nodeAddr {
current[i].seen = time.Now()
goto done
}
}
current = append(current, cacheEntry{
addr: nodeAddr,
seen: time.Now(),
})
done:
}
if debug {
l.Debugf("discover: register: %s -> %#v", node.ID, addrs)
l.Debugf("discover: register: %v -> %v", id, current)
}
var id protocol.NodeID
copy(id[:], node.ID)
d.registryLock.Lock()
_, seen := d.registry[id]
d.registry[id] = addrs
d.registry[id] = current
d.registryLock.Unlock()
if !seen {
if len(current) > len(orig) {
addrs := make([]string, len(current))
for i := range current {
addrs[i] = current[i].addr
}
events.Default.Log(events.NodeDiscovered, map[string]interface{}{
"node": id.String(),
"addrs": addrs,
})
}
return !seen
return len(current) > len(orig)
}
func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
@@ -374,18 +449,29 @@ func (d *Discoverer) externalLookup(node protocol.NodeID) []string {
return nil
}
if debug {
l.Debugf("discover: parsed external: %#v", pkt)
}
var addrs []string
for _, a := range pkt.This.Addresses {
nodeAddr := fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)
nodeAddr := net.JoinHostPort(net.IP(a.IP).String(), strconv.Itoa(int(a.Port)))
addrs = append(addrs, nodeAddr)
}
return addrs
}
func (d *Discoverer) filterCached(c []cacheEntry) []cacheEntry {
for i := 0; i < len(c); {
if ago := time.Since(c[i].seen); ago > d.cacheLifetime {
if debug {
l.Debugf("removing cached address %s: seen %v ago", c[i].addr, ago)
}
c[i] = c[len(c)-1]
c = c[:len(c)-1]
} else {
i++
}
}
return c
}
func addrToAddr(addr *net.TCPAddr) Address {
if len(addr.IP) == 0 || addr.IP.IsUnspecified() {
return Address{Port: uint16(addr.Port)}
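The registry now stores cacheEntry values with a last-seen timestamp, and filterCached drops anything older than cacheLifetime (five minutes) by swapping in the last element. A self-contained sketch of that expiry pass, with example addresses rather than real discovery data:

package main

import (
	"fmt"
	"time"
)

// cacheEntry mirrors the struct added in the diff above: an address plus the
// time it was last seen.
type cacheEntry struct {
	addr string
	seen time.Time
}

// filterCached removes entries older than the lifetime by swapping in the last
// element, so the slice is filtered in place without preserving order.
func filterCached(c []cacheEntry, lifetime time.Duration) []cacheEntry {
	for i := 0; i < len(c); {
		if time.Since(c[i].seen) > lifetime {
			c[i] = c[len(c)-1]
			c = c[:len(c)-1]
		} else {
			i++
		}
	}
	return c
}

func main() {
	cache := []cacheEntry{
		{addr: "192.0.2.1:22000", seen: time.Now().Add(-10 * time.Minute)}, // stale
		{addr: "192.0.2.2:22000", seen: time.Now()},                        // fresh
	}
	fmt.Println(filterCached(cache, 5*time.Minute)) // only the fresh entry remains
}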

View File

@@ -0,0 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package discover_test
// Empty test file to generate 0% coverage rather than no coverage

View File

@@ -20,10 +20,13 @@ const (
NodeDiscovered
NodeConnected
NodeDisconnected
NodeRejected
LocalIndexUpdated
RemoteIndexUpdated
ItemStarted
StateChanged
RepoRejected
ConfigSaved
AllEvents = ^EventType(0)
)
@@ -42,6 +45,8 @@ func (t EventType) String() string {
return "NodeConnected"
case NodeDisconnected:
return "NodeDisconnected"
case NodeRejected:
return "NodeRejected"
case LocalIndexUpdated:
return "LocalIndexUpdated"
case RemoteIndexUpdated:
@@ -50,6 +55,10 @@ func (t EventType) String() string {
return "ItemStarted"
case StateChanged:
return "StateChanged"
case RepoRejected:
return "RepoRejected"
case ConfigSaved:
return "ConfigSaved"
default:
return "Unknown"
}
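The new NodeRejected, RepoRejected and ConfigSaved types are emitted through the same bus call used for NodeDiscovered in the discovery diff above. A hedged sketch, assuming a config.Configuration value is available at the call site; the payload field is illustrative, not taken from the source.

// Sketch only: emit ConfigSaved after the configuration has been written out.
func emitConfigSaved(cfg config.Configuration) {
	events.Default.Log(events.ConfigSaved, map[string]interface{}{
		"version": cfg.Version, // assumed payload; the real event data is not shown here
	})
}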

15
files/filenames_darwin.go Normal file
View File

@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import "code.google.com/p/go.text/unicode/norm"
func normalizedFilename(s string) string {
return norm.NFC.String(s)
}
func nativeFilename(s string) string {
return norm.NFD.String(s)
}

17
files/filenames_unix.go Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows,!darwin
package files
import "code.google.com/p/go.text/unicode/norm"
func normalizedFilename(s string) string {
return norm.NFC.String(s)
}
func nativeFilename(s string) string {
return s
}

View File

@@ -0,0 +1,19 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import (
"path/filepath"
"code.google.com/p/go.text/unicode/norm"
)
func normalizedFilename(s string) string {
return norm.NFC.String(filepath.ToSlash(s))
}
func nativeFilename(s string) string {
return filepath.FromSlash(s)
}
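All three platform files expose the same two helpers; only the transforms differ (NFC plus slashes on the way into the database, NFD on Darwin and native separators on Windows on the way back out). A small runnable sketch of the NFC/NFD round trip, using the same norm package imported above:

package main

import (
	"fmt"

	"code.google.com/p/go.text/unicode/norm"
)

func main() {
	name := "pe\u0301re.txt" // "père" written with a combining acute accent (NFD form)

	// Wire format: compose to NFC before storing or sending.
	wire := norm.NFC.String(name)
	fmt.Printf("wire (NFC):   %q, %d bytes\n", wire, len(wire))

	// Native format on Darwin: decompose back to NFD on the way out.
	back := norm.NFD.String(wire)
	fmt.Printf("darwin (NFD): %q, %d bytes\n", back, len(back))
}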

View File

@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import (
@@ -117,9 +121,15 @@ func globalKeyName(key []byte) []byte {
return key[1+64:]
}
func globalKeyRepo(key []byte) []byte {
repo := key[1 : 1+64]
izero := bytes.IndexByte(repo, 0)
return repo[:izero]
}
type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
type fileIterator func(f protocol.FileInfo) bool
type fileIterator func(f protocol.FileIntf) bool
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
defer runtime.GC()
@@ -176,18 +186,28 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
}
fsi++
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions.
var ef protocol.FileInfo
// File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a node has
// marked a file as invalid, so handle that too.
var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version {
if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
if fs[fsi].IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, newName)
} else {
ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
}
}
// Iterate both sides.
fsi++
@@ -226,20 +246,23 @@ func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint6
func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if !protocol.IsDeleted(f.Flags) {
if !tf.IsDeleted() {
if debug {
l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
}
ts := clock(f.LocalVersion)
f.Blocks = nil
f.Version = lamport.Default.Tick(f.Version)
f.Flags |= protocol.FlagDeleted
f.LocalVersion = ts
ts := clock(tf.LocalVersion)
f := protocol.FileInfo{
Name: tf.Name,
Version: lamport.Default.Tick(tf.Version),
LocalVersion: ts,
Flags: tf.Flags | protocol.FlagDeleted,
Modified: tf.Modified,
}
batch.Put(dbi.Key(), f.MarshalXDR())
ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
return ts
@@ -267,20 +290,30 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
}
continue
}
var ef protocol.FileInfo
var ef protocol.FileInfoTruncated
err = ef.UnmarshalXDR(bs)
if err != nil {
panic(err)
}
if ef.Version != f.Version {
// Flags might change without the version being bumped when we set the
// invalid flag on an existing file.
if ef.Version != f.Version || ef.Flags != f.Flags {
if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
maxLocalVer = lv
}
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
if f.IsInvalid() {
ldbRemoveFromGlobal(snap, batch, repo, node, name)
} else {
ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
}
}
}
@@ -372,7 +405,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
gk := globalKey(repo, file)
svl, err := db.Get(gk, nil)
if err != nil {
panic(err)
// We might be called to "remove" a global version that doesn't exist
// if the first update for the file is already marked invalid.
return
}
var fl versionList
@@ -395,7 +430,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
}
func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
@@ -407,8 +442,7 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
defer dbi.Release()
for dbi.Next() {
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
f, err := unmarshalTrunc(dbi.Value(), truncate)
if err != nil {
panic(err)
}
@@ -418,7 +452,7 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
}
}
func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfo) bool) {
func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
defer runtime.GC()
start := nodeKey(repo, nil, nil) // before all repo/node files
@@ -433,7 +467,7 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
for dbi.Next() {
node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo
var f protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
@@ -444,40 +478,6 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
}
}
/*
func ldbCheckGlobalConsistency(db *leveldb.DB, repo []byte) {
l.Debugf("Checking global consistency for %q", repo)
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
defer snap.Release()
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
batch := new(leveldb.Batch)
i := 0
for dbi.Next() {
repo := nodeKeyRepo(dbi.Key())
node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if ldbUpdateGlobal(snap, batch, repo, node, []byte(f.Name), f.Version) {
var nodeID protocol.NodeID
copy(nodeID[:], node)
l.Debugf("fixed global for %q %s %q", repo, nodeID, f.Name)
}
i++
}
l.Debugln("Done", i)
}
*/
func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
nk := nodeKey(repo, node, file)
bs, err := db.Get(nk, nil)
@@ -536,7 +536,7 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
return f
}
func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
defer runtime.GC()
start := globalKey(repo, nil)
@@ -565,8 +565,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
panic(err)
}
var f protocol.FileInfo
err = f.UnmarshalXDR(bs)
f, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
@@ -605,7 +604,7 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
return nodes
}
func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
defer runtime.GC()
start := globalKey(repo, nil)
@@ -618,6 +617,7 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
outer:
for dbi.Next() {
var vl versionList
err := vl.UnmarshalXDR(dbi.Value())
@@ -643,30 +643,121 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
if need || !have {
name := globalKeyName(dbi.Key())
fk := nodeKey(repo, vl.versions[0].node, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
}
needVersion := vl.versions[0].version
inner:
for i := range vl.versions {
if vl.versions[i].version != needVersion {
// We haven't found a valid copy of the file with the needed version.
continue outer
}
fk := nodeKey(repo, vl.versions[i].node, name)
bs, err := snap.Get(fk, nil)
if err != nil {
panic(err)
}
var gf protocol.FileInfo
err = gf.UnmarshalXDR(bs)
if err != nil {
panic(err)
}
gf, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
if protocol.IsDeleted(gf.Flags) && !have {
// We don't need deleted files that we don't have
continue
}
if gf.IsInvalid() {
// The file is marked invalid for whatever reason, don't use it.
continue inner
}
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
}
if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have
continue outer
}
if cont := fn(gf); !cont {
return
if debug {
l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
}
if cont := fn(gf); !cont {
return
}
// This file is handled, no need to look further in the version list
continue outer
}
}
}
}
func ldbListRepos(db *leveldb.DB) []string {
defer runtime.GC()
start := []byte{keyTypeGlobal}
limit := []byte{keyTypeGlobal + 1}
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
defer snap.Release()
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
repoExists := make(map[string]bool)
for dbi.Next() {
repo := string(globalKeyRepo(dbi.Key()))
if !repoExists[repo] {
repoExists[repo] = true
}
}
repos := make([]string, 0, len(repoExists))
for k := range repoExists {
repos = append(repos, k)
}
sort.Strings(repos)
return repos
}
func ldbDropRepo(db *leveldb.DB, repo []byte) {
defer runtime.GC()
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
defer snap.Release()
// Remove all items related to the given repo from the node->file bucket
start := []byte{keyTypeNode}
limit := []byte{keyTypeNode + 1}
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := nodeKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
db.Delete(dbi.Key(), nil)
}
}
dbi.Release()
// Remove all items related to the given repo from the global bucket
start = []byte{keyTypeGlobal}
limit = []byte{keyTypeGlobal + 1}
dbi = snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
for dbi.Next() {
itemRepo := globalKeyRepo(dbi.Key())
if bytes.Compare(repo, itemRepo) == 0 {
db.Delete(dbi.Key(), nil)
}
}
dbi.Release()
}
func unmarshalTrunc(bs []byte, truncate bool) (protocol.FileIntf, error) {
if truncate {
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(bs)
return tf, err
} else {
var tf protocol.FileInfo
err := tf.UnmarshalXDR(bs)
return tf, err
}
}
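The reworked need-selection loop in ldbWithNeed above walks the global version list in order, skipping copies marked invalid and giving up once the version drops below the newest one. A simplified standalone sketch of that rule; in the real code the invalid flag lives on the FileInfo fetched per node, not on the version list entry itself.

package main

import "fmt"

// fileVersion is a simplified stand-in for one entry of the global version list.
type fileVersion struct {
	version uint64
	invalid bool
	node    string
}

// pickSource returns the first copy that still has the needed (newest) version
// and is not marked invalid, or reports that no usable source exists.
func pickSource(versions []fileVersion) (fileVersion, bool) {
	if len(versions) == 0 {
		return fileVersion{}, false
	}
	need := versions[0].version
	for _, v := range versions {
		if v.version != need {
			break // only older copies remain; nothing satisfies the needed version
		}
		if v.invalid {
			continue // this copy is unusable, try the next node
		}
		return v, true
	}
	return fileVersion{}, false
}

func main() {
	vl := []fileVersion{
		{version: 1002, invalid: true, node: "node0"},
		{version: 1002, invalid: false, node: "node1"},
		{version: 1000, invalid: false, node: "node2"},
	}
	fmt.Println(pickSource(vl)) // {1002 false node1} true
}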

View File

@@ -2,7 +2,12 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Package files provides a set type to track local/remote files with newness checks.
// Package files provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
package files
import (
@@ -36,7 +41,7 @@ func NewSet(repo string, db *leveldb.DB) *Set {
}
var nodeID protocol.NodeID
ldbWithAllRepo(db, []byte(repo), func(node []byte, f protocol.FileInfo) bool {
ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
copy(nodeID[:], node)
if f.LocalVersion > s.localVersion[nodeID] {
s.localVersion[nodeID] = f.LocalVersion
@@ -56,6 +61,7 @@ func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Replace(%v, [%d])", s.repo, node, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)
@@ -65,6 +71,7 @@ func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.repo, node, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbReplaceWithDelete(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
@@ -76,6 +83,7 @@ func (s *Set) Update(node protocol.NodeID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Update(%v, [%d])", s.repo, node, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbUpdate(s.db, []byte(s.repo), node[:], fs); lv > s.localVersion[node] {
@@ -87,33 +95,58 @@ func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], fn)
ldbWithNeed(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
}
func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
}
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.repo, node)
}
ldbWithHave(s.db, []byte(s.repo), node[:], fn)
ldbWithHave(s.db, []byte(s.repo), node[:], false, nativeFileIterator(fn))
}
func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
}
ldbWithHave(s.db, []byte(s.repo), node[:], true, nativeFileIterator(fn))
}
func (s *Set) WithGlobal(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobal()", s.repo)
}
ldbWithGlobal(s.db, []byte(s.repo), fn)
ldbWithGlobal(s.db, []byte(s.repo), false, nativeFileIterator(fn))
}
func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
}
ldbWithGlobal(s.db, []byte(s.repo), true, nativeFileIterator(fn))
}
func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {
return ldbGet(s.db, []byte(s.repo), node[:], []byte(file))
f := ldbGet(s.db, []byte(s.repo), node[:], []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) GetGlobal(file string) protocol.FileInfo {
return ldbGetGlobal(s.db, []byte(s.repo), []byte(file))
f := ldbGetGlobal(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
f.Name = nativeFilename(f.Name)
return f
}
func (s *Set) Availability(file string) []protocol.NodeID {
return ldbAvailability(s.db, []byte(s.repo), []byte(file))
return ldbAvailability(s.db, []byte(s.repo), []byte(normalizedFilename(file)))
}
func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
@@ -121,3 +154,35 @@ func (s *Set) LocalVersion(node protocol.NodeID) uint64 {
defer s.mutex.Unlock()
return s.localVersion[node]
}
// ListRepos returns the repository IDs seen in the database.
func ListRepos(db *leveldb.DB) []string {
return ldbListRepos(db)
}
// DropRepo clears out all information related to the given repo from the
// database.
func DropRepo(db *leveldb.DB, repo string) {
ldbDropRepo(db, []byte(repo))
}
func normalizeFilenames(fs []protocol.FileInfo) {
for i := range fs {
fs[i].Name = normalizedFilename(fs[i].Name)
}
}
func nativeFileIterator(fn fileIterator) fileIterator {
return func(fi protocol.FileIntf) bool {
switch f := fi.(type) {
case protocol.FileInfo:
f.Name = nativeFilename(f.Name)
return fn(f)
case protocol.FileInfoTruncated:
f.Name = nativeFilename(f.Name)
return fn(f)
default:
panic("unknown interface type")
}
}
}
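A hedged usage sketch for the new truncated iterators: summing what a node has without unmarshalling full block lists. It assumes FileIntf.Size() returns a byte count, as the test helpers below suggest.

// haveBytes totals the size of a node's non-deleted, non-invalid files.
// Sketch only; not part of the diff.
func haveBytes(s *files.Set, node protocol.NodeID) int64 {
	var total int64
	s.WithHaveTruncated(node, func(fi protocol.FileIntf) bool {
		if !fi.IsDeleted() && !fi.IsInvalid() {
			total += fi.Size()
		}
		return true // keep iterating over the whole set
	})
	return total
}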

View File

@@ -5,7 +5,9 @@
package files_test
import (
"bytes"
"fmt"
"reflect"
"sort"
"testing"
@@ -16,10 +18,11 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)
var remoteNode protocol.NodeID
var remoteNode0, remoteNode1 protocol.NodeID
func init() {
remoteNode, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode0, _ = protocol.NodeIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
remoteNode1, _ = protocol.NodeIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}
func genBlocks(n int) []protocol.BlockInfo {
@@ -37,7 +40,8 @@ func genBlocks(n int) []protocol.BlockInfo {
func globalList(s *files.Set) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithGlobal(func(f protocol.FileInfo) bool {
s.WithGlobal(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -46,7 +50,8 @@ func globalList(s *files.Set) []protocol.FileInfo {
func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithHave(n, func(f protocol.FileInfo) bool {
s.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -55,7 +60,8 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithNeed(n, func(f protocol.FileInfo) bool {
s.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -76,6 +82,16 @@ func (l fileList) Swap(a, b int) {
l[a], l[b] = l[b], l[a]
}
func (l fileList) String() string {
var b bytes.Buffer
b.WriteString("[]protocol.FileList{\n")
for _, f := range l {
fmt.Fprintf(&b, " %q: #%d, %d bytes, %d blocks, flags=%o\n", f.Name, f.Version, f.Size(), len(f.Blocks), f.Flags)
}
b.WriteString("}")
return b.String()
}
func TestGlobalSet(t *testing.T) {
lamport.Default = lamport.Clock{}
@@ -86,20 +102,20 @@ func TestGlobalSet(t *testing.T) {
m := files.NewSet("test", db)
local0 := []protocol.FileInfo{
local0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Version: 1000, Blocks: genBlocks(8)},
}
local1 := []protocol.FileInfo{
local1 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
}
localTot := []protocol.FileInfo{
localTot := fileList{
local0[0],
local0[1],
local0[2],
@@ -107,76 +123,76 @@ func TestGlobalSet(t *testing.T) {
protocol.FileInfo{Name: "z", Version: 1001, Flags: protocol.FlagDeleted},
}
remote0 := []protocol.FileInfo{
remote0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5)},
}
remote1 := []protocol.FileInfo{
remote1 := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(6)},
protocol.FileInfo{Name: "e", Version: 1000, Blocks: genBlocks(7)},
}
remoteTot := []protocol.FileInfo{
remoteTot := fileList{
remote0[0],
remote1[0],
remote0[2],
remote1[1],
}
expectedGlobal := []protocol.FileInfo{
remote0[0],
remote1[0],
remote0[2],
localTot[3],
remote1[1],
localTot[4],
expectedGlobal := fileList{
remote0[0], // a
remote1[0], // b
remote0[2], // c
localTot[3], // d
remote1[1], // e
localTot[4], // z
}
expectedLocalNeed := []protocol.FileInfo{
expectedLocalNeed := fileList{
remote1[0],
remote0[2],
remote1[1],
}
expectedRemoteNeed := []protocol.FileInfo{
expectedRemoteNeed := fileList{
local0[3],
}
m.ReplaceWithDelete(protocol.LocalNodeID, local0)
m.ReplaceWithDelete(protocol.LocalNodeID, local1)
m.Replace(remoteNode, remote0)
m.Update(remoteNode, remote1)
m.Replace(remoteNode0, remote0)
m.Update(remoteNode0, remote1)
g := globalList(m)
sort.Sort(fileList(g))
g := fileList(globalList(m))
sort.Sort(g)
if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
}
h := haveList(m, protocol.LocalNodeID)
sort.Sort(fileList(h))
h := fileList(haveList(m, protocol.LocalNodeID))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(localTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, localTot)
}
h = haveList(m, remoteNode)
sort.Sort(fileList(h))
h = fileList(haveList(m, remoteNode0))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
t.Errorf("Have incorrect;\n A: %v !=\n E: %v", h, remoteTot)
}
n := needList(m, protocol.LocalNodeID)
sort.Sort(fileList(n))
n := fileList(needList(m, protocol.LocalNodeID))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedLocalNeed)
}
n = needList(m, remoteNode)
sort.Sort(fileList(n))
n = fileList(needList(m, remoteNode0))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
@@ -187,7 +203,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
}
f = m.Get(remoteNode, "b")
f = m.Get(remoteNode0, "b")
if fmt.Sprint(f) != fmt.Sprint(remote1[0]) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
}
@@ -207,14 +223,14 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
}
av := []protocol.NodeID{protocol.LocalNodeID, remoteNode}
av := []protocol.NodeID{protocol.LocalNodeID, remoteNode0}
a := m.Availability("a")
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
}
a = m.Availability("b")
if len(a) != 1 || a[0] != remoteNode {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode)
if len(a) != 1 || a[0] != remoteNode0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteNode0)
}
a = m.Availability("d")
if len(a) != 1 || a[0] != protocol.LocalNodeID {
@@ -222,6 +238,128 @@ func TestGlobalSet(t *testing.T) {
}
}
func TestNeedWithInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
}
remote0Have := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
remote1Have := fileList{
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "e", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
expectedNeed := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
need := fileList(needList(s, protocol.LocalNodeID))
sort.Sort(need)
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, expectedNeed)
}
}
func TestUpdateToInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
}
s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
have := fileList(haveList(s, protocol.LocalNodeID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
}
localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
s.Update(protocol.LocalNodeID, localHave[1:2])
have = fileList(haveList(s, protocol.LocalNodeID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
}
}
func TestInvalidAvailability(t *testing.T) {
lamport.Default = lamport.Clock{}
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
remote0Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
remote1Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode0, remote0Have)
s.Replace(remoteNode1, remote1Have)
if av := s.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av)
}
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
t.Error("Incorrect availability for 'r0only':", av)
}
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
t.Error("Incorrect availability for 'r1only':", av)
}
if av := s.Availability("none"); len(av) != 0 {
t.Error("Incorrect availability for 'none':", av)
}
}
func TestLocalDeleted(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
@@ -327,7 +465,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
@@ -358,7 +496,7 @@ func Benchmark10kUpdateSme(b *testing.B) {
b.Fatal(err)
}
m := files.NewSet("test", db)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
@@ -385,7 +523,7 @@ func Benchmark10kNeed2k(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 8000; i++ {
@@ -418,7 +556,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -451,7 +589,7 @@ func Benchmark10kGlobal(b *testing.B) {
}
m := files.NewSet("test", db)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
@@ -502,8 +640,8 @@ func TestGlobalReset(t *testing.T) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
m.Replace(remoteNode, remote)
m.Replace(remoteNode, nil)
m.Replace(remoteNode0, remote)
m.Replace(remoteNode0, nil)
g = globalList(m)
sort.Sort(fileList(g))
@@ -542,7 +680,7 @@ func TestNeed(t *testing.T) {
}
m.ReplaceWithDelete(protocol.LocalNodeID, local)
m.Replace(remoteNode, remote)
m.Replace(remoteNode0, remote)
need := needList(m, protocol.LocalNodeID)
@@ -592,3 +730,184 @@ func TestLocalVersion(t *testing.T) {
t.Fatal("Local version number should be unchanged")
}
}
func TestListDropRepo(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s0 := files.NewSet("test0", db)
local1 := []protocol.FileInfo{
protocol.FileInfo{Name: "a", Version: 1000},
protocol.FileInfo{Name: "b", Version: 1000},
protocol.FileInfo{Name: "c", Version: 1000},
}
s0.Replace(protocol.LocalNodeID, local1)
s1 := files.NewSet("test1", db)
local2 := []protocol.FileInfo{
protocol.FileInfo{Name: "d", Version: 1002},
protocol.FileInfo{Name: "e", Version: 1002},
protocol.FileInfo{Name: "f", Version: 1002},
}
s1.Replace(remoteNode0, local2)
// Check that we have both repos and their data is in the global list
expectedRepoList := []string{"test0", "test1"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
}
if l := len(globalList(s1)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s1", l)
}
// Drop one of them and check that it's gone.
files.DropRepo(db, "test1")
expectedRepoList = []string{"test0"}
if actualRepoList := files.ListRepos(db); !reflect.DeepEqual(actualRepoList, expectedRepoList) {
t.Fatalf("RepoList mismatch\nE: %v\nA: %v", expectedRepoList, actualRepoList)
}
if l := len(globalList(s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
}
if l := len(globalList(s1)); l != 0 {
t.Errorf("Incorrect global length %d != 0 for s1", l)
}
}
func TestGlobalNeedWithInvalid(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test1", db)
rem0 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
s.Replace(remoteNode0, rem0)
rem1 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Flags: protocol.FlagInvalid},
}
s.Replace(remoteNode1, rem1)
total := fileList{
// There's a valid copy of each file, so it should be merged
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
}
need := fileList(needList(s, protocol.LocalNodeID))
if fmt.Sprint(need) != fmt.Sprint(total) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
}
global := fileList(globalList(s))
if fmt.Sprint(global) != fmt.Sprint(total) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", global, total)
}
}
func TestLongPath(t *testing.T) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
}
s := files.NewSet("test", db)
var b bytes.Buffer
for i := 0; i < 100; i++ {
b.WriteString("012345678901234567890123456789012345678901234567890")
}
name := b.String() // 5000 characters
local := []protocol.FileInfo{
protocol.FileInfo{Name: string(name), Version: 1000},
}
s.ReplaceWithDelete(protocol.LocalNodeID, local)
gf := globalList(s)
if l := len(gf); l != 1 {
t.Fatalf("Incorrect len %d != 1 for global list", l)
}
if gf[0].Name != local[0].Name {
t.Error("Incorrect long filename;\n%q !=\n%q", gf[0].Name, local[0].Name)
}
}
/*
var gf protocol.FileInfo
func TestStressGlobalVersion(t *testing.T) {
dur := 15 * time.Second
if testing.Short() {
dur = 1 * time.Second
}
set1 := []protocol.FileInfo{
protocol.FileInfo{Name: "a", Version: 1000},
protocol.FileInfo{Name: "b", Version: 1000},
}
set2 := []protocol.FileInfo{
protocol.FileInfo{Name: "b", Version: 1001},
protocol.FileInfo{Name: "c", Version: 1000},
}
db, err := leveldb.OpenFile("testdata/global.db", nil)
if err != nil {
t.Fatal(err)
}
m := files.NewSet("test", db)
done := make(chan struct{})
go stressWriter(m, remoteNode0, set1, nil, done)
go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
t0 := time.Now()
for time.Since(t0) < dur {
m.WithGlobal(func(f protocol.FileInfo) bool {
gf = f
return true
})
}
close(done)
}
func stressWriter(s *files.Set, id protocol.NodeID, set1, set2 []protocol.FileInfo, done chan struct{}) {
one := true
i := 0
for {
select {
case <-done:
return
default:
if one {
s.Replace(id, set1)
} else {
s.Replace(id, set2)
}
one = !one
}
i++
}
}
*/

62
fnmatch/fnmatch.go Normal file
View File

@@ -0,0 +1,62 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package fnmatch
import (
"path/filepath"
"regexp"
"runtime"
"strings"
)
const (
FNM_NOESCAPE = (1 << iota)
FNM_PATHNAME
FNM_CASEFOLD
)
func Convert(pattern string, flags int) (*regexp.Regexp, error) {
any := "."
if runtime.GOOS == "windows" {
flags |= FNM_NOESCAPE
pattern = filepath.FromSlash(pattern)
if flags&FNM_PATHNAME != 0 {
any = "[^\\\\]"
}
} else if flags&FNM_PATHNAME != 0 {
any = "[^/]"
}
if flags&FNM_NOESCAPE != 0 {
pattern = strings.Replace(pattern, "\\", "\\\\", -1)
} else {
pattern = strings.Replace(pattern, "\\*", "[:escapedstar:]", -1)
pattern = strings.Replace(pattern, "\\?", "[:escapedques:]", -1)
pattern = strings.Replace(pattern, "\\.", "[:escapeddot:]", -1)
}
pattern = strings.Replace(pattern, ".", "\\.", -1)
pattern = strings.Replace(pattern, "**", "[:doublestar:]", -1)
pattern = strings.Replace(pattern, "*", any+"*", -1)
pattern = strings.Replace(pattern, "[:doublestar:]", ".*", -1)
pattern = strings.Replace(pattern, "?", any, -1)
pattern = strings.Replace(pattern, "[:escapedstar:]", "\\*", -1)
pattern = strings.Replace(pattern, "[:escapedques:]", "\\?", -1)
pattern = strings.Replace(pattern, "[:escapeddot:]", "\\.", -1)
pattern = "^" + pattern + "$"
if flags&FNM_CASEFOLD != 0 {
pattern = "(?i)" + pattern
}
return regexp.Compile(pattern)
}
// Matches the pattern against the string, with the given flags,
// and returns true if the match is successful.
func Match(pattern, s string, flags int) (bool, error) {
exp, err := Convert(pattern, flags)
if err != nil {
return false, err
}
return exp.MatchString(s), nil
}
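A short usage sketch distilled from the test cases below (on a non-Windows platform, where no separator translation is needed): with FNM_PATHNAME a single "*" stops at the path separator while "**" crosses it, and FNM_CASEFOLD makes the whole match case-insensitive.

// exampleMatches illustrates the flag behaviour; sketch only, no error handling.
func exampleMatches() (bool, bool, bool) {
	noCross, _ := Match("*/foo.txt", "bar/baz/foo.txt", FNM_PATHNAME)     // false: "*" stops at "/"
	doubleStar, _ := Match("**/foo.txt", "bar/baz/foo.txt", FNM_PATHNAME) // true: "**" crosses "/"
	folded, _ := Match("foo.txt", "foo.TXT", FNM_CASEFOLD)                // true: case folded
	return noCross, doubleStar, folded
}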

81
fnmatch/fnmatch_test.go Normal file
View File

@@ -0,0 +1,81 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package fnmatch
import (
"path/filepath"
"runtime"
"testing"
)
type testcase struct {
pat string
name string
flags int
match bool
}
var testcases = []testcase{
{"", "", 0, true},
{"*", "", 0, true},
{"*", "foo", 0, true},
{"*", "bar", 0, true},
{"*", "*", 0, true},
{"**", "f", 0, true},
{"**", "foo.txt", 0, true},
{"*.*", "foo.txt", 0, true},
{"foo*.txt", "foobar.txt", 0, true},
{"foo.txt", "foo.txt", 0, true},
{"foo.txt", "bar/foo.txt", 0, false},
{"*/foo.txt", "bar/foo.txt", 0, true},
{"f?o.txt", "foo.txt", 0, true},
{"f?o.txt", "fooo.txt", 0, false},
{"f[ab]o.txt", "foo.txt", 0, false},
{"f[ab]o.txt", "fao.txt", 0, true},
{"f[ab]o.txt", "fbo.txt", 0, true},
{"f[ab]o.txt", "fco.txt", 0, false},
{"f[ab]o.txt", "fabo.txt", 0, false},
{"f[ab]o.txt", "f[ab]o.txt", 0, false},
{"f\\[ab\\]o.txt", "f[ab]o.txt", FNM_NOESCAPE, false},
{"*foo.txt", "bar/foo.txt", 0, true},
{"*foo.txt", "bar/foo.txt", FNM_PATHNAME, false},
{"*/foo.txt", "bar/foo.txt", 0, true},
{"*/foo.txt", "bar/foo.txt", FNM_PATHNAME, true},
{"*/foo.txt", "bar/baz/foo.txt", 0, true},
{"*/foo.txt", "bar/baz/foo.txt", FNM_PATHNAME, false},
{"**/foo.txt", "bar/baz/foo.txt", 0, true},
{"**/foo.txt", "bar/baz/foo.txt", FNM_PATHNAME, true},
{"foo.txt", "foo.TXT", 0, false},
{"foo.txt", "foo.TXT", FNM_CASEFOLD, true},
}
func TestMatch(t *testing.T) {
if runtime.GOOS != "windows" {
testcases = append(testcases, testcase{"f\\[ab\\]o.txt", "f[ab]o.txt", 0, true})
testcases = append(testcases, testcase{"foo\\.txt", "foo.txt", 0, true})
testcases = append(testcases, testcase{"foo\\*.txt", "foo*.txt", 0, true})
testcases = append(testcases, testcase{"foo\\.txt", "foo.txt", FNM_NOESCAPE, false})
testcases = append(testcases, testcase{"f\\\\\\[ab\\\\\\]o.txt", "f\\[ab\\]o.txt", 0, true})
}
for _, tc := range testcases {
if m, err := Match(tc.pat, filepath.FromSlash(tc.name), tc.flags); m != tc.match {
if err != nil {
t.Error(err)
} else {
t.Errorf("Match(%q, %q, %d) != %v", tc.pat, tc.name, tc.flags, tc.match)
}
}
}
}
func TestInvalid(t *testing.T) {
if _, err := Match("foo[bar", "...", 0); err == nil {
t.Error("Unexpected nil error")
}
}

View File

@@ -15,22 +15,28 @@ syncthing.config(function ($httpProvider, $translateProvider) {
$httpProvider.defaults.xsrfCookieName = 'CSRF-Token';
$translateProvider.useStaticFilesLoader({
prefix: 'lang-',
prefix: 'lang/lang-',
suffix: '.json'
});
});
syncthing.controller('EventCtrl', function ($scope, $http) {
$scope.lastEvent = null;
var online = false;
var lastID = 0;
var successFn = function (data) {
if (!online) {
$scope.$emit('UIOnline');
online = true;
// When Syncthing restarts while the long polling connection is in
// progress, the browser on some platforms returns a 200 (since the
// headers have already been flushed with status code 200) but with no
// data. This basically means that the connection has been reset and the
// call was not actually successful.
if (!data) {
errorFn(data);
return;
}
$scope.$emit('UIOnline');
if (lastID > 0) {
data.forEach(function (event) {
console.log("event", event.id, event.type, event.data);
@@ -49,10 +55,8 @@ syncthing.controller('EventCtrl', function ($scope, $http) {
};
var errorFn = function (data) {
if (online) {
$scope.$emit('UIOffline');
online = false;
}
$scope.$emit('UIOffline');
setTimeout(function () {
$http.get(urlbase + '/events?limit=1')
.success(successFn)
@@ -68,6 +72,8 @@ syncthing.controller('EventCtrl', function ($scope, $http) {
syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $location) {
var prevDate = 0;
var getOK = true;
var navigatingAway = false;
var online = false;
var restarting = false;
$scope.completion = {};
@@ -84,18 +90,44 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.repos = {};
$scope.seenError = '';
$scope.upgradeInfo = {};
$scope.stats = {};
$http.get(urlbase+"/lang").success(function (langs) {
var lang;
// Find the first language in the list provided by the user's browser
// that is a prefix of a language we have available. That is, "en"
// sent by the browser will match "en" or "en-US", while "zh-TW" will
// match only "zh-TW" and not "zh-CN".
var lang, matching;
for (var i = 0; i < langs.length; i++) {
lang = langs[i];
if (validLangs.indexOf(lang) >= 0) {
$translate.use(lang);
break;
if (lang.length < 2) {
continue;
}
matching = validLangs.filter(function (possibleLang) {
// The langs returned by the /rest/langs call will be in lower
// case. We compare to the lowercase version of the language
// code we have as well.
possibleLang = possibleLang.toLowerCase()
if (possibleLang.length > lang.length) {
return possibleLang.indexOf(lang) == 0;
} else {
return lang.indexOf(possibleLang) == 0;
}
});
if (matching.length >= 1) {
$translate.use(matching[0]);
return;
}
}
// Fallback if nothing matched
$translate.use("en");
})
$(window).bind('beforeunload', function() {
navigatingAway = true;
});
$scope.$on("$locationChangeSuccess", function () {
var lang = $location.search().lang;
if (lang) {
@@ -117,8 +149,13 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
$scope.$on('UIOnline', function (event, arg) {
if (online && !restarting) {
return;
}
console.log('UIOnline');
$scope.init();
online = true;
restarting = false;
$('#networkError').modal('hide');
$('#restarting').modal('hide');
@@ -126,9 +163,14 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
});
$scope.$on('UIOffline', function (event, arg) {
if (navigatingAway || !online) {
return;
}
console.log('UIOffline');
online = false;
if (!restarting) {
$('#networkError').modal({backdrop: 'static', keyboard: false});
$('#networkError').modal();
}
});
@@ -157,6 +199,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.$on('NodeDisconnected', function (event, arg) {
delete $scope.connections[arg.data.id];
refreshNodeStats();
});
$scope.$on('NodeConnected', function (event, arg) {
@@ -188,12 +231,20 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
document.cookie = "firstVisit=" + Date.now() + ";max-age=" + 30*24*3600;
} else {
if (+firstVisit < Date.now() - 4*3600*1000){
$('#ur').modal({backdrop: 'static', keyboard: false});
$('#ur').modal();
}
}
}
})
$scope.$on('ConfigSaved', function (event, arg) {
updateLocalConfig(arg.data);
$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
});
var debouncedFuncs = {};
function refreshRepo(repo) {
@@ -209,6 +260,33 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
debouncedFuncs[key]();
}
function updateLocalConfig(config) {
var hasConfig = !isEmptyObject($scope.config);
$scope.config = config;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
$scope.nodes = $scope.config.Nodes;
$scope.nodes.forEach(function (nodeCfg) {
$scope.completion[nodeCfg.NodeID] = {
_total: 100,
};
});
$scope.nodes.sort(nodeCompare);
$scope.repos = repoMap($scope.config.Repositories);
Object.keys($scope.repos).forEach(function (repo) {
refreshRepo(repo);
$scope.repos[repo].Nodes.forEach(function (nodeCfg) {
refreshCompletion(nodeCfg.NodeID, repo);
});
});
if (!hasConfig) {
$scope.$emit('ConfigLoaded');
}
}
function refreshSystem() {
$http.get(urlbase + '/system').success(function (data) {
$scope.myID = data.myID;
@@ -281,31 +359,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
function refreshConfig() {
$http.get(urlbase + '/config').success(function (data) {
var hasConfig = !isEmptyObject($scope.config);
$scope.config = data;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
$scope.nodes = $scope.config.Nodes;
$scope.nodes.forEach(function (nodeCfg) {
$scope.completion[nodeCfg.NodeID] = {
_total: 100,
};
});
$scope.nodes.sort(nodeCompare);
$scope.repos = repoMap($scope.config.Repositories);
Object.keys($scope.repos).forEach(function (repo) {
refreshRepo(repo);
$scope.repos[repo].Nodes.forEach(function (nodeCfg) {
refreshCompletion(nodeCfg.NodeID, repo);
});
});
if (!hasConfig) {
$scope.$emit('ConfigLoaded');
}
updateLocalConfig(data);
console.log("refreshConfig", data);
});
@@ -314,10 +368,18 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
});
}
var refreshNodeStats = debounce(function () {
$http.get(urlbase+"/stats/node").success(function (data) {
$scope.stats = data;
console.log("refreshNodeStats", data);
});
}, 500);
$scope.init = function() {
refreshSystem();
refreshConfig();
refreshConnectionStats();
refreshNodeStats();
$http.get(urlbase + '/version').success(function (data) {
$scope.version = data;
@@ -426,17 +488,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
return '';
};
$scope.nodeVer = function (nodeCfg) {
if (nodeCfg.NodeID === $scope.myID) {
return $scope.version;
}
var conn = $scope.connections[nodeCfg.NodeID];
if (conn) {
return conn.ClientVersion;
}
return '?';
};
$scope.findNode = function (nodeID) {
var matches = $scope.nodes.filter(function (n) { return n.NodeID == nodeID; });
if (matches.length != 1) {
@@ -470,8 +521,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
// Make a working copy
$scope.tmpOptions = angular.copy($scope.config.Options);
$scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
$scope.tmpOptions.NodeName = $scope.thisNode().Name;
$scope.tmpGUI = angular.copy($scope.config.GUI);
$('#settings').modal({backdrop: 'static', keyboard: true});
$('#settings').modal();
};
$scope.saveConfig = function() {
@@ -502,6 +554,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
// Apply new settings locally
$scope.thisNode().Name = $scope.tmpOptions.NodeName;
$scope.config.Options = angular.copy($scope.tmpOptions);
$scope.config.GUI = angular.copy($scope.tmpGUI);
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
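Note the round trip on listen addresses: updateLocalConfig joins Options.ListenAddress into the comma-separated Options.ListenStr shown in the settings dialog, and saveConfig splits it back and trims each entry before saving. A small sketch of that round trip with illustrative values:

// Sketch (not part of the diff); the addresses are illustrative.
var addrs = ['0.0.0.0:22000', '10.0.0.2:22000'];
var str   = addrs.join(', ');                                       // shown in the dialog
var back  = str.split(',').map(function (x) { return x.trim(); });  // saved again
// back has the same elements as addrs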
@@ -514,7 +567,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.restart = function () {
restarting = true;
$('#restarting').modal({backdrop: 'static', keyboard: false});
$('#restarting').modal();
$http.post(urlbase + '/restart');
$scope.configInSync = true;
@@ -536,9 +589,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.upgrade = function () {
restarting = true;
$('#upgrading').modal({backdrop: 'static', keyboard: false});
$('#upgrading').modal();
$http.post(urlbase + '/upgrade').success(function () {
$('#restarting').modal({backdrop: 'static', keyboard: false});
$('#restarting').modal();
$('#upgrading').modal('hide');
}).error(function () {
$('#upgrading').modal('hide');
@@ -548,7 +601,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.shutdown = function () {
restarting = true;
$http.post(urlbase + '/shutdown').success(function () {
$('#shutdown').modal({backdrop: 'static', keyboard: false});
$('#shutdown').modal();
});
$scope.configInSync = true;
};
@@ -559,7 +612,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.editingSelf = (nodeCfg.NodeID == $scope.myID);
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
$scope.nodeEditor.$setPristine();
$('#editNode').modal({backdrop: 'static', keyboard: true});
$('#editNode').modal();
};
$scope.idNode = function () {
@@ -571,7 +624,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.editingExisting = false;
$scope.editingSelf = false;
$scope.nodeEditor.$setPristine();
$('#editNode').modal({backdrop: 'static', keyboard: true});
$('#editNode').modal();
};
$scope.deleteNode = function () {
@@ -674,19 +727,44 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
});
if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "simple") {
$scope.currentRepo.simpleFileVersioning = true;
$scope.currentRepo.FileVersioningSelector = "simple";
$scope.currentRepo.simpleKeep = +$scope.currentRepo.Versioning.Params.keep;
} else if ($scope.currentRepo.Versioning && $scope.currentRepo.Versioning.Type === "staggered") {
$scope.currentRepo.staggeredFileVersioning = true;
$scope.currentRepo.FileVersioningSelector = "staggered";
$scope.currentRepo.staggeredMaxAge = Math.floor(+$scope.currentRepo.Versioning.Params.maxAge / 86400);
$scope.currentRepo.staggeredCleanInterval = +$scope.currentRepo.Versioning.Params.cleanInterval;
$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.Versioning.Params.versionsPath;
} else {
$scope.currentRepo.FileVersioningSelector = "none";
}
$scope.currentRepo.simpleKeep = $scope.currentRepo.simpleKeep || 5;
$scope.currentRepo.staggeredCleanInterval = $scope.currentRepo.staggeredCleanInterval || 3600;
$scope.currentRepo.staggeredVersionsPath = $scope.currentRepo.staggeredVersionsPath || "";
// staggeredMaxAge can validly be zero, which we should not replace
// with the default value of 365. So only set the default if it's
// actually undefined.
if (typeof $scope.currentRepo.staggeredMaxAge === 'undefined') {
$scope.currentRepo.staggeredMaxAge = 365;
}
$scope.editingExisting = true;
$scope.repoEditor.$setPristine();
$('#editRepo').modal({backdrop: 'static', keyboard: true});
$('#editRepo').modal();
};
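When an existing repo is opened for editing, the stored Versioning block is unpacked into flat editor fields: FileVersioningSelector records which scheme is active, simpleKeep is taken from Params.keep, and the staggered maxAge, stored in seconds, is converted to whole days by dividing by 86400. Defaults are then filled in for whichever fields are unset, with the deliberate exception that a maxAge of zero is kept rather than replaced by the 365-day default. A short sketch of the conversion in both directions:

// Sketch (not part of the diff); the stored value is illustrative.
var SECONDS_PER_DAY = 86400;
var params = { maxAge: '31536000' };                             // 365 days, as stored
var maxAgeDays = Math.floor(+params.maxAge / SECONDS_PER_DAY);   // -> 365, shown in the dialog
params.maxAge = '' + (maxAgeDays * SECONDS_PER_DAY);             // -> '31536000', written back on save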
$scope.addRepo = function () {
$scope.currentRepo = {selectedNodes: {}};
$scope.currentRepo.RescanIntervalS = 60;
$scope.currentRepo.FileVersioningSelector = "none";
$scope.currentRepo.simpleKeep = 5;
$scope.currentRepo.staggeredMaxAge = 365;
$scope.currentRepo.staggeredCleanInterval = 3600;
$scope.currentRepo.staggeredVersionsPath = "";
$scope.editingExisting = false;
$scope.repoEditor.$setPristine();
$('#editRepo').modal({backdrop: 'static', keyboard: true});
$('#editRepo').modal();
};
$scope.saveRepo = function () {
@@ -703,7 +781,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
}
delete repoCfg.selectedNodes;
if (repoCfg.simpleFileVersioning) {
if (repoCfg.FileVersioningSelector === "simple") {
repoCfg.Versioning = {
'Type': 'simple',
'Params': {
@@ -712,6 +790,20 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
};
delete repoCfg.simpleFileVersioning;
delete repoCfg.simpleKeep;
} else if (repoCfg.FileVersioningSelector === "staggered") {
repoCfg.Versioning = {
'Type': 'staggered',
'Params': {
'maxAge': '' + (repoCfg.staggeredMaxAge * 86400),
'cleanInterval': '' + repoCfg.staggeredCleanInterval,
'versionsPath': '' + repoCfg.staggeredVersionsPath,
}
};
delete repoCfg.staggeredFileVersioning;
delete repoCfg.staggeredMaxAge;
delete repoCfg.staggeredCleanInterval;
delete repoCfg.staggeredVersionsPath;
} else {
delete repoCfg.Versioning;
}
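On save, the flat editor fields are folded back into a Versioning block keyed on FileVersioningSelector, and the helper fields are deleted so they do not leak into the stored configuration; selecting "none" simply drops the Versioning block. The staggered parameters are serialised as strings, with maxAge converted from days back to seconds. A sketch of the object produced for a staggered repo using the add-repo defaults above (365 days, hourly cleaning):

// Sketch (not part of the diff): the Versioning block built for the defaults.
var versioning = {
    Type: 'staggered',
    Params: {
        maxAge: '31536000',     // 365 * 86400, days converted to seconds, as a string
        cleanInterval: '3600',  // seconds between cleanup runs, as a string
        versionsPath: ''        // empty: leave the versions location to the versioner
    }
};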
@@ -747,8 +839,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
cfg.APIKey = randomString(30, 32);
};
$scope.acceptUR = function () {
$scope.config.Options.URAccepted = 1000; // Larger than the largest existing report version
$scope.saveConfig();
@@ -763,7 +853,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.showNeed = function (repo) {
$scope.neededLoaded = false;
$('#needed').modal({backdrop: 'static', keyboard: true});
$('#needed').modal();
$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
$scope.needed = data;
$scope.neededLoaded = true;
@@ -786,9 +876,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
};
$scope.override = function (repo) {
$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo)).success(function () {
$scope.refresh();
});
$http.post(urlbase + "/model/override?repo=" + encodeURIComponent(repo));
};
$scope.about = function () {
@@ -799,6 +887,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.reportPreview = true;
};
$scope.rescanRepo = function (repo) {
$http.post(urlbase + "/scan?repo=" + encodeURIComponent(repo));
};
$scope.init();
setInterval($scope.refresh, 10000);
});
@@ -816,10 +908,10 @@ function nodeCompare(a, b) {
}
function repoCompare(a, b) {
if (a.Directory < b.Directory) {
if (a.ID < b.ID) {
return -1;
}
return a.Directory > b.Directory;
return a.ID > b.ID;
}
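repoCompare now orders repositories by ID instead of by Directory. Note the comparator's return style: -1 when a sorts first, otherwise the boolean a.ID > b.ID, which Array.prototype.sort coerces to 1 or 0, so the function still behaves as an ascending comparator. A small usage sketch with illustrative repo objects:

// Sketch (not part of the diff); repo objects reduced to their ID field.
var repos = [{ ID: 'photos' }, { ID: 'docs' }, { ID: 'music' }];
repos.sort(repoCompare);
// -> docs, music, photos (ascending by ID)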
function repoMap(l) {
@@ -881,9 +973,9 @@ function debounce(func, wait) {
} else {
timeout = null;
if (again) {
again = false;
result = func.apply(context, args);
context = args = null;
again = false;
}
}
};
@@ -953,12 +1045,6 @@ syncthing.filter('metric', function () {
};
});
syncthing.filter('short', function () {
return function (input) {
return input.substr(0, 6);
};
});
syncthing.filter('alwaysNumber', function () {
return function (input) {
if (input === undefined) {
@@ -968,18 +1054,6 @@ syncthing.filter('alwaysNumber', function () {
};
});
syncthing.filter('shortPath', function () {
return function (input) {
if (input === undefined)
return "";
var parts = input.split(/[\/\\]/);
if (!parts || parts.length <= 3) {
return input;
}
return ".../" + parts.slice(parts.length-2).join("/");
};
});
syncthing.filter('basename', function () {
return function (input) {
if (input === undefined)
@@ -992,24 +1066,6 @@ syncthing.filter('basename', function () {
};
});
syncthing.filter('clean', function () {
return function (input) {
return encodeURIComponent(input).replace(/%/g, '');
};
});
syncthing.directive('optionEditor', function () {
return {
restrict: 'C',
replace: true,
transclude: true,
scope: {
setting: '=setting',
},
template: '<input type="text" ng-model="config.Options[setting.id]"></input>',
};
});
syncthing.directive('uniqueRepo', function() {
return {
require: 'ngModel',

BIN gui/font/raleway-500.woff Normal file

Binary file not shown.

6 gui/font/raleway.css Normal file

@@ -0,0 +1,6 @@
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 500;
src: local('Raleway'), url(raleway-500.woff) format('woff');
}

Some files were not shown because too many files have changed in this diff.