Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-08 05:49:17 -05:00)

Compare commits: v0.9.0-bet ... v0.10.6 (687 commits)
.gitignore (vendored, 4 changed lines)
@@ -4,7 +4,9 @@ syncthing.exe
*.zip
*.asc
*.sublime*
discosrv
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml

.travis.yml (20 changed lines)
@@ -1,20 +0,0 @@
language: go

go:
- tip

install:
- export PATH=$PATH:$HOME/gopath/bin
- ./build.sh setup
- go get code.google.com/p/go.tools/cmd/cover
- go get github.com/mattn/goveralls

script:
- ./build.sh test-cov

after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/syncthing -repotoken="$COVERALS_TOKEN"

env:
global:
secure: "zEV2h2XtKHNLVdXJjM4LA/VjMfLVydm6goF+ARit+nOSGxGoH7f7jIdzJzhxgh7shKG93q61eLO1Tug+WBMYB2EpBuYnTB5AIMYhCDwNI8C4uBV6c3brHfcrie7MASNao8TID2QScASKNFFWvjv/i1Ccn5ztxdcQuhSsNjGZp8A="

AUTHORS (new file, 29 lines)
@@ -0,0 +1,29 @@
# This is the official list of Syncthing authors for copyright purposes.

Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Caleb Callaway <enlightened.despot@gmail.com>
Chris Joel <chris@scriptolo.gy>
Daniel Martí <mvdan@mvdan.cc>
Emil Hessman <emil@hessman.se>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Gilli Sigurdsson <gilli@vx.is>
Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Lode Hoste <zillode@zillode.be>
Marcin Dziadus <dziadus.marcin@gmail.com>
Michael Tilli <pyfisch@gmail.com>
Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Ryan Sullivan <kayoticsully@gmail.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Vil Brekin <vilbrekin@gmail.com>

@@ -1,14 +1,73 @@
Please do contribute! If you want to contribute but are unsure where to
start, the [Contributions Needed
topic](http://discourse.syncthing.net/t/contributions-needed/49)
lists areas in need of attention.
## Reporting Bugs

Please file bugs in the [Github Issue
Tracker](https://github.com/syncthing/syncthing/issues). Include at
least the following:

- What happened

- What did you expect to happen instead of what *did* happen, if it's
not crazy obvious

- What operating system, operating system version and version of
Syncthing you are running

- The same for other connected devices, where relevant

- Screenshot if the issue concerns something visible in the GUI

- Console log entries, where possible and relevant

If you're not sure whether something is relevant, erring on the side of
too much information will never get you yelled at. :)

## Contributing Translations

All translations are done via
[Transifex](https://www.transifex.com/projects/p/syncthing/). If you
wish to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the
latest info on Transifex.

## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://discourse.syncthing.net/t/733); it's
all in the name of quality. :) Following the points below will make this
a smoother process.

## Coding Style

- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
as much as makes sense.

- All text files use Unix line endings.

- Each commit should be `go fmt` clean.

- The commit message subject should be a single short sentence
describing the change, starting with a capital letter.

- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject.

- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line.

- A contribution solving a single issue or introducing a single new
feature should probably be a single commit based on the current
`master` branch. You may be asked to "rebase" or "squash" your pull
request to make sure this is the case, especially if there have been
amendments during review.

## Licensing

All contributions are made under the same MIT License as the rest of the
project, except documentation which is licensed under the Creative
Commons Attribution 4.0 International License. You retain the copyright
to code you have written.
All contributions are made under the same GPL license as the rest of the
project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.

When accepting your first contribution, the maintainer of the project
will ensure that you are added to the CONTRIBUTORS file. You are welcome
@@ -16,8 +75,8 @@ to add yourself as a separate commit in your first pull request.

## Building

[See the
documentation](http://discourse.syncthing.net/t/building-syncthing/44)
[See the documentation](http://discourse.syncthing.net/t/44) on how to
get started with a build environment.

## Branches

@@ -44,7 +103,9 @@ Yes please!

## Style

`go fmt`
- `go fmt`

- Unix line breaks

## Documentation

@@ -52,5 +113,4 @@ Yes please!

## License

MIT

GPLv3

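The coding-style list in the hunk above asks for `goimports`-style import grouping: standard library first, third-party packages after a blank line. A minimal sketch of what that convention looks like in practice; the third-party package is one from the Godeps list further down, and the call made with it is purely illustrative, not code from this repository:

```go
package example

import (
	// Standard library imports come first.
	"fmt"
	"time"

	// Third-party imports follow after a blank line.
	"github.com/juju/ratelimit"
)

func demo() {
	// Token bucket limited to roughly 100 operations per second, burst of 100.
	bucket := ratelimit.NewBucketWithRate(100, 100)
	bucket.Wait(1)
	fmt.Println("got a token at", time.Now())
}
```

Running `goimports` on a file will produce (and maintain) exactly this grouping, which is why the guideline phrases the rule in terms of that tool.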
CONTRIBUTORS (10 changed lines)
@@ -1,10 +0,0 @@
Aaron Bieber <qbit@deftly.net>
Andrew Dunham <andrew@du.nham.ca>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
James Patterson <jamespatterson@operamail.com>
Jens Diemer <github.com@jensdiemer.de>
Philippe Schommers <philippe@schommers.be>
Ryan Sullivan <kayoticsully@gmail.com>
Veeti Paananen <veeti.paananen@rojekti.fi>

Godeps/Godeps.json (generated, 56 changed lines)
@@ -1,45 +1,49 @@
{
"ImportPath": "github.com/calmh/syncthing",
"GoVersion": "go1.3",
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.3.3",
"Packages": [
"./cmd/syncthing",
"./cmd/assets",
"./discover/cmd/discosrv"
"./cmd/..."
],
"Deps": [
{
"ImportPath": "bitbucket.org/kardianos/osext",
"Comment": "null-13",
"Rev": "5d3ddcf53a508cc2f7404eaebf546ef2cb5cdb6e"
},
{
"ImportPath": "code.google.com/p/go.crypto/bcrypt",
"Comment": "null-212",
"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.crypto/blowfish",
"Comment": "null-212",
"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.text/transform",
"Comment": "null-87",
"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/go.text/unicode/norm",
"Comment": "null-87",
"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/snappy-go/snappy",
"Comment": "null-15",
"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
"ImportPath": "github.com/AudriusButkevicius/lfu-go",
"Rev": "164bcecceb92fd6037f4d18a8d97b495ec6ef669"
},
{
"ImportPath": "github.com/golang/groupcache/lru",
"Rev": "a531d51b7f9f3dd13c1c2b50d42d739b70442dbb"
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
},
{
"ImportPath": "github.com/calmh/logger",
"Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
},
{
"ImportPath": "github.com/calmh/osext",
"Rev": "9bf61584e5f1f172e8766ddc9022d9c401faaa5e"
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "ec3d404f43731551258977b38dd72cf557d00398"
},
{
"ImportPath": "github.com/juju/ratelimit",
@@ -47,7 +51,11 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "e1f2d2bdccd7c62f4d4a29aaf081bf1fc4404f91"
"Rev": "d8d1d2a5cc2d34c950dffa2f554525415d59f737"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
},
{
"ImportPath": "github.com/vitrun/qart/coding",

Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/block.go (generated, vendored, 95 changed lines)
@@ -4,6 +4,22 @@

package blowfish

// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}

// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
@@ -12,6 +28,7 @@ package blowfish
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
@@ -54,86 +71,44 @@ func ExpandKey(key []byte, c *Cipher) {
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
c.p[i] ^= getNextWord(key, &j)
}

j = 0
var expandedSalt [4]uint32
for i := range expandedSalt {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(salt[j])
j++
if j >= len(salt) {
j = 0
}
}
expandedSalt[i] = d
}

var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= expandedSalt[i&2]
r ^= expandedSalt[(i&2)+1]
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}

for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r

l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s0[i+2], c.s0[i+3] = l, r

}

for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r

l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s1[i+2], c.s1[i+3] = l, r
}

for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r

l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s2[i+2], c.s2[i+3] = l, r
}

for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r

l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s3[i+2], c.s3[i+3] = l, r
}
}

@@ -182,9 +157,3 @@ func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xr ^= c.p[0]
return xr, xl
}

func zero(x []uint32) {
for i := range x {
x[i] = 0
}
}

Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/blowfish_test.go (generated, vendored, 76 changed lines)
@@ -4,9 +4,7 @@

package blowfish

import (
"testing"
)
import "testing"

type CryptTest struct {
key []byte
@@ -202,3 +200,75 @@ func TestSaltedCipherKeyLength(t *testing.T) {
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
}
}

// Test vectors generated with Blowfish from OpenSSH.
var saltedVectors = [][8]byte{
{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
}

func TestSaltedCipher(t *testing.T) {
var key, salt [32]byte
for i := range key {
key[i] = byte(i)
salt[i] = byte(i + 32)
}
for i, v := range saltedVectors {
c, err := NewSaltedCipher(key[:], salt[:i])
if err != nil {
t.Fatal(err)
}
var buf [8]byte
c.Encrypt(buf[:], buf[:])
if v != buf {
t.Errorf("%d: expected %x, got %x", i, v, buf)
}
}
}

func BenchmarkExpandKeyWithSalt(b *testing.B) {
key := make([]byte, 32)
salt := make([]byte, 16)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
expandKeyWithSalt(key, salt, c)
}
}

func BenchmarkExpandKey(b *testing.B) {
key := make([]byte, 32)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
ExpandKey(key, c)
}
}

Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/cipher.go (generated, vendored, 5 changed lines)
@@ -40,8 +40,11 @@ func NewCipher(key []byte) (*Cipher, error) {
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
// bytes. Only the first 16 bytes of salt are used.
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)

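The cipher.go hunk drops the "Only the first 16 bytes of salt are used" sentence, matching the block.go change above where the full salt is now consumed circularly via getNextWord. As a hedged usage sketch of the package's public API (shown with the modern golang.org/x/crypto/blowfish import path rather than the vendored code.google.com path; key and salt values are made up for illustration):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/blowfish" // current home of the vendored code.google.com/p/go.crypto/blowfish
)

func main() {
	key := []byte("an example key; for bcrypt-style use it may exceed 56 bytes")
	salt := []byte("longer-than-sixteen-bytes-is-now-fine") // the whole salt is folded into the key schedule

	c, err := blowfish.NewSaltedCipher(key, salt)
	if err != nil {
		log.Fatal(err)
	}

	src := []byte("8 bytes!") // Blowfish is a 64-bit block cipher
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)
	fmt.Printf("ciphertext block: %x\n", dst)
}
```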
Godeps/_workspace/src/code.google.com/p/go.text/transform/transform.go (generated, vendored, 146 changed lines)
@@ -9,6 +9,7 @@
package transform

import (
"bytes"
"errors"
"io"
"unicode/utf8"
@@ -127,7 +128,7 @@ func (r *Reader) Read(p []byte) (int, error) {
// cannot read more bytes into src.
r.transformComplete = r.err != nil
continue
case err == ErrShortDst && r.dst1 != 0:
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
// Make room in dst by copying out, and try again.
continue
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
@@ -210,7 +211,7 @@ func (w *Writer) Write(data []byte) (n int, err error) {
n += nSrc
}
switch {
case err == ErrShortDst && nDst > 0:
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
case err == ErrShortSrc && len(src) < len(w.src):
m := copy(w.src, src)
// If w.n > 0, bytes from data were already copied to w.src and n
@@ -467,30 +468,125 @@ func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err err
return
}

// Bytes returns a new byte slice with the result of converting b using t.
// If any unrecoverable error occurs it returns nil.
func Bytes(t Transformer, b []byte) []byte {
out := make([]byte, len(b))
n := 0
for {
nDst, nSrc, err := t.Transform(out[n:], b, true)
n += nDst
if err == nil {
return out[:n]
} else if err != ErrShortDst {
return nil
}
b = b[nSrc:]
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
m := len(b)
if m <= 256 {
m *= 2
} else {
m += m >> 1
}
buf := make([]byte, m)
copy(buf, b[:n])
return buf
}

// Grow the destination buffer.
sz := len(out)
if sz <= 256 {
sz *= 2
} else {
sz += sz >> 1
const initialBufSize = 128

// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s).
func String(t Transformer, s string) (result string, n int, err error) {
if s == "" {
return "", 0, nil
}

// Allocate only once. Note that both dst and src escape when passed to
// Transform.
buf := [2 * initialBufSize]byte{}
dst := buf[:initialBufSize:initialBufSize]
src := buf[initialBufSize : 2*initialBufSize]

// Avoid allocation if the transformed string is identical to the original.
// After this loop, pDst will point to the furthest point in s for which it
// could be detected that t gives equal results, src[:nSrc] will
// indicated the last processed chunk of s for which the output is not equal
// and dst[:nDst] will be the transform of this chunk.
var nDst, nSrc int
pDst := 0 // Used as index in both src and dst in this loop.
for {
n := copy(src, s[pDst:])
nDst, nSrc, err = t.Transform(dst, src[:n], pDst+n == len(s))

// Note 1: we will not enter the loop with pDst == len(s) and we will
// not end the loop with it either. So if nSrc is 0, this means there is
// some kind of error from which we cannot recover given the current
// buffer sizes. We will give up in this case.
// Note 2: it is not entirely correct to simply do a bytes.Equal as
// a Transformer may buffer internally. It will work in most cases,
// though, and no harm is done if it doesn't work.
// TODO: let transformers implement an optional Spanner interface, akin
// to norm's QuickSpan. This would even allow us to avoid any allocation.
if nSrc == 0 || !bytes.Equal(dst[:nDst], src[:nSrc]) {
break
}

if pDst += nDst; pDst == len(s) {
return s, pDst, nil
}
}

// Move the bytes seen so far to dst.
pSrc := pDst + nSrc
if pDst+nDst <= initialBufSize {
copy(dst[pDst:], dst[:nDst])
} else {
b := make([]byte, len(s)+nDst-nSrc)
copy(b[pDst:], dst[:nDst])
dst = b
}
copy(dst, s[:pDst])
pDst += nDst

if err != nil && err != ErrShortDst && err != ErrShortSrc {
return string(dst[:pDst]), pSrc, err
}

// Complete the string with the remainder.
for {
n := copy(src, s[pSrc:])
nDst, nSrc, err = t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
pDst += nDst
pSrc += nSrc

switch err {
case nil:
if pSrc == len(s) {
return string(dst[:pDst]), pSrc, nil
}
case ErrShortDst:
// Do not grow as long as we can make progress. This may avoid
// excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
case ErrShortSrc:
if nSrc == 0 {
src = grow(src, 0)
}
default:
return string(dst[:pDst]), pSrc, err
}
}
}

// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b).
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
dst := make([]byte, len(b))
pDst, pSrc := 0, 0
for {
nDst, nSrc, err := t.Transform(dst[pDst:], b[pSrc:], true)
pDst += nDst
pSrc += nSrc
if err != ErrShortDst {
return dst[:pDst], pSrc, err
}

// Grow the destination buffer, but do not grow as long as we can make
// progress. This may avoid excessive allocations.
if nDst == 0 {
dst = grow(dst, pDst)
}
out2 := make([]byte, sz)
copy(out2, out[:n])
out = out2
}
}

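The transform.go hunk replaces the old nil-on-error Bytes helper with String and Bytes functions that return (result, n, err) and grow their working buffers via grow on ErrShortDst/ErrShortSrc. A hedged sketch of how the new String entry point is typically called, using the modern golang.org/x/text import paths (the vendored tree above lives under code.google.com/p/go.text) and norm.NFC as a ready-made Transformer:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/transform"    // successor of code.google.com/p/go.text/transform
	"golang.org/x/text/unicode/norm" // norm.NFC implements transform.Transformer
)

func main() {
	in := "e\u0301cole" // "école" written with a combining acute accent

	out, n, err := transform.String(norm.NFC, in)
	if err != nil {
		log.Fatal(err)
	}

	// n is the number of input bytes consumed; with err == nil it equals len(in).
	fmt.Printf("%q -> %q (consumed %d of %d bytes)\n", in, out, n, len(in))
}
```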
Godeps/_workspace/src/code.google.com/p/go.text/transform/transform_test.go (generated, vendored, 200 changed lines)
@@ -12,6 +12,7 @@ import (
"strconv"
"strings"
"testing"
"time"
"unicode/utf8"
)

@@ -132,6 +133,43 @@ func (e rleEncode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err e
return nDst, nSrc, nil
}

// trickler consumes all input bytes, but writes a single byte at a time to dst.
type trickler []byte

func (t *trickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
*t = append(*t, src...)
if len(*t) == 0 {
return 0, 0, nil
}
if len(dst) == 0 {
return 0, len(src), ErrShortDst
}
dst[0] = (*t)[0]
*t = (*t)[1:]
if len(*t) > 0 {
err = ErrShortDst
}
return 1, len(src), err
}

// delayedTrickler is like trickler, but delays writing output to dst. This is
// highly unlikely to be relevant in practice, but it seems like a good idea
// to have some tolerance as long as progress can be detected.
type delayedTrickler []byte

func (t *delayedTrickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(*t) > 0 && len(dst) > 0 {
dst[0] = (*t)[0]
*t = (*t)[1:]
nDst = 1
}
*t = append(*t, src...)
if len(*t) > 0 {
err = ErrShortDst
}
return nDst, len(src), err
}

type testCase struct {
desc string
t Transformer
@@ -170,6 +208,15 @@ func (c chain) String() string {
}

var testCases = []testCase{
{
desc: "empty",
t: lowerCaseASCII{},
src: "",
dstSize: 100,
srcSize: 100,
wantStr: "",
},

{
desc: "basic",
t: lowerCaseASCII{},
@@ -378,6 +425,24 @@ var testCases = []testCase{
ioSize: 10,
wantStr: "4a6b2b4c4d1d",
},

{
desc: "trickler",
t: &trickler{},
src: "abcdefghijklm",
dstSize: 3,
srcSize: 15,
wantStr: "abcdefghijklm",
},

{
desc: "delayedTrickler",
t: &delayedTrickler{},
src: "abcdefghijklm",
dstSize: 3,
srcSize: 15,
wantStr: "abcdefghijklm",
},
}

func TestReader(t *testing.T) {
@@ -685,7 +750,7 @@ func doTransform(tc testCase) (res string, iter int, err error) {
switch {
case err == nil && len(in) != 0:
case err == ErrShortSrc && nSrc > 0:
case err == ErrShortDst && nDst > 0:
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
default:
return string(out), iter, err
}
@@ -875,27 +940,136 @@ func TestRemoveFunc(t *testing.T) {
}
}

func TestBytes(t *testing.T) {
func testString(t *testing.T, f func(Transformer, string) (string, int, error)) {
for _, tt := range append(testCases, chainTests()...) {
if tt.desc == "allowStutter = true" {
// We don't have control over the buffer size, so we eliminate tests
// that depend on a specific buffer size being set.
continue
}
got := Bytes(tt.t, []byte(tt.src))
if tt.wantErr != nil {
if tt.wantErr != ErrShortDst && tt.wantErr != ErrShortSrc {
// Bytes should return nil for non-recoverable errors.
if g, w := (got == nil), (tt.wantErr != nil); g != w {
t.Errorf("%s:error: got %v; want %v", tt.desc, g, w)
}
}
// The output strings in the tests that expect an error will
// almost certainly not be the same as the result of Bytes.
reset(tt.t)
if tt.wantErr == ErrShortDst || tt.wantErr == ErrShortSrc {
// The result string will be different.
continue
}
if string(got) != tt.wantStr {
got, n, err := f(tt.t, tt.src)
if tt.wantErr != err {
t.Errorf("%s:error: got %v; want %v", tt.desc, err, tt.wantErr)
}
if got, want := err == nil, n == len(tt.src); got != want {
t.Errorf("%s:n: got %v; want %v", tt.desc, got, want)
}
if got != tt.wantStr {
t.Errorf("%s:string: got %q; want %q", tt.desc, got, tt.wantStr)
}
}
}

func TestBytes(t *testing.T) {
testString(t, func(z Transformer, s string) (string, int, error) {
b, n, err := Bytes(z, []byte(s))
return string(b), n, err
})
}

func TestString(t *testing.T) {
testString(t, String)

// Overrun the internal destination buffer.
for i, s := range []string{
strings.Repeat("a", initialBufSize-1),
strings.Repeat("a", initialBufSize+0),
strings.Repeat("a", initialBufSize+1),
strings.Repeat("A", initialBufSize-1),
strings.Repeat("A", initialBufSize+0),
strings.Repeat("A", initialBufSize+1),
strings.Repeat("A", 2*initialBufSize-1),
strings.Repeat("A", 2*initialBufSize+0),
strings.Repeat("A", 2*initialBufSize+1),
strings.Repeat("a", initialBufSize-2) + "A",
strings.Repeat("a", initialBufSize-1) + "A",
strings.Repeat("a", initialBufSize+0) + "A",
strings.Repeat("a", initialBufSize+1) + "A",
} {
got, _, _ := String(lowerCaseASCII{}, s)
if want := strings.ToLower(s); got != want {
t.Errorf("%d:dst buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
}
}

// Overrun the internal source buffer.
for i, s := range []string{
strings.Repeat("a", initialBufSize-1),
strings.Repeat("a", initialBufSize+0),
strings.Repeat("a", initialBufSize+1),
strings.Repeat("a", 2*initialBufSize+1),
strings.Repeat("a", 2*initialBufSize+0),
strings.Repeat("a", 2*initialBufSize+1),
} {
got, _, _ := String(rleEncode{}, s)
if want := fmt.Sprintf("%da", len(s)); got != want {
t.Errorf("%d:src buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
}
}

// Test allocations for non-changing strings.
// Note we still need to allocate a single buffer.
for i, s := range []string{
"",
"123",
"123456789",
strings.Repeat("a", initialBufSize),
strings.Repeat("a", 10*initialBufSize),
} {
if n := testing.AllocsPerRun(5, func() { String(&lowerCaseASCII{}, s) }); n > 1 {
t.Errorf("%d: #allocs was %f; want 1", i, n)
}
}
}

// TestBytesAllocation tests that buffer growth stays limited with the trickler
// transformer, which behaves oddly but within spec. In case buffer growth is
// not correctly handled, the test will either panic with a failed allocation or
// thrash. To ensure the tests terminate under the last condition, we time out
// after some sufficiently long period of time.
func TestBytesAllocation(t *testing.T) {
done := make(chan bool)
go func() {
in := bytes.Repeat([]byte{'a'}, 1000)
tr := trickler(make([]byte, 1))
Bytes(&tr, in)
done <- true
}()
select {
case <-done:
case <-time.After(3 * time.Second):
t.Error("time out, likely due to excessive allocation")
}
}

// TestStringAllocation tests that buffer growth stays limited with the trickler
// transformer, which behaves oddly but within spec. In case buffer growth is
// not correctly handled, the test will either panic with a failed allocation or
// thrash. To ensure the tests terminate under the last condition, we time out
// after some sufficiently long period of time.
func TestStringAllocation(t *testing.T) {
done := make(chan bool)
go func() {
in := strings.Repeat("a", 1000)
tr := trickler(make([]byte, 1))
String(&tr, in)
done <- true
}()
select {
case <-done:
case <-time.After(3 * time.Second):
t.Error("time out, likely due to excessive allocation")
}
}

func BenchmarkStringLower(b *testing.B) {
in := strings.Repeat("a", 4096)
for i := 0; i < b.N; i++ {
String(&lowerCaseASCII{}, in)
}
}

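The test hunks above exercise the Transformer contract through helpers such as lowerCaseASCII and trickler. As a hedged illustration of that same contract (not code from the repository), here is a minimal Transformer that upper-cases ASCII and reports ErrShortDst when the destination is too small, written against the modern golang.org/x/text/transform path:

```go
package main

import (
	"fmt"

	"golang.org/x/text/transform"
)

// upperASCII upper-cases ASCII letters byte by byte. The embedded NopResetter
// supplies the Reset method required by the current Transformer interface.
type upperASCII struct{ transform.NopResetter }

func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	n := len(src)
	if n > len(dst) {
		// Only part of src fits: convert what we can and ask the caller to
		// flush dst and call us again, per the ErrShortDst protocol.
		n = len(dst)
		err = transform.ErrShortDst
	}
	for i := 0; i < n; i++ {
		c := src[i]
		if 'a' <= c && c <= 'z' {
			c -= 'a' - 'A'
		}
		dst[i] = c
	}
	return n, n, err
}

func main() {
	out, _, _ := transform.String(upperASCII{}, "syncthing v0.10.x")
	fmt.Println(out) // SYNCTHING V0.10.X
}
```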
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/maketables.go (generated, vendored, 212 changed lines)
@@ -11,7 +11,6 @@
package main

import (
"bufio"
"bytes"
"flag"
"fmt"
@@ -24,6 +23,8 @@ import (
"strconv"
"strings"
"unicode"

"code.google.com/p/go.text/internal/ucd"
)

func main() {
@@ -63,31 +64,7 @@ var localFiles = flag.Bool("local",

var logger = log.New(os.Stderr, "", log.Lshortfile)

// UnicodeData.txt has form:
// 0037;DIGIT SEVEN;Nd;0;EN;;7;7;7;N;;;;;
// 007A;LATIN SMALL LETTER Z;Ll;0;L;;;;;N;;;005A;;005A
// See http://unicode.org/reports/tr44/ for full explanation
// The fields:
const (
FCodePoint = iota
FName
FGeneralCategory
FCanonicalCombiningClass
FBidiClass
FDecompMapping
FDecimalValue
FDigitValue
FNumericValue
FBidiMirrored
FUnicode1Name
FISOComment
FSimpleUppercaseMapping
FSimpleLowercaseMapping
FSimpleTitlecaseMapping
NumField

MaxChar = 0x10FFFF // anything above this shouldn't exist
)
const MaxChar = 0x10FFFF // anything above this shouldn't exist

// Quick Check properties of runes allow us to quickly
// determine whether a rune may occur in a normal form.
@@ -232,7 +209,7 @@ func openReader(file string) (input io.ReadCloser) {
return
}

func parseDecomposition(s string, skipfirst bool) (a []rune, e error) {
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
decomp = decomp[1:]
@@ -247,56 +224,31 @@ func parseDecomposition(s string, skipfirst bool) (a []rune, e error) {
return a, nil
}

func parseCharacter(line string) {
field := strings.Split(line, ";")
if len(field) != NumField {
logger.Fatalf("%5s: %d fields (expected %d)\n", line, len(field), NumField)
}
x, err := strconv.ParseUint(field[FCodePoint], 16, 64)
point := int(x)
if err != nil {
logger.Fatalf("%.5s...: %s", line, err)
}
if point == 0 {
return // not interesting and we use 0 as unset
}
if point > MaxChar {
logger.Fatalf("%5s: Rune %X > MaxChar (%X)", line, point, MaxChar)
return
}
state := SNormal
switch {
case strings.Index(field[FName], ", First>") > 0:
state = SFirst
case strings.Index(field[FName], ", Last>") > 0:
state = SLast
}
firstChar := lastChar + 1
lastChar = rune(point)
if state != SLast {
firstChar = lastChar
}
x, err = strconv.ParseUint(field[FCanonicalCombiningClass], 10, 64)
if err != nil {
logger.Fatalf("%U: bad ccc field: %s", int(x), err)
}
ccc := uint8(x)
decmap := field[FDecompMapping]
exp, e := parseDecomposition(decmap, false)
isCompat := false
if e != nil {
if len(decmap) > 0 {
exp, e = parseDecomposition(decmap, true)
if e != nil {
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, int(x), decmap, e)
func loadUnicodeData() {
f := openReader("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
r := p.Rune(ucd.CodePoint)
char := &chars[r]

char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
decmap := p.String(ucd.DecompMapping)

exp, err := parseDecomposition(decmap, false)
isCompat := false
if err != nil {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
isCompat = true
}
}
for i := firstChar; i <= lastChar; i++ {
char := &chars[i]
char.name = field[FName]
char.codePoint = i

char.name = p.String(ucd.Name)
char.codePoint = r
char.forms[FCompatibility].decomp = exp
if !isCompat {
char.forms[FCanonical].decomp = exp
@@ -306,24 +258,9 @@ func parseCharacter(line string) {
if len(decmap) > 0 {
char.forms[FCompatibility].decomp = exp
}
char.ccc = ccc
char.state = SMissing
if i == lastChar {
char.state = state
}
}
return
}

func loadUnicodeData() {
f := openReader("UnicodeData.txt")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
parseCharacter(scanner.Text())
}
if scanner.Err() != nil {
logger.Fatal(scanner.Err())
if err := p.Err(); err != nil {
logger.Fatal(err)
}
}

@@ -354,47 +291,22 @@ func compactCCC() {
}
}

var singlePointRe = regexp.MustCompile(`^([0-9A-F]+) *$`)

// CompositionExclusions.txt has form:
// 0958 # ...
// See http://unicode.org/reports/tr44/ for full explanation
func parseExclusion(line string) int {
comment := strings.Index(line, "#")
if comment >= 0 {
line = line[0:comment]
}
if len(line) == 0 {
return 0
}
matches := singlePointRe.FindStringSubmatch(line)
if len(matches) != 2 {
logger.Fatalf("%s: %d matches (expected 1)\n", line, len(matches))
}
point, err := strconv.ParseUint(matches[1], 16, 64)
if err != nil {
logger.Fatalf("%.5s...: %s", line, err)
}
return int(point)
}

func loadCompositionExclusions() {
f := openReader("CompositionExclusions.txt")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
point := parseExclusion(scanner.Text())
if point == 0 {
continue
}
c := &chars[point]
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
logger.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if scanner.Err() != nil {
log.Fatal(scanner.Err())
if e := p.Err(); e != nil {
logger.Fatal(e)
}
}

@@ -988,8 +900,6 @@ func verifyComputed() {
}
}

var qcRe = regexp.MustCompile(`([0-9A-F\.]+) *; (NF.*_QC); ([YNM]) #.*`)

// Use values in DerivedNormalizationProps.txt to compare against the
// values we computed.
// DerivedNormalizationProps.txt has form:
@@ -999,27 +909,13 @@ var qcRe = regexp.MustCompile(`([0-9A-F\.]+) *; (NF.*_QC); ([YNM]) #.*`)
func testDerived() {
f := openReader("DerivedNormalizationProps.txt")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
qc := qcRe.FindStringSubmatch(line)
if qc == nil {
continue
}
rng := strings.Split(qc[1], "..")
i, err := strconv.ParseUint(rng[0], 16, 64)
if err != nil {
log.Fatal(err)
}
j := i
if len(rng) > 1 {
j, err = strconv.ParseUint(rng[1], 16, 64)
if err != nil {
log.Fatal(err)
}
}
p := ucd.New(f)
for p.Next() {
r := p.Rune(0)
c := &chars[r]

var ftype, mode int
qt := strings.TrimSpace(qc[2])
qt := p.String(1)
switch qt {
case "NFC_QC":
ftype, mode = FCanonical, MComposed
@@ -1030,10 +926,10 @@ func testDerived() {
case "NFKD_QC":
ftype, mode = FCompatibility, MDecomposed
default:
log.Fatalf(`Unexpected quick check type "%s"`, qt)
continue
}
var qr QCResult
switch qc[3] {
switch p.String(2) {
case "Y":
qr = QCYes
case "N":
@@ -1041,27 +937,15 @@ func testDerived() {
case "M":
qr = QCMaybe
default:
log.Fatalf(`Unexpected quick check value "%s"`, qc[3])
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
var lastFailed bool
// Verify current
for ; i <= j; i++ {
c := &chars[int(i)]
c.forms[ftype].verified[mode] = true
curqr := c.forms[ftype].quickCheck[mode]
if curqr != qr {
if !lastFailed {
logger.Printf("%s: %.4X..%.4X -- %s\n",
qt, int(i), int(j), line[0:50])
}
logger.Printf("%U: FAILED %s (was %v need %v)\n",
int(i), qt, curqr, qr)
lastFailed = true
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
logger.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if scanner.Err() != nil {
logger.Fatal(scanner.Err())
if err := p.Err(); err != nil {
logger.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {

Godeps/_workspace/src/github.com/AudriusButkevicius/lfu-go/LICENSE (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
Copyright (C) 2012 Dave Grijalva

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19
Godeps/_workspace/src/github.com/AudriusButkevicius/lfu-go/README.md
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
A simple LFU cache for golang. Based on the paper [An O(1) algorithm for implementing the LFU cache eviction scheme](http://dhruvbird.com/lfu.pdf).
|
||||
|
||||
Usage:
|
||||
|
||||
```go
|
||||
import "github.com/dgrijalva/lfu-go"
|
||||
|
||||
// Make a new thing
|
||||
c := lfu.New()
|
||||
|
||||
// Set some values
|
||||
c.Set("myKey", myValue)
|
||||
|
||||
// Retrieve some values
|
||||
myValue = c.Get("myKey")
|
||||
|
||||
// Evict some values
|
||||
c.Evict(1)
|
||||
```
|
||||
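The lfu.go and lfu_test.go files added below also show the bounds-management and eviction-channel features. A minimal sketch of wiring those together, assuming the vendored fork's import path (github.com/AudriusButkevicius/lfu-go) and the exported fields visible in lfu.go (UpperBound, LowerBound, EvictionChannel):

```go
package main

import (
	"fmt"

	lfu "github.com/AudriusButkevicius/lfu-go"
)

func main() {
	c := lfu.New()

	// Keep at most 10 entries; once exceeded, evict down to 5.
	c.UpperBound = 10
	c.LowerBound = 5

	// Receive evicted key/value pairs, e.g. to persist them elsewhere.
	// The buffer must be large enough for the evictions produced below,
	// since the cache sends on this channel while holding its lock.
	ch := make(chan lfu.Eviction, 16)
	c.EvictionChannel = ch

	for i := 0; i < 20; i++ {
		c.Set(fmt.Sprintf("key%d", i), i)
	}

	// Drain whatever evictions have been reported so far.
	for len(ch) > 0 {
		ev := <-ch
		fmt.Println("evicted", ev.Key, ev.Value)
	}
	fmt.Println("remaining entries:", c.Len())
}
```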
156
Godeps/_workspace/src/github.com/AudriusButkevicius/lfu-go/lfu.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
package lfu
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Eviction struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
type Cache struct {
|
||||
// If len > UpperBound, cache will automatically evict
|
||||
// down to LowerBound. If either value is 0, this behavior
|
||||
// is disabled.
|
||||
UpperBound int
|
||||
LowerBound int
|
||||
values map[string]*cacheEntry
|
||||
freqs *list.List
|
||||
len int
|
||||
lock *sync.Mutex
|
||||
EvictionChannel chan<- Eviction
|
||||
}
|
||||
|
||||
type cacheEntry struct {
|
||||
key string
|
||||
value interface{}
|
||||
freqNode *list.Element
|
||||
}
|
||||
|
||||
type listEntry struct {
|
||||
entries map[*cacheEntry]byte
|
||||
freq int
|
||||
}
|
||||
|
||||
func New() *Cache {
|
||||
c := new(Cache)
|
||||
c.values = make(map[string]*cacheEntry)
|
||||
c.freqs = list.New()
|
||||
c.lock = new(sync.Mutex)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Cache) Get(key string) interface{} {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if e, ok := c.values[key]; ok {
|
||||
c.increment(e)
|
||||
return e.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) Set(key string, value interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if e, ok := c.values[key]; ok {
|
||||
// value already exists for key. overwrite
|
||||
e.value = value
|
||||
c.increment(e)
|
||||
} else {
|
||||
// value doesn't exist. insert
|
||||
e := new(cacheEntry)
|
||||
e.key = key
|
||||
e.value = value
|
||||
c.values[key] = e
|
||||
c.increment(e)
|
||||
c.len++
|
||||
// bounds mgmt
|
||||
if c.UpperBound > 0 && c.LowerBound > 0 {
|
||||
if c.len > c.UpperBound {
|
||||
c.evict(c.len - c.LowerBound)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) Len() int {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.len
|
||||
}
|
||||
|
||||
func (c *Cache) Evict(count int) int {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
return c.evict(count)
|
||||
}
|
||||
|
||||
func (c *Cache) evict(count int) int {
|
||||
// No lock here so it can be called
|
||||
// from within the lock (during Set)
|
||||
var evicted int
|
||||
for i := 0; i < count; {
|
||||
if place := c.freqs.Front(); place != nil {
|
||||
for entry, _ := range place.Value.(*listEntry).entries {
|
||||
if i < count {
|
||||
if c.EvictionChannel != nil {
|
||||
c.EvictionChannel <- Eviction{
|
||||
Key: entry.key,
|
||||
Value: entry.value,
|
||||
}
|
||||
}
|
||||
delete(c.values, entry.key)
|
||||
c.remEntry(place, entry)
|
||||
evicted++
|
||||
c.len--
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return evicted
|
||||
}
|
||||
|
||||
func (c *Cache) increment(e *cacheEntry) {
|
||||
currentPlace := e.freqNode
|
||||
var nextFreq int
|
||||
var nextPlace *list.Element
|
||||
if currentPlace == nil {
|
||||
// new entry
|
||||
nextFreq = 1
|
||||
nextPlace = c.freqs.Front()
|
||||
} else {
|
||||
// move up
|
||||
nextFreq = currentPlace.Value.(*listEntry).freq + 1
|
||||
nextPlace = currentPlace.Next()
|
||||
}
|
||||
|
||||
if nextPlace == nil || nextPlace.Value.(*listEntry).freq != nextFreq {
|
||||
// create a new list entry
|
||||
li := new(listEntry)
|
||||
li.freq = nextFreq
|
||||
li.entries = make(map[*cacheEntry]byte)
|
||||
if currentPlace != nil {
|
||||
nextPlace = c.freqs.InsertAfter(li, currentPlace)
|
||||
} else {
|
||||
nextPlace = c.freqs.PushFront(li)
|
||||
}
|
||||
}
|
||||
e.freqNode = nextPlace
|
||||
nextPlace.Value.(*listEntry).entries[e] = 1
|
||||
if currentPlace != nil {
|
||||
// remove from current position
|
||||
c.remEntry(currentPlace, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) remEntry(place *list.Element, entry *cacheEntry) {
|
||||
entries := place.Value.(*listEntry).entries
|
||||
delete(entries, entry)
|
||||
if len(entries) == 0 {
|
||||
c.freqs.Remove(place)
|
||||
}
|
||||
}
|
||||
68
Godeps/_workspace/src/github.com/AudriusButkevicius/lfu-go/lfu_test.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
package lfu
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLFU(t *testing.T) {
|
||||
c := New()
|
||||
c.Set("a", "a")
|
||||
if v := c.Get("a"); v != "a" {
|
||||
t.Errorf("Value was not saved: %v != 'a'", v)
|
||||
}
|
||||
if l := c.Len(); l != 1 {
|
||||
t.Errorf("Length was not updated: %v != 1", l)
|
||||
}
|
||||
|
||||
c.Set("b", "b")
|
||||
if v := c.Get("b"); v != "b" {
|
||||
t.Errorf("Value was not saved: %v != 'b'", v)
|
||||
}
|
||||
if l := c.Len(); l != 2 {
|
||||
t.Errorf("Length was not updated: %v != 2", l)
|
||||
}
|
||||
|
||||
c.Get("a")
|
||||
evicted := c.Evict(1)
|
||||
if v := c.Get("a"); v != "a" {
|
||||
t.Errorf("Value was improperly evicted: %v != 'a'", v)
|
||||
}
|
||||
if v := c.Get("b"); v != nil {
|
||||
t.Errorf("Value was not evicted: %v", v)
|
||||
}
|
||||
if l := c.Len(); l != 1 {
|
||||
t.Errorf("Length was not updated: %v != 1", l)
|
||||
}
|
||||
if evicted != 1 {
|
||||
t.Errorf("Number of evicted items is wrong: %v != 1", evicted)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoundsMgmt(t *testing.T) {
|
||||
c := New()
|
||||
c.UpperBound = 10
|
||||
c.LowerBound = 5
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
c.Set(fmt.Sprintf("%v", i), i)
|
||||
}
|
||||
if c.Len() > 10 {
|
||||
t.Errorf("Bounds management failed to evict properly: %v", c.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestEviction(t *testing.T) {
|
||||
ch := make(chan Eviction, 1)
|
||||
|
||||
c := New()
|
||||
c.EvictionChannel = ch
|
||||
c.Set("a", "b")
|
||||
c.Evict(1)
|
||||
|
||||
ev := <-ch
|
||||
|
||||
if ev.Key != "a" || ev.Value.(string) != "b" {
|
||||
t.Error("Incorrect item")
|
||||
}
|
||||
}
|
||||
1
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
/lz4-example/lz4-example
|
||||
7
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- tip
|
||||
24
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
71
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
go-lz4
|
||||
======
|
||||
|
||||
go-lz4 is a port of the LZ4 lossless compression algorithm to Go. The original C code
|
||||
is located at:
|
||||
|
||||
https://code.google.com/p/lz4/
|
||||
|
||||
Status
|
||||
------
|
||||
[](http://travis-ci.org/bkaradzic/go-lz4)
|
||||
[](https://godoc.org/github.com/bkaradzic/go-lz4)
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
go get github.com/bkaradzic/go-lz4
|
||||
|
||||
import "github.com/bkaradzic/go-lz4"
|
||||
|
||||
The package name is `lz4`
|
||||
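A minimal round-trip sketch, assuming only the Encode and Decode signatures exercised by lz4_test.go later in this changeset (both take an optional destination buffer and return the result plus an error):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
	original := []byte("the quick brown fox jumps over the lazy dog; " +
		"the quick brown fox jumps over the lazy dog")

	// Passing nil lets Encode/Decode allocate the destination buffer.
	compressed, err := lz4.Encode(nil, original)
	if err != nil {
		log.Fatal(err)
	}

	decompressed, err := lz4.Decode(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("roundtrip ok:", bytes.Equal(original, decompressed))
}
```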
|
||||
Notes
|
||||
-----
|
||||
|
||||
* go-lz4 saves a uint32 with the original uncompressed length at the beginning
|
||||
of the encoded buffer. This may get in the way of interoperability with
|
||||
other implementations; a sketch of handling the prefix follows below.
|
||||
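A hedged sketch of dealing with that prefix, using the little-endian uint32 layout visible in writer.go and reader.go in this changeset (the first four bytes hold the uncompressed length, the raw LZ4 block follows):

```go
package main

import (
	"encoding/binary"
	"fmt"

	lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
	compressed, _ := lz4.Encode(nil, []byte("hello, hello, hello, hello"))

	// Split off the length header; an implementation expecting a bare
	// block would be handed rawBlock plus uncompressedLen separately.
	uncompressedLen := binary.LittleEndian.Uint32(compressed[:4])
	rawBlock := compressed[4:]

	fmt.Println("uncompressed length:", uncompressedLen)
	fmt.Println("raw block size:", len(rawBlock))
}
```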
|
||||
Contributors
|
||||
------------
|
||||
|
||||
Damian Gryski ([@dgryski](https://github.com/dgryski))
|
||||
Dustin Sallings ([@dustin](https://github.com/dustin))
|
||||
|
||||
Contact
|
||||
-------
|
||||
|
||||
[@bkaradzic](https://twitter.com/bkaradzic)
|
||||
http://www.stuckingeometry.com
|
||||
|
||||
Project page
|
||||
https://github.com/bkaradzic/go-lz4
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
74
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzzer/main.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/bkaradzic/go-lz4"
|
||||
|
||||
// lz4's API matches snappy's, so we can easily see how it performs
|
||||
// lz4 "code.google.com/p/snappy-go/snappy"
|
||||
)
|
||||
|
||||
var input = `
|
||||
ADVENTURE I. A SCANDAL IN BOHEMIA
|
||||
|
||||
I.
|
||||
|
||||
To Sherlock Holmes she is always THE woman. I have seldom heard
|
||||
him mention her under any other name. In his eyes she eclipses
|
||||
and predominates the whole of her sex. It was not that he felt
|
||||
any emotion akin to love for Irene Adler. All emotions, and that
|
||||
one particularly, were abhorrent to his cold, precise but
|
||||
admirably balanced mind. He was, I take it, the most perfect
|
||||
reasoning and observing machine that the world has seen, but as a
|
||||
lover he would have placed himself in a false position. He never
|
||||
spoke of the softer passions, save with a gibe and a sneer. They
|
||||
were admirable things for the observer--excellent for drawing the
|
||||
veil from men's motives and actions. But for the trained reasoner
|
||||
to admit such intrusions into his own delicate and finely
|
||||
adjusted temperament was to introduce a distracting factor which
|
||||
might throw a doubt upon all his mental results. Grit in a
|
||||
sensitive instrument, or a crack in one of his own high-power
|
||||
lenses, would not be more disturbing than a strong emotion in a
|
||||
nature such as his. And yet there was but one woman to him, and
|
||||
that woman was the late Irene Adler, of dubious and questionable
|
||||
memory.
|
||||
|
||||
I had seen little of Holmes lately. My marriage had drifted us
|
||||
away from each other. My own complete happiness, and the
|
||||
home-centred interests which rise up around the man who first
|
||||
finds himself master of his own establishment, were sufficient to
|
||||
absorb all my attention, while Holmes, who loathed every form of
|
||||
society with his whole Bohemian soul, remained in our lodgings in
|
||||
Baker Street, buried among his old books, and alternating from
|
||||
week to week between cocaine and ambition, the drowsiness of the
|
||||
drug, and the fierce energy of his own keen nature. He was still,
|
||||
as ever, deeply attracted by the study of crime, and occupied his
|
||||
immense faculties and extraordinary powers of observation in
|
||||
following out those clues, and clearing up those mysteries which
|
||||
had been abandoned as hopeless by the official police. From time
|
||||
to time I heard some vague account of his doings: of his summons
|
||||
to Odessa in the case of the Trepoff murder, of his clearing up
|
||||
of the singular tragedy of the Atkinson brothers at Trincomalee,
|
||||
and finally of the mission which he had accomplished so
|
||||
delicately and successfully for the reigning family of Holland.
|
||||
Beyond these signs of his activity, however, which I merely
|
||||
shared with all the readers of the daily press, I knew little of
|
||||
my former friend and companion.
|
||||
`
|
||||
|
||||
func main() {
|
||||
|
||||
compressed, _ := lz4.Encode(nil, []byte(input))
|
||||
|
||||
modified := make([]byte, len(compressed))
|
||||
|
||||
for {
|
||||
copy(modified, compressed)
|
||||
for i := 0; i < 100; i++ {
|
||||
modified[rand.Intn(len(compressed)-4)+4] = byte(rand.Intn(256))
|
||||
}
|
||||
lz4.Decode(nil, modified)
|
||||
}
|
||||
|
||||
}
|
||||
94
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4-example/main.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright 2011 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
|
||||
lz4 "github.com/bkaradzic/go-lz4"
|
||||
)
|
||||
|
||||
var (
|
||||
decompress = flag.Bool("d", false, "decompress")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
var optCPUProfile = flag.String("cpuprofile", "", "profile")
|
||||
flag.Parse()
|
||||
|
||||
if *optCPUProfile != "" {
|
||||
f, err := os.Create(*optCPUProfile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
|
||||
var data []byte
|
||||
|
||||
if len(args) < 2 {
|
||||
fmt.Print("Usage: lz4 [-d] <input> <output>\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
input, err := os.OpenFile(args[0], os.O_RDONLY, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open input file %s\n", args[0])
|
||||
os.Exit(1)
|
||||
}
|
||||
defer input.Close()
|
||||
|
||||
if *decompress {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, err = lz4.Decode(nil, data)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to decode:", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, err = lz4.Encode(nil, data)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to encode:", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(args[1], data, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open output file %s\n", args[1])
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
63
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
|
||||
|
||||
func roundtrip(t *testing.T, input []byte) {
|
||||
|
||||
dst, err := Encode(nil, input)
|
||||
if err != nil {
|
||||
t.Errorf("got error during compression: %s", err)
|
||||
}
|
||||
|
||||
output, err := Decode(nil, dst)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("got error during decompress: %s", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, input) {
|
||||
t.Errorf("roundtrip failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
roundtrip(t, nil)
|
||||
}
|
||||
|
||||
func TestLengths(t *testing.T) {
|
||||
|
||||
for i := 0; i < 1024; i++ {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
|
||||
for i := 1024; i < 4096; i += 23 {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWords(t *testing.T) {
|
||||
roundtrip(t, testfile)
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Encode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(nil, testfile)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Decode(b *testing.B) {
|
||||
|
||||
var compressed, _ = Encode(nil, testfile)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(nil, compressed)
|
||||
}
|
||||
}
|
||||
194
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go
generated
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) < 4 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
uncompressedLen := binary.LittleEndian.Uint32(src)
|
||||
|
||||
if uncompressedLen == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if uncompressedLen > MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if dst == nil || len(dst) < int(uncompressedLen) {
|
||||
dst = make([]byte, uncompressedLen)
|
||||
}
|
||||
|
||||
d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return d.dst, d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return d.dst, nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
if literal < 4 {
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if d.dpos+length > uncompressedLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
||||
13052
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt
generated
vendored
Normal file
File diff suppressed because it is too large
188
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import "encoding/binary"
|
||||
import "errors"
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 17
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
// MaxInputSize is the largest buffer that can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of an LZ4 block, given its uncompressed length
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16 + 4
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) >= MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}
|
||||
|
||||
binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
e.dpos = 4
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return e.dst[:e.dpos], nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
||||
19
Godeps/_workspace/src/github.com/calmh/logger/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (C) 2013 Jakob Borg
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
15
Godeps/_workspace/src/github.com/calmh/logger/README.md
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
logger
|
||||
======
|
||||
|
||||
A small wrapper around `log` to provide log levels.
|
||||
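A minimal usage sketch, assuming only the API shown in logger.go and logger_test.go in this changeset (New, AddHandler, the LogLevel constants and the leveled print methods):

```go
package main

import (
	"github.com/calmh/logger"
)

func main() {
	l := logger.New()

	// Handlers receive every message logged at the given level.
	l.AddHandler(logger.LevelWarn, func(level logger.LogLevel, msg string) {
		// e.g. forward warnings to a GUI or an event log
	})

	l.Infoln("starting up")
	l.Warnf("%d items failed", 3)
}
```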
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
http://godoc.org/github.com/calmh/logger
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
MIT
|
||||
|
||||
42
logger/logger.go → Godeps/_workspace/src/github.com/calmh/logger/logger.go
generated
vendored
@@ -1,6 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package logger implements a standardized logger with callback functionality
|
||||
package logger
|
||||
@@ -9,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
@@ -23,6 +23,7 @@ const (
|
||||
NumLevels
|
||||
)
|
||||
|
||||
// A MessageHandler is called with the log level and message text.
|
||||
type MessageHandler func(l LogLevel, msg string)
|
||||
|
||||
type Logger struct {
|
||||
@@ -31,6 +32,7 @@ type Logger struct {
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
// The default logger logs to standard output with a time prefix.
|
||||
var DefaultLogger = New()
|
||||
|
||||
func New() *Logger {
|
||||
@@ -39,26 +41,31 @@ func New() *Logger {
|
||||
}
|
||||
}
|
||||
|
||||
// AddHandler registers a new MessageHandler to receive messages with the
|
||||
// specified log level or above.
|
||||
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
l.handlers[level] = append(l.handlers[level], h)
|
||||
}
|
||||
|
||||
// See log.SetFlags
|
||||
func (l *Logger) SetFlags(flag int) {
|
||||
l.logger.SetFlags(flag)
|
||||
}
|
||||
|
||||
// See log.SetPrefix
|
||||
func (l *Logger) SetPrefix(prefix string) {
|
||||
l.logger.SetPrefix(prefix)
|
||||
}
|
||||
|
||||
func (l *Logger) callHandlers(level LogLevel, s string) {
|
||||
for _, h := range l.handlers[level] {
|
||||
h(level, s)
|
||||
h(level, strings.TrimSpace(s))
|
||||
}
|
||||
}
|
||||
|
||||
// Debugln logs a line with a DEBUG prefix.
|
||||
func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -67,6 +74,7 @@ func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted line with a DEBUG prefix.
|
||||
func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -74,6 +82,8 @@ func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with an INFO prefix.
|
||||
func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -82,6 +92,7 @@ func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with an INFO prefix.
|
||||
func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -90,6 +101,7 @@ func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Okln logs a line with an OK prefix.
|
||||
func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -98,6 +110,7 @@ func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Okf logs a formatted line with an OK prefix.
|
||||
func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -106,6 +119,7 @@ func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Warnln logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -114,6 +128,7 @@ func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
@@ -122,31 +137,24 @@ func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Fatalln logs a line with a FATAL prefix and exits the process with exit
|
||||
// code 1.
|
||||
func (l *Logger) Fatalln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(3)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
|
||||
// exit code 1.
|
||||
func (l *Logger) Fatalf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(3)
|
||||
}
|
||||
|
||||
func (l *Logger) FatalErr(err error) {
|
||||
if err != nil {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
l.logger.SetFlags(l.logger.Flags() | log.Lshortfile)
|
||||
l.logger.Output(2, "FATAL: "+err.Error())
|
||||
l.callHandlers(LevelFatal, err.Error())
|
||||
os.Exit(3)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
58
Godeps/_workspace/src/github.com/calmh/logger/logger_test.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package logger
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAPI(t *testing.T) {
|
||||
l := New()
|
||||
l.SetFlags(0)
|
||||
l.SetPrefix("testing")
|
||||
|
||||
debug := 0
|
||||
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
|
||||
info := 0
|
||||
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
|
||||
warn := 0
|
||||
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
|
||||
ok := 0
|
||||
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
|
||||
|
||||
l.Debugf("test %d", 0)
|
||||
l.Debugln("test", 0)
|
||||
l.Infof("test %d", 1)
|
||||
l.Infoln("test", 1)
|
||||
l.Warnf("test %d", 2)
|
||||
l.Warnln("test", 2)
|
||||
l.Okf("test %d", 3)
|
||||
l.Okln("test", 3)
|
||||
|
||||
if debug != 2 {
|
||||
t.Errorf("Debug handler called %d != 2 times", debug)
|
||||
}
|
||||
if info != 2 {
|
||||
t.Errorf("Info handler called %d != 2 times", info)
|
||||
}
|
||||
if warn != 2 {
|
||||
t.Errorf("Warn handler called %d != 2 times", warn)
|
||||
}
|
||||
if ok != 2 {
|
||||
t.Errorf("Ok handler called %d != 2 times", ok)
|
||||
}
|
||||
}
|
||||
|
||||
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
|
||||
return func(l LogLevel, msg string) {
|
||||
*counter++
|
||||
if l != expectl {
|
||||
t.Errorf("Incorrect message level %d != %d", l, expectl)
|
||||
}
|
||||
if !strings.HasSuffix(msg, expectmsg) {
|
||||
t.Errorf("%q does not end with %q", msg, expectmsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,12 +2,13 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd
|
||||
// +build linux netbsd openbsd solaris
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
@@ -20,6 +21,8 @@ func executable() (string, error) {
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
case "solaris":
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
|
||||
}
|
||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
||||
}
|
||||
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
coverage.out
|
||||
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
|
||||
install:
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- ./generate.sh
|
||||
- go test -coverprofile=coverage.out
|
||||
|
||||
after_success:
|
||||
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
|
||||
|
||||
env:
|
||||
global:
|
||||
secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
|
||||
22
assets/bootstrap-3.1.1/LICENSE → Godeps/_workspace/src/github.com/calmh/xdr/LICENSE
generated
vendored
@@ -1,21 +1,19 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2014 Twitter, Inc
|
||||
Copyright (C) 2014 Jakob Borg.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
of this software and associated documentation files (the "Software"), to
|
||||
deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
||||
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
xdr
|
||||
===
|
||||
|
||||
[](https://travis-ci.org/calmh/xdr)
|
||||
[](https://coveralls.io/r/calmh/xdr?branch=master)
|
||||
[](http://godoc.org/github.com/calmh/xdr)
|
||||
[](http://opensource.org/licenses/MIT)
|
||||
|
||||
This is an XDR encoding/decoding library. It uses code generation and
|
||||
not reflection. It supports the IPDR bastardized XDR format when built
|
||||
with `-tags ipdr`.
|
||||
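The generated marshalling code shown later in this changeset drives xdr.Writer and xdr.Reader directly rather than using reflection; a hand-rolled sketch of the same calls (NewWriter, WriteUint32, WriteString, Tot, Error and the matching reads), assuming only the methods exercised by that generated code:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf bytes.Buffer

	// Write a length-prefixed record the way generated encodeXDR methods do.
	xw := xdr.NewWriter(&buf)
	xw.WriteUint32(42)
	xw.WriteString("hello")
	fmt.Println("wrote", xw.Tot(), "bytes, err:", xw.Error())

	// Read it back; errors are accumulated and checked once at the end.
	xr := xdr.NewReader(&buf)
	fmt.Println(xr.ReadUint32(), xr.ReadString(), xr.Error())
}
```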
|
||||
40
xdr/bench_test.go → Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
@@ -1,6 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
@@ -8,12 +7,15 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
type XDRBenchStruct struct {
|
||||
I1 uint64
|
||||
I2 uint32
|
||||
I3 uint16
|
||||
I4 uint8
|
||||
Bs0 []byte // max:128
|
||||
Bs1 []byte
|
||||
S0 string // max:128
|
||||
@@ -25,16 +27,21 @@ var s = XDRBenchStruct{
|
||||
I1: 42,
|
||||
I2: 43,
|
||||
I3: 44,
|
||||
I4: 45,
|
||||
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
|
||||
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
S0: "Hello World! String one.",
|
||||
S1: "Hello World! String two.",
|
||||
}
|
||||
var e = s.MarshalXDR()
|
||||
var e []byte
|
||||
|
||||
func init() {
|
||||
e, _ = s.MarshalXDR()
|
||||
}
|
||||
|
||||
func BenchmarkThisMarshal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
res = s.MarshalXDR()
|
||||
res, _ = s.MarshalXDR()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,6 +64,16 @@ func BenchmarkThisEncode(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncoder(b *testing.B) {
|
||||
w := xdr.NewWriter(ioutil.Discard)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.encodeXDR(w)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type repeatReader struct {
|
||||
data []byte
|
||||
}
|
||||
@@ -85,3 +102,16 @@ func BenchmarkThisDecode(b *testing.B) {
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisDecoder(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
r := xdr.NewReader(rr)
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.decodeXDR(r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
199
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
XDRBenchStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I1 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| I2 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | I3 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct XDRBenchStruct {
|
||||
unsigned hyper I1;
|
||||
unsigned int I2;
|
||||
unsigned int I3;
|
||||
uint8 I4;
|
||||
opaque Bs0<128>;
|
||||
opaque Bs1<>;
|
||||
string S0<128>;
|
||||
string S1<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(o.I1)
|
||||
xw.WriteUint32(o.I2)
|
||||
xw.WriteUint16(o.I3)
|
||||
xw.WriteUint8(o.I4)
|
||||
if l := len(o.Bs0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
|
||||
}
|
||||
xw.WriteBytes(o.Bs0)
|
||||
xw.WriteBytes(o.Bs1)
|
||||
if l := len(o.S0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
|
||||
}
|
||||
xw.WriteString(o.S0)
|
||||
xw.WriteString(o.S1)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I1 = xr.ReadUint64()
|
||||
o.I2 = xr.ReadUint32()
|
||||
o.I3 = xr.ReadUint16()
|
||||
o.I4 = xr.ReadUint8()
|
||||
o.Bs0 = xr.ReadBytesMax(128)
|
||||
o.Bs1 = xr.ReadBytes()
|
||||
o.S0 = xr.ReadStringMax(128)
|
||||
o.S1 = xr.ReadString()
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
repeatReader Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of data |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ data (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct repeatReader {
|
||||
opaque data<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o repeatReader) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o repeatReader) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.data)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *repeatReader) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
|
||||
o.data = xr.ReadBytes()
|
||||
return xr.Error()
|
||||
}
|
||||
51
xdr/cmd/genxdr/main.go → Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
@@ -1,6 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
@@ -34,11 +33,7 @@ type structInfo struct {
|
||||
Fields []fieldInfo
|
||||
}
|
||||
|
||||
var headerTpl = template.Must(template.New("header").Parse(`// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// ************************************************************
|
||||
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
@@ -48,7 +43,7 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/syncthing/xdr"
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
`))
|
||||
|
||||
@@ -58,15 +53,23 @@ func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
return o.encodeXDR(xw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() []byte {
|
||||
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) []byte {
|
||||
func (o {{.TypeName}}) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
@@ -76,18 +79,21 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}}.encodeXDR(xw)
|
||||
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
|
||||
@@ -97,7 +103,10 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
|
||||
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
@@ -134,7 +143,7 @@ func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
|
||||
@@ -160,6 +169,8 @@ type typeSet struct {
|
||||
}
|
||||
|
||||
var xdrEncoders = map[string]typeSet{
|
||||
"int8": typeSet{"uint8", "Uint8"},
|
||||
"uint8": typeSet{"", "Uint8"},
|
||||
"int16": typeSet{"uint16", "Uint16"},
|
||||
"uint16": typeSet{"", "Uint16"},
|
||||
"int32": typeSet{"uint32", "Uint32"},
|
||||
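The template change above means generated MarshalXDR methods now return an error instead of failing silently, with MustMarshalXDR as the panicking shortcut. A rough sketch of how calling code adapts follows; the XDRMarshaler interface, the send helper, and the package name are hypothetical, used only for illustration.

package example

import "io"

// XDRMarshaler is a hypothetical interface matching the method set the
// updated template now generates.
type XDRMarshaler interface {
	MarshalXDR() ([]byte, error)
	MustMarshalXDR() []byte
}

// send marshals a generated message and writes it out, propagating the new
// error return (previously MarshalXDR returned only a []byte).
func send(w io.Writer, msg XDRMarshaler) error {
	bs, err := msg.MarshalXDR()
	if err != nil {
		return err // e.g. a field exceeded its declared max size
	}
	_, err = w.Write(bs)
	return err
}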
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = len(os.Getenv("XDRTRACE")) > 0
|
||||
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
|
||||
)
|
||||
|
||||
const maxDebugBytes = 32
|
||||
5
Godeps/_workspace/src/github.com/calmh/xdr/doc.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
|
||||
package xdr
|
||||
79
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
// Contains all supported types
|
||||
type TestStruct struct {
|
||||
I int
|
||||
I8 int8
|
||||
UI8 uint8
|
||||
I16 int16
|
||||
UI16 uint16
|
||||
I32 int32
|
||||
UI32 uint32
|
||||
I64 int64
|
||||
UI64 uint64
|
||||
BS []byte // max:1024
|
||||
S string // max:1024
|
||||
C Opaque
|
||||
SS []string // max:1024
|
||||
}
|
||||
|
||||
type Opaque [32]byte
|
||||
|
||||
func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
|
||||
return w.WriteRaw(u[:])
|
||||
}
|
||||
|
||||
func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
|
||||
return r.ReadRaw(u[:])
|
||||
}
|
||||
|
||||
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
var u Opaque
|
||||
for i := range u[:] {
|
||||
u[i] = byte(rand.Int())
|
||||
}
|
||||
return reflect.ValueOf(u)
|
||||
}
|
||||
|
||||
func TestEncDec(t *testing.T) {
|
||||
fn := func(t0 TestStruct) bool {
|
||||
bs, err := t0.MarshalXDR()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var t1 TestStruct
|
||||
err = t1.UnmarshalXDR(bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
|
||||
if t0.I != t1.I ||
|
||||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
|
||||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
|
||||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
|
||||
bytes.Compare(t0.BS, t1.BS) != 0 ||
|
||||
t0.S != t1.S || t0.C != t1.C {
|
||||
t.Logf("%#v", t0)
|
||||
t.Logf("%#v", t1)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
174
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
TestStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | UI16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| UI32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ UI64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of BS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ BS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Opaque |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ SS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct TestStruct {
|
||||
int I;
|
||||
int8 I8;
|
||||
uint8 UI8;
|
||||
int16 I16;
|
||||
unsigned int UI16;
|
||||
int32 I32;
|
||||
unsigned int UI32;
|
||||
hyper I64;
|
||||
unsigned hyper UI64;
|
||||
opaque BS<1024>;
|
||||
string S<1024>;
|
||||
Opaque C;
|
||||
string SS<1024>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o TestStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o TestStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(uint64(o.I))
|
||||
xw.WriteUint8(uint8(o.I8))
|
||||
xw.WriteUint8(o.UI8)
|
||||
xw.WriteUint16(uint16(o.I16))
|
||||
xw.WriteUint16(o.UI16)
|
||||
xw.WriteUint32(uint32(o.I32))
|
||||
xw.WriteUint32(o.UI32)
|
||||
xw.WriteUint64(uint64(o.I64))
|
||||
xw.WriteUint64(o.UI64)
|
||||
if l := len(o.BS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
|
||||
}
|
||||
xw.WriteBytes(o.BS)
|
||||
if l := len(o.S); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.S)
|
||||
_, err := o.C.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
if l := len(o.SS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.SS)))
|
||||
for i := range o.SS {
|
||||
xw.WriteString(o.SS[i])
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *TestStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I = int(xr.ReadUint64())
|
||||
o.I8 = int8(xr.ReadUint8())
|
||||
o.UI8 = xr.ReadUint8()
|
||||
o.I16 = int16(xr.ReadUint16())
|
||||
o.UI16 = xr.ReadUint16()
|
||||
o.I32 = int32(xr.ReadUint32())
|
||||
o.UI32 = xr.ReadUint32()
|
||||
o.I64 = int64(xr.ReadUint64())
|
||||
o.UI64 = xr.ReadUint64()
|
||||
o.BS = xr.ReadBytesMax(1024)
|
||||
o.S = xr.ReadStringMax(1024)
|
||||
(&o.C).decodeXDR(xr)
|
||||
_SSSize := int(xr.ReadUint32())
|
||||
if _SSSize > 1024 {
|
||||
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
|
||||
}
|
||||
o.SS = make([]string, _SSSize)
|
||||
for i := range o.SS {
|
||||
o.SS[i] = xr.ReadString()
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
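A small usage sketch of the generated methods above, written as a hypothetical extra test in package xdr_test (so TestStruct and the testing import are in scope). It exercises both a successful round trip and the new error path when a field exceeds its // max tag; the error text comes from xdr.ElementSizeExceeded.

func TestSizeLimitError(t *testing.T) {
	// Exceeds the // max:1024 tag on S, so MarshalXDR must return an error
	// such as "S exceeds size limit; 2048 > 1024".
	big := TestStruct{S: string(make([]byte, 2048))}
	if _, err := big.MarshalXDR(); err == nil {
		t.Error("expected an element size error, got nil")
	}

	// A value within the limits should round-trip unchanged.
	ok := TestStruct{I: 42, S: "hello"}
	bs, err := ok.MarshalXDR()
	if err != nil {
		t.Fatal(err)
	}
	var back TestStruct
	if err := back.UnmarshalXDR(bs); err != nil {
		t.Fatal(err)
	}
	if back.I != 42 || back.S != "hello" {
		t.Errorf("round trip mismatch: %v %q", back.I, back.S)
	}
}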
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
#!/bin/sh
|
||||
|
||||
go run cmd/genxdr/main.go -- bench_test.go > bench_xdr_test.go
|
||||
go run cmd/genxdr/main.go -- encdec_test.go > encdec_xdr_test.go
|
||||
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
return 0
|
||||
}
|
||||
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
d := l % 4
|
||||
if d == 0 {
|
||||
return 0
|
||||
}
|
||||
return 4 - d
|
||||
}
|
||||
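The two pad variants above capture the only framing difference between the standard XDR and IPDR build modes: whether lengths are rounded up to a 32-bit boundary. A standalone sketch of the same rounding rule, handy for checking wire sizes by hand:

package main

import "fmt"

// xdrPad mirrors the unexported pad() above: the number of zero bytes
// needed to reach the next 32-bit boundary (always 0 under the ipdr tag).
func xdrPad(l int) int {
	if d := l % 4; d != 0 {
		return 4 - d
	}
	return 0
}

func main() {
	for _, l := range []int{0, 1, 2, 3, 4, 5, 32, 33} {
		fmt.Println(l, xdrPad(l)) // 0 0, 1 3, 2 2, 3 1, 4 0, 5 3, 32 0, 33 3
	}
}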
107
xdr/reader.go → Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
@@ -5,20 +5,16 @@
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var ErrElementSizeExceeded = errors.New("element size exceeded")
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
tot int
|
||||
err error
|
||||
b [8]byte
|
||||
sb []byte
|
||||
last time.Time
|
||||
r io.Reader
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
@@ -27,24 +23,28 @@ func NewReader(r io.Reader) *Reader {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reader) ReadString() string {
|
||||
if r.sb == nil {
|
||||
r.sb = make([]byte, 64)
|
||||
} else {
|
||||
r.sb = r.sb[:cap(r.sb)]
|
||||
func (r *Reader) ReadRaw(bs []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
r.sb = r.ReadBytesInto(r.sb)
|
||||
return string(r.sb)
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, bs)
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadString() string {
|
||||
return r.ReadStringMax(0)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadStringMax(max int) string {
|
||||
if r.sb == nil {
|
||||
r.sb = make([]byte, 64)
|
||||
} else {
|
||||
r.sb = r.sb[:cap(r.sb)]
|
||||
buf := r.ReadBytesMaxInto(max, nil)
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh := reflect.StringHeader{
|
||||
Data: bh.Data,
|
||||
Len: bh.Len,
|
||||
}
|
||||
r.sb = r.ReadBytesMaxInto(max, r.sb)
|
||||
return string(r.sb)
|
||||
return *((*string)(unsafe.Pointer(&sh)))
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytes() []byte {
|
||||
@@ -63,65 +63,54 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
r.last = time.Now()
|
||||
s := r.tot
|
||||
|
||||
l := int(r.ReadUint32())
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
if max > 0 && l > max {
|
||||
r.err = ErrElementSizeExceeded
|
||||
r.err = ElementSizeExceeded("bytes field", l, max)
|
||||
return nil
|
||||
}
|
||||
|
||||
if l+pad(l) > len(dst) {
|
||||
dst = make([]byte, l+pad(l))
|
||||
if fullLen := l + pad(l); fullLen > len(dst) {
|
||||
dst = make([]byte, fullLen)
|
||||
} else {
|
||||
dst = dst[:l+pad(l)]
|
||||
dst = dst[:fullLen]
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, dst)
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Debugf("@0x%x: rd bytes (%d): %v", s, len(dst), r.err)
|
||||
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
r.tot += n
|
||||
|
||||
if debug {
|
||||
if n > maxDebugBytes {
|
||||
dl.Debugf("@0x%x: rd bytes (%d): %x...", s, len(dst), dst[:maxDebugBytes])
|
||||
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Debugf("@0x%x: rd bytes (%d): %x", s, len(dst), dst)
|
||||
dl.Printf("rd bytes (%d): %x", len(dst), dst)
|
||||
}
|
||||
}
|
||||
return dst[:l]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBool() bool {
|
||||
return r.ReadUint32() != 0
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
return uint16(r.ReadUint32())
|
||||
return r.ReadUint8() != 0
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint32() uint32 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
r.last = time.Now()
|
||||
s := r.tot
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, r.b[:4])
|
||||
r.tot += n
|
||||
_, r.err = io.ReadFull(r.r, r.b[:4])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Debugf("@0x%x: rd uint32: %v", r.tot, r.err)
|
||||
dl.Printf("rd uint32: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
@@ -129,7 +118,7 @@ func (r *Reader) ReadUint32() uint32 {
|
||||
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
|
||||
|
||||
if debug {
|
||||
dl.Debugf("@0x%x: rd uint32=%d (0x%08x)", s, v, v)
|
||||
dl.Printf("rd uint32=%d (0x%08x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
@@ -138,15 +127,11 @@ func (r *Reader) ReadUint64() uint64 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
r.last = time.Now()
|
||||
s := r.tot
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, r.b[:8])
|
||||
r.tot += n
|
||||
_, r.err = io.ReadFull(r.r, r.b[:8])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Debugf("@0x%x: rd uint64: %v", r.tot, r.err)
|
||||
dl.Printf("rd uint64: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
@@ -155,19 +140,27 @@ func (r *Reader) ReadUint64() uint64 {
|
||||
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
|
||||
|
||||
if debug {
|
||||
dl.Debugf("@0x%x: rd uint64=%d (0x%016x)", s, v, v)
|
||||
dl.Printf("rd uint64=%d (0x%016x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (r *Reader) Tot() int {
|
||||
return r.tot
|
||||
type XDRError struct {
|
||||
op string
|
||||
err error
|
||||
}
|
||||
|
||||
func (e XDRError) Error() string {
|
||||
return "xdr " + e.op + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (r *Reader) Error() error {
|
||||
return r.err
|
||||
if r.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"read", r.err}
|
||||
}
|
||||
|
||||
func (r *Reader) LastRead() time.Time {
|
||||
return r.last
|
||||
func ElementSizeExceeded(field string, size, limit int) error {
|
||||
return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
|
||||
}
|
||||
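One property of the Reader above worth calling out: errors are sticky, so a decoder can issue every ReadX call unconditionally and inspect Error() once at the end, which is exactly what the generated decodeXDR methods do. A minimal sketch, assuming the vendored package as modified here:

package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	// Only one uint32 is available; the second read fails and latches the
	// error, turning later reads into no-ops.
	r := xdr.NewReader(bytes.NewReader([]byte{0, 0, 0, 7}))
	a := r.ReadUint32()
	b := r.ReadUint32() // underlying reader is exhausted here

	// With this vendored version, prints something like: 7 0 xdr read: EOF
	fmt.Println(a, b, r.Error())
}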
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
import "io"
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:1])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint8: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
|
||||
}
|
||||
return r.b[0]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:2])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint16: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint16(r.b[1]) | uint16(r.b[0])<<8
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint16=%d (0x%04x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
11
xdr/doc.go → Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
@@ -2,5 +2,14 @@
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
return uint8(r.ReadUint32())
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
return uint16(r.ReadUint32())
|
||||
}
|
||||
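The reader_xdr.go/reader_ipdr.go pair above (and the matching writer files further down) select the wire width at compile time via build tags: standard XDR widens uint8 and uint16 to full 4-byte words, while the ipdr variant reads and writes them at their natural size. A small sketch of the observable difference, assuming the vendored package:

package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf bytes.Buffer
	w := xdr.NewWriter(&buf)
	w.WriteUint16(0x1234)

	// Default build (!ipdr): the value is widened to one 4-byte XDR word.
	// Built with -tags ipdr: exactly 2 bytes end up on the wire.
	fmt.Println(buf.Len())
}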
5
xdr/refl_test.go → Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
@@ -1,6 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build refl
|
||||
|
||||
71
xdr/writer.go → Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
@@ -1,30 +1,21 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func pad(l int) int {
|
||||
d := l % 4
|
||||
if d == 0 {
|
||||
return 0
|
||||
}
|
||||
return 4 - d
|
||||
}
|
||||
|
||||
var padBytes = []byte{0, 0, 0}
|
||||
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
tot int
|
||||
err error
|
||||
b [8]byte
|
||||
last time.Time
|
||||
w io.Writer
|
||||
tot int
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
type AppendWriter []byte
|
||||
@@ -40,8 +31,24 @@ func NewWriter(w io.Writer) *Writer {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteRaw(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
return n, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
return w.WriteBytes([]byte(s))
|
||||
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
|
||||
bh := reflect.SliceHeader{
|
||||
Data: sh.Data,
|
||||
Len: sh.Len,
|
||||
Cap: sh.Len,
|
||||
}
|
||||
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
@@ -49,7 +56,6 @@ func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
w.last = time.Now()
|
||||
w.WriteUint32(uint32(len(bs)))
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
@@ -57,9 +63,9 @@ func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
|
||||
if debug {
|
||||
if len(bs) > maxDebugBytes {
|
||||
dl.Debugf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
|
||||
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Debugf("wr bytes (%d): %x", len(bs), bs)
|
||||
dl.Printf("wr bytes (%d): %x", len(bs), bs)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -78,24 +84,19 @@ func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
|
||||
func (w *Writer) WriteBool(v bool) (int, error) {
|
||||
if v {
|
||||
return w.WriteUint32(1)
|
||||
return w.WriteUint8(1)
|
||||
} else {
|
||||
return w.WriteUint32(0)
|
||||
return w.WriteUint8(0)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint32(v uint32) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
w.last = time.Now()
|
||||
if debug {
|
||||
dl.Debugf("wr uint32=%d", v)
|
||||
dl.Printf("wr uint32=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 24)
|
||||
@@ -114,9 +115,8 @@ func (w *Writer) WriteUint64(v uint64) (int, error) {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
w.last = time.Now()
|
||||
if debug {
|
||||
dl.Debugf("wr uint64=%d", v)
|
||||
dl.Printf("wr uint64=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 56)
|
||||
@@ -139,9 +139,8 @@ func (w *Writer) Tot() int {
|
||||
}
|
||||
|
||||
func (w *Writer) Error() error {
|
||||
return w.err
|
||||
}
|
||||
|
||||
func (w *Writer) LastWrite() time.Time {
|
||||
return w.last
|
||||
if w.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"write", w.err}
|
||||
}
|
||||
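AppendWriter, used by the generated AppendXDR methods, lets a struct be marshalled straight into an existing byte slice instead of going through a bytes.Buffer. A minimal sketch of using it directly, assuming the vendored package:

package main

import (
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	// Marshal into a preallocated slice; *AppendWriter satisfies io.Writer.
	aw := xdr.AppendWriter(make([]byte, 0, 64))
	w := xdr.NewWriter(&aw)

	w.WriteString("hi") // length, data, padding
	w.WriteUint32(9)

	fmt.Printf("%d bytes written: % x\n", w.Tot(), []byte(aw))
}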
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:1])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 8)
|
||||
w.b[1] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:2])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
33
xdr/xdr_test.go → Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
@@ -1,32 +1,15 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
func TestPad(t *testing.T) {
|
||||
tests := [][]int{
|
||||
{0, 0},
|
||||
{1, 3},
|
||||
{2, 2},
|
||||
{3, 1},
|
||||
{4, 0},
|
||||
{32, 0},
|
||||
{33, 3},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
if p := pad(tc[0]); p != tc[1] {
|
||||
t.Errorf("Incorrect padding for %d bytes, %d != %d", tc[0], p, tc[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytesNil(t *testing.T) {
|
||||
fn := func(bs []byte) bool {
|
||||
var b = new(bytes.Buffer)
|
||||
@@ -78,14 +61,14 @@ func TestReadBytesMaxInto(t *testing.T) {
|
||||
if read := len(bs); read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBytesMaxIntoNil(t *testing.T) {
|
||||
func TestReadStringMax(t *testing.T) {
|
||||
for tot := 42; tot < 72; tot++ {
|
||||
for max := 0; max < 128; max++ {
|
||||
var b = new(bytes.Buffer)
|
||||
@@ -95,14 +78,14 @@ func TestReadBytesMaxIntoNil(t *testing.T) {
|
||||
var toWrite = make([]byte, tot)
|
||||
w.WriteBytes(toWrite)
|
||||
|
||||
var bs = r.ReadBytesMaxInto(max, nil)
|
||||
var read = len(bs)
|
||||
var str = r.ReadStringMax(max)
|
||||
var read = len(str)
|
||||
|
||||
if max == 0 || tot <= max {
|
||||
if read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
|
||||
}
|
||||
}
|
||||
121
Godeps/_workspace/src/github.com/golang/groupcache/lru/lru.go
generated
vendored
@@ -1,121 +0,0 @@
|
||||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package lru implements an LRU cache.
|
||||
package lru
|
||||
|
||||
import "container/list"
|
||||
|
||||
// Cache is an LRU cache. It is not safe for concurrent access.
|
||||
type Cache struct {
|
||||
// MaxEntries is the maximum number of cache entries before
|
||||
// an item is evicted. Zero means no limit.
|
||||
MaxEntries int
|
||||
|
||||
// OnEvicted optionally specificies a callback function to be
|
||||
// executed when an entry is purged from the cache.
|
||||
OnEvicted func(key Key, value interface{})
|
||||
|
||||
ll *list.List
|
||||
cache map[interface{}]*list.Element
|
||||
}
|
||||
|
||||
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
|
||||
type Key interface{}
|
||||
|
||||
type entry struct {
|
||||
key Key
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// New creates a new Cache.
|
||||
// If maxEntries is zero, the cache has no limit and it's assumed
|
||||
// that eviction is done by the caller.
|
||||
func New(maxEntries int) *Cache {
|
||||
return &Cache{
|
||||
MaxEntries: maxEntries,
|
||||
ll: list.New(),
|
||||
cache: make(map[interface{}]*list.Element),
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *Cache) Add(key Key, value interface{}) {
|
||||
if c.cache == nil {
|
||||
c.cache = make(map[interface{}]*list.Element)
|
||||
c.ll = list.New()
|
||||
}
|
||||
if ee, ok := c.cache[key]; ok {
|
||||
c.ll.MoveToFront(ee)
|
||||
ee.Value.(*entry).value = value
|
||||
return
|
||||
}
|
||||
ele := c.ll.PushFront(&entry{key, value})
|
||||
c.cache[key] = ele
|
||||
if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
|
||||
c.RemoveOldest()
|
||||
}
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.ll.MoveToFront(ele)
|
||||
return ele.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key Key) {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
if ele, hit := c.cache[key]; hit {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() {
|
||||
if c.cache == nil {
|
||||
return
|
||||
}
|
||||
ele := c.ll.Back()
|
||||
if ele != nil {
|
||||
c.removeElement(ele)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) removeElement(e *list.Element) {
|
||||
c.ll.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.cache, kv.key)
|
||||
if c.OnEvicted != nil {
|
||||
c.OnEvicted(kv.key, kv.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache) Len() int {
|
||||
if c.cache == nil {
|
||||
return 0
|
||||
}
|
||||
return c.ll.Len()
|
||||
}
|
||||
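For context on the vendored package being dropped here, its whole API is the handful of methods documented above. A minimal use looks like this; keys may be any comparable value.

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // keep at most two entries
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", the least recently used entry

	_, ok := c.Get("a")
	fmt.Println(ok, c.Len()) // false 2
}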
73
Godeps/_workspace/src/github.com/golang/groupcache/lru/lru_test.go
generated
vendored
@@ -1,73 +0,0 @@
|
||||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lru
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type simpleStruct struct {
|
||||
int
|
||||
string
|
||||
}
|
||||
|
||||
type complexStruct struct {
|
||||
int
|
||||
simpleStruct
|
||||
}
|
||||
|
||||
var getTests = []struct {
|
||||
name string
|
||||
keyToAdd interface{}
|
||||
keyToGet interface{}
|
||||
expectedOk bool
|
||||
}{
|
||||
{"string_hit", "myKey", "myKey", true},
|
||||
{"string_miss", "myKey", "nonsense", false},
|
||||
{"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true},
|
||||
{"simeple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false},
|
||||
{"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}},
|
||||
complexStruct{1, simpleStruct{2, "three"}}, true},
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
for _, tt := range getTests {
|
||||
lru := New(0)
|
||||
lru.Add(tt.keyToAdd, 1234)
|
||||
val, ok := lru.Get(tt.keyToGet)
|
||||
if ok != tt.expectedOk {
|
||||
t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok)
|
||||
} else if ok && val != 1234 {
|
||||
t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
lru := New(0)
|
||||
lru.Add("myKey", 1234)
|
||||
if val, ok := lru.Get("myKey"); !ok {
|
||||
t.Fatal("TestRemove returned no match")
|
||||
} else if val != 1234 {
|
||||
t.Fatalf("TestRemove failed. Expected %d, got %v", 1234, val)
|
||||
}
|
||||
|
||||
lru.Remove("myKey")
|
||||
if _, ok := lru.Get("myKey"); ok {
|
||||
t.Fatal("TestRemove returned a removed entry")
|
||||
}
|
||||
}
|
||||
228
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
generated
vendored
@@ -8,65 +8,84 @@ package leveldb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
var (
|
||||
errBatchTooShort = errors.New("leveldb: batch is too short")
|
||||
errBatchBadRecord = errors.New("leveldb: bad record in batch")
|
||||
type ErrBatchCorrupted struct {
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e *ErrBatchCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
|
||||
}
|
||||
|
||||
func newErrBatchCorrupted(reason string) error {
|
||||
return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
|
||||
}
|
||||
|
||||
const (
|
||||
batchHdrLen = 8 + 4
|
||||
batchGrowRec = 3000
|
||||
)
|
||||
|
||||
const kBatchHdrLen = 8 + 4
|
||||
|
||||
type batchReplay interface {
|
||||
put(key, value []byte, seq uint64)
|
||||
delete(key []byte, seq uint64)
|
||||
type BatchReplay interface {
|
||||
Put(key, value []byte)
|
||||
Delete(key []byte)
|
||||
}
|
||||
|
||||
// Batch is a write batch.
|
||||
type Batch struct {
|
||||
buf []byte
|
||||
data []byte
|
||||
rLen, bLen int
|
||||
seq uint64
|
||||
sync bool
|
||||
}
|
||||
|
||||
func (b *Batch) grow(n int) {
|
||||
off := len(b.buf)
|
||||
off := len(b.data)
|
||||
if off == 0 {
|
||||
// include headers
|
||||
off = kBatchHdrLen
|
||||
n += off
|
||||
off = batchHdrLen
|
||||
if b.data != nil {
|
||||
b.data = b.data[:off]
|
||||
}
|
||||
}
|
||||
if cap(b.buf)-off >= n {
|
||||
return
|
||||
if cap(b.data)-off < n {
|
||||
if b.data == nil {
|
||||
b.data = make([]byte, off, off+n)
|
||||
} else {
|
||||
odata := b.data
|
||||
div := 1
|
||||
if b.rLen > batchGrowRec {
|
||||
div = b.rLen / batchGrowRec
|
||||
}
|
||||
b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
|
||||
copy(b.data, odata)
|
||||
}
|
||||
}
|
||||
buf := make([]byte, 2*cap(b.buf)+n)
|
||||
copy(buf, b.buf)
|
||||
b.buf = buf[:off]
|
||||
}
|
||||
|
||||
func (b *Batch) appendRec(t vType, key, value []byte) {
|
||||
func (b *Batch) appendRec(kt kType, key, value []byte) {
|
||||
n := 1 + binary.MaxVarintLen32 + len(key)
|
||||
if t == tVal {
|
||||
if kt == ktVal {
|
||||
n += binary.MaxVarintLen32 + len(value)
|
||||
}
|
||||
b.grow(n)
|
||||
off := len(b.buf)
|
||||
buf := b.buf[:off+n]
|
||||
buf[off] = byte(t)
|
||||
off := len(b.data)
|
||||
data := b.data[:off+n]
|
||||
data[off] = byte(kt)
|
||||
off += 1
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(key)))
|
||||
copy(buf[off:], key)
|
||||
off += binary.PutUvarint(data[off:], uint64(len(key)))
|
||||
copy(data[off:], key)
|
||||
off += len(key)
|
||||
if t == tVal {
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(value)))
|
||||
copy(buf[off:], value)
|
||||
if kt == ktVal {
|
||||
off += binary.PutUvarint(data[off:], uint64(len(value)))
|
||||
copy(data[off:], value)
|
||||
off += len(value)
|
||||
}
|
||||
b.buf = buf[:off]
|
||||
b.data = data[:off]
|
||||
b.rLen++
|
||||
// Include 8-byte ikey header
|
||||
b.bLen += len(key) + len(value) + 8
|
||||
@@ -75,18 +94,51 @@ func (b *Batch) appendRec(t vType, key, value []byte) {
|
||||
// Put appends 'put operation' of the given key/value pair to the batch.
|
||||
// It is safe to modify the contents of the argument after Put returns.
|
||||
func (b *Batch) Put(key, value []byte) {
|
||||
b.appendRec(tVal, key, value)
|
||||
b.appendRec(ktVal, key, value)
|
||||
}
|
||||
|
||||
// Delete appends 'delete operation' of the given key to the batch.
|
||||
// It is safe to modify the contents of the argument after Delete returns.
|
||||
func (b *Batch) Delete(key []byte) {
|
||||
b.appendRec(tDel, key, nil)
|
||||
b.appendRec(ktDel, key, nil)
|
||||
}
|
||||
|
||||
// Dump dumps batch contents. The returned slice can be loaded into the
|
||||
// batch using Load method.
|
||||
// The returned slice is not its own copy, so the contents should not be
|
||||
// modified.
|
||||
func (b *Batch) Dump() []byte {
|
||||
return b.encode()
|
||||
}
|
||||
|
||||
// Load loads given slice into the batch. Previous contents of the batch
|
||||
// will be discarded.
|
||||
// The given slice will not be copied and will be used as batch buffer, so
|
||||
// it is not safe to modify the contents of the slice.
|
||||
func (b *Batch) Load(data []byte) error {
|
||||
return b.decode(0, data)
|
||||
}
|
||||
|
||||
// Replay replays batch contents.
|
||||
func (b *Batch) Replay(r BatchReplay) error {
|
||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
||||
switch kt {
|
||||
case ktVal:
|
||||
r.Put(key, value)
|
||||
case ktDel:
|
||||
r.Delete(key)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Len returns number of records in the batch.
|
||||
func (b *Batch) Len() int {
|
||||
return b.rLen
|
||||
}
|
||||
|
||||
// Reset resets the batch.
|
||||
func (b *Batch) Reset() {
|
||||
b.buf = nil
|
||||
b.data = b.data[:0]
|
||||
b.seq = 0
|
||||
b.rLen = 0
|
||||
b.bLen = 0
|
||||
@@ -97,24 +149,10 @@ func (b *Batch) init(sync bool) {
|
||||
b.sync = sync
|
||||
}
|
||||
|
||||
func (b *Batch) put(key, value []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Put(key, value)
|
||||
}
|
||||
|
||||
func (b *Batch) delete(key []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Delete(key)
|
||||
}
|
||||
|
||||
func (b *Batch) append(p *Batch) {
|
||||
if p.rLen > 0 {
|
||||
b.grow(len(p.buf) - kBatchHdrLen)
|
||||
b.buf = append(b.buf, p.buf[kBatchHdrLen:]...)
|
||||
b.grow(len(p.data) - batchHdrLen)
|
||||
b.data = append(b.data, p.data[batchHdrLen:]...)
|
||||
b.rLen += p.rLen
|
||||
}
|
||||
if p.sync {
|
||||
@@ -122,95 +160,93 @@ func (b *Batch) append(p *Batch) {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Batch) len() int {
|
||||
return b.rLen
|
||||
}
|
||||
|
||||
// size returns sums of key/value pair length plus 8-bytes ikey.
|
||||
func (b *Batch) size() int {
|
||||
return b.bLen
|
||||
}
|
||||
|
||||
func (b *Batch) encode() []byte {
|
||||
b.grow(0)
|
||||
binary.LittleEndian.PutUint64(b.buf, b.seq)
|
||||
binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen))
|
||||
binary.LittleEndian.PutUint64(b.data, b.seq)
|
||||
binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
|
||||
|
||||
return b.buf
|
||||
return b.data
|
||||
}
|
||||
|
||||
func (b *Batch) decode(buf []byte) error {
|
||||
if len(buf) < kBatchHdrLen {
|
||||
return errBatchTooShort
|
||||
func (b *Batch) decode(prevSeq uint64, data []byte) error {
|
||||
if len(data) < batchHdrLen {
|
||||
return newErrBatchCorrupted("too short")
|
||||
}
|
||||
|
||||
b.seq = binary.LittleEndian.Uint64(buf)
|
||||
b.rLen = int(binary.LittleEndian.Uint32(buf[8:]))
|
||||
b.seq = binary.LittleEndian.Uint64(data)
|
||||
if b.seq < prevSeq {
|
||||
return newErrBatchCorrupted("invalid sequence number")
|
||||
}
|
||||
b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
|
||||
if b.rLen < 0 {
|
||||
return newErrBatchCorrupted("invalid records length")
|
||||
}
|
||||
// No need to be precise at this point, it won't be used anyway
|
||||
b.bLen = len(buf) - kBatchHdrLen
|
||||
b.buf = buf
|
||||
b.bLen = len(data) - batchHdrLen
|
||||
b.data = data
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
|
||||
off := kBatchHdrLen
|
||||
func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
|
||||
off := batchHdrLen
|
||||
for i := 0; i < b.rLen; i++ {
|
||||
if off >= len(b.buf) {
|
||||
return errors.New("leveldb: invalid batch record length")
|
||||
if off >= len(b.data) {
|
||||
return newErrBatchCorrupted("invalid records length")
|
||||
}
|
||||
|
||||
t := vType(b.buf[off])
|
||||
if t > tVal {
|
||||
return errors.New("leveldb: invalid batch record type in batch")
|
||||
kt := kType(b.data[off])
|
||||
if kt > ktVal {
|
||||
return newErrBatchCorrupted("bad record: invalid type")
|
||||
}
|
||||
off += 1
|
||||
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
x, n := binary.Uvarint(b.data[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
if n <= 0 || off+int(x) > len(b.data) {
|
||||
return newErrBatchCorrupted("bad record: invalid key length")
|
||||
}
|
||||
key := b.buf[off : off+int(x)]
|
||||
key := b.data[off : off+int(x)]
|
||||
off += int(x)
|
||||
|
||||
var value []byte
|
||||
if t == tVal {
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
if kt == ktVal {
|
||||
x, n := binary.Uvarint(b.data[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
if n <= 0 || off+int(x) > len(b.data) {
|
||||
return newErrBatchCorrupted("bad record: invalid value length")
|
||||
}
|
||||
value = b.buf[off : off+int(x)]
|
||||
value = b.data[off : off+int(x)]
|
||||
off += int(x)
|
||||
}
|
||||
|
||||
f(i, t, key, value)
|
||||
f(i, kt, key, value)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) replay(to batchReplay) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
switch t {
|
||||
case tVal:
|
||||
to.put(key, value, b.seq+uint64(i))
|
||||
case tDel:
|
||||
to.delete(key, b.seq+uint64(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) memReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
||||
ikey := newIkey(key, b.seq+uint64(i), kt)
|
||||
to.Put(ikey, value)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
|
||||
if err := b.decode(prevSeq, data); err != nil {
|
||||
return err
|
||||
}
|
||||
return b.memReplay(to)
|
||||
}
|
||||
|
||||
func (b *Batch) revertMemReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
return b.decodeRec(func(i int, kt kType, key, value []byte) {
|
||||
ikey := newIkey(key, b.seq+uint64(i), kt)
|
||||
to.Delete(ikey)
|
||||
})
|
||||
}
|
||||
|
||||
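The exported BatchReplay interface above replaces the old internal batchReplay, and Dump/Load expose the batch encoding. A minimal sketch of using them with the vendored leveldb package as it appears in this diff (the upstream API may have changed since); printReplay is a hypothetical type for illustration.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay satisfies the new exported BatchReplay interface.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k2"))

	// Dump/Load round-trip the internal encoding; Replay walks the records.
	var b2 leveldb.Batch
	if err := b2.Load(b.Dump()); err != nil {
		panic(err)
	}
	if err := b2.Replay(printReplay{}); err != nil {
		panic(err)
	}
}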
26
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
generated
vendored
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
type tbRec struct {
|
||||
t vType
|
||||
kt kType
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
@@ -23,39 +23,39 @@ type testBatch struct {
|
||||
rec []*tbRec
|
||||
}
|
||||
|
||||
func (p *testBatch) put(key, value []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tVal, key, value})
|
||||
func (p *testBatch) Put(key, value []byte) {
|
||||
p.rec = append(p.rec, &tbRec{ktVal, key, value})
|
||||
}
|
||||
|
||||
func (p *testBatch) delete(key []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tDel, key, nil})
|
||||
func (p *testBatch) Delete(key []byte) {
|
||||
p.rec = append(p.rec, &tbRec{ktDel, key, nil})
|
||||
}
|
||||
|
||||
func compareBatch(t *testing.T, b1, b2 *Batch) {
|
||||
if b1.seq != b2.seq {
|
||||
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
|
||||
}
|
||||
if b1.len() != b2.len() {
|
||||
t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len())
|
||||
if b1.Len() != b2.Len() {
|
||||
t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
|
||||
}
|
||||
p1, p2 := new(testBatch), new(testBatch)
|
||||
err := b1.replay(p1)
|
||||
err := b1.Replay(p1)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 1: ", err)
|
||||
}
|
||||
err = b2.replay(p2)
|
||||
err = b2.Replay(p2)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 2: ", err)
|
||||
}
|
||||
for i := range p1.rec {
|
||||
r1, r2 := p1.rec[i], p2.rec[i]
|
||||
if r1.t != r2.t {
|
||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t)
|
||||
if r1.kt != r2.kt {
|
||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
|
||||
}
|
||||
if !bytes.Equal(r1.key, r2.key) {
|
||||
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
|
||||
}
|
||||
if r1.t == tVal {
|
||||
if r1.kt == ktVal {
|
||||
if !bytes.Equal(r1.value, r2.value) {
|
||||
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
|
||||
}
|
||||
@@ -75,7 +75,7 @@ func TestBatch_EncodeDecode(t *testing.T) {
|
||||
b1.Delete([]byte("k"))
|
||||
buf := b1.encode()
|
||||
b2 := new(Batch)
|
||||
err := b2.decode(buf)
|
||||
err := b2.decode(0, buf)
|
||||
if err != nil {
|
||||
t.Error("error when decoding batch: ", err)
|
||||
}
|
||||
|
||||
15
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
generated
vendored
@@ -170,7 +170,7 @@ func (p *dbBench) writes(perBatch int) {
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) drop() {
|
||||
func (p *dbBench) gc() {
|
||||
p.keys, p.values = nil, nil
|
||||
runtime.GC()
|
||||
}
|
||||
@@ -249,6 +249,9 @@ func (p *dbBench) newIter() iterator.Iterator {
|
||||
}
|
||||
|
||||
func (p *dbBench) close() {
|
||||
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
|
||||
p.b.Log("Block pool stats: ", bp)
|
||||
}
|
||||
p.db.Close()
|
||||
p.stor.Close()
|
||||
os.RemoveAll(benchDB)
|
||||
@@ -331,7 +334,7 @@ func BenchmarkDBRead(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.drop()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
@@ -362,7 +365,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.drop()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
@@ -379,7 +382,7 @@ func BenchmarkDBReadTable(b *testing.B) {
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.drop()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
@@ -395,7 +398,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.drop()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
@@ -413,7 +416,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.drop()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
|
||||
152
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
generated
vendored
@@ -11,115 +11,149 @@ import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// SetFunc used by Namespace.Get method to create a cache object. SetFunc
|
||||
// may return ok false, in that case the cache object will not be created.
|
||||
type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin)
|
||||
// SetFunc is the function that will be called by Namespace.Get to create
|
||||
// a cache object; if charge is less than one then the cache object will
|
||||
// not be registered to the cache tree; if value is nil then the cache object
|
||||
// will not be created.
|
||||
type SetFunc func() (charge int, value interface{})
|
||||
|
||||
// SetFin will be called when corresponding cache object are released.
|
||||
type SetFin func()
|
||||
// DelFin is the function that will be called as the result of a delete operation.
|
||||
// exist == true indicates that the object exists, and pending == true
|
||||
// indicates that the deletion has already happened but is not yet complete (waiting for all handles
|
||||
// to be released). And exist == false means the object doesn't exist.
|
||||
type DelFin func(exist, pending bool)
|
||||
|
||||
// DelFin will be called when corresponding cache object are released.
|
||||
// DelFin will be called after SetFin. The exist is true if the corresponding
|
||||
// cache object is actually exist in the cache tree.
|
||||
type DelFin func(exist bool)
|
||||
// PurgeFin is the function that will be called as the result of a purge operation.
|
||||
type PurgeFin func(ns, key uint64)
|
||||
|
||||
// PurgeFin will be called when corresponding cache object are released.
|
||||
// PurgeFin will be called after SetFin. If PurgeFin present DelFin will
|
||||
// not be executed but passed to the PurgeFin, it is up to the caller
|
||||
// to call it or not.
|
||||
type PurgeFin func(ns, key uint64, delfin DelFin)
|
||||
|
||||
// Cache is a cache tree.
|
||||
// Cache is a cache tree. A cache instance must be goroutine-safe.
|
||||
type Cache interface {
|
||||
// SetCapacity sets cache capacity.
|
||||
// SetCapacity sets cache tree capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// GetNamespace gets or creates a cache namespace for the given id.
|
||||
// Capacity returns cache tree capacity.
|
||||
Capacity() int
|
||||
|
||||
// Used returns used cache tree capacity.
|
||||
Used() int
|
||||
|
||||
// Size returns entire alive cache objects size.
|
||||
Size() int
|
||||
|
||||
// NumObjects returns number of alive objects.
|
||||
NumObjects() int
|
||||
|
||||
// GetNamespace gets cache namespace with the given id.
|
||||
// GetNamespace never returns nil.
|
||||
GetNamespace(id uint64) Namespace
|
||||
|
||||
// Purge purges all cache namespaces, read Namespace.Purge method documentation.
|
||||
// PurgeNamespace purges cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Purge.
|
||||
PurgeNamespace(id uint64, fin PurgeFin)
|
||||
|
||||
// ZapNamespace detaches cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Zap.
|
||||
ZapNamespace(id uint64)
|
||||
|
||||
// Purge purges all cache namespace from this cache tree.
|
||||
// This behaves the same as calling the Namespace.Purge method on every cache namespace.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap zaps all cache namespaces, read Namespace.Zap method documentation.
|
||||
Zap(closed bool)
|
||||
// Zap detaches all cache namespace from this cache tree.
|
||||
// This behaves the same as calling the Namespace.Zap method on every cache namespace.
|
||||
Zap()
|
||||
}
|
||||
|
||||
// Namespace is a cache namespace.
|
||||
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
|
||||
type Namespace interface {
|
||||
// Get gets cache object for the given key. The given SetFunc (if not nil) will
|
||||
// be called if the given key does not exist.
|
||||
// If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get
|
||||
// will return ok false.
|
||||
Get(key uint64, setf SetFunc) (obj Object, ok bool)
|
||||
|
||||
// Get deletes cache object for the given key. If exist the cache object will
|
||||
// be deleted later when all of its handles have been released (i.e. no one use
|
||||
// it anymore) and the given DelFin (if not nil) will finally be executed. If
|
||||
// such cache object does not exist the given DelFin will be executed anyway.
|
||||
// Get gets cache object with the given key.
|
||||
// If the cache object is not found and setf is not nil, Get will atomically create
|
||||
// the cache object by calling setf. Otherwise Get returns nil.
|
||||
//
|
||||
// Delete returns true if such cache object exist.
|
||||
// The returned cache handle should be released after use by calling Release
|
||||
// method.
|
||||
Get(key uint64, setf SetFunc) Handle
|
||||
|
||||
// Delete removes cache object with the given key from cache tree.
|
||||
// A deleted cache object will be released as soon as all of its handles have
|
||||
// been released.
|
||||
// Delete only happens once; a subsequent delete will consider the cache object not to
|
||||
// exist, even if the cache object isn't released yet.
|
||||
//
|
||||
// If not nil, fin will be called if the cache object doesn't exist or when
|
||||
// it is finally released.
|
||||
//
|
||||
// Delete returns true if such a cache object exists and has never been deleted.
|
||||
Delete(key uint64, fin DelFin) bool
|
||||
|
||||
// Purge deletes all cache objects, read Delete method documentation.
|
||||
// Purge removes all cache objects within this namespace from cache tree.
|
||||
// This is the same as doing delete on all cache objects.
|
||||
//
|
||||
// If not nil, fin will be called on each cache object when it is finally
|
||||
// released.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches the namespace from the cache tree and delete all its cache
|
||||
// objects. The cache objects deletion and finalizers execution are happen
|
||||
// immediately, even if its existing handles haven't yet been released.
|
||||
// A zapped namespace can never be filled again.
|
||||
// If closed is false then the Get function will always call the given SetFunc
|
||||
// if it is not nil, but resultant of the SetFunc will not be cached.
|
||||
Zap(closed bool)
|
||||
// Zap detaches the namespace from the cache tree and releases all its cache objects.
|
||||
// A zapped namespace can never be filled again.
|
||||
// Calling Get on zapped namespace will always return nil.
|
||||
Zap()
|
||||
}
|
||||
|
||||
// Object is a cache object.
|
||||
type Object interface {
|
||||
// Release releases the cache object. Other methods should not be called
|
||||
// after the cache object has been released.
|
||||
// Handle is a cache handle.
|
||||
type Handle interface {
|
||||
// Release releases this cache handle. This method can be safely called multiple
|
||||
// times.
|
||||
Release()
|
||||
|
||||
// Value returns value of the cache object.
|
||||
// Value returns value of this cache handle.
|
||||
// Value returns nil after this cache handle has been released.
|
||||
Value() interface{}
|
||||
}
|
||||
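To make the reworked Cache, Namespace and Handle interfaces above concrete, here is a minimal usage sketch. It is not part of the diff: the import path is the canonical goleveldb one referenced by the vendored files, and the capacity, keys, values and charges are illustrative only.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewLRUCache(10) // capacity in charge units
	ns := c.GetNamespace(0)    // namespaces are created on demand, never nil

	// Get returns a Handle; the SetFunc supplies the charge and value
	// when the key is not cached yet.
	h := ns.Get(1, func() (int, interface{}) {
		return 1, "hello"
	})
	if h != nil {
		fmt.Println(h.Value()) // prints "hello"
		h.Release()            // handles must always be released after use
	}

	// Delete removes the object once all handles are gone; the DelFin
	// reports whether it existed and whether deletion is still pending.
	ns.Delete(1, func(exist, pending bool) {
		fmt.Println("deleted:", exist, pending)
	})
}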
|
||||
const (
|
||||
DelNotExist = iota
|
||||
DelExist
|
||||
DelPendig
|
||||
)
|
||||
|
||||
// Namespace state.
|
||||
type nsState int
|
||||
|
||||
const (
|
||||
nsEffective nsState = iota
|
||||
nsZapped
|
||||
nsClosed
|
||||
)
|
||||
|
||||
// Node state.
|
||||
type nodeState int
|
||||
|
||||
const (
|
||||
nodeEffective nodeState = iota
|
||||
nodeZero nodeState = iota
|
||||
nodeEffective
|
||||
nodeEvicted
|
||||
nodeRemoved
|
||||
nodeDeleted
|
||||
)
|
||||
|
||||
// Fake object.
|
||||
type fakeObject struct {
|
||||
// Fake handle.
|
||||
type fakeHandle struct {
|
||||
value interface{}
|
||||
fin func()
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (o *fakeObject) Value() interface{} {
|
||||
if atomic.LoadUint32(&o.once) == 0 {
|
||||
return o.value
|
||||
func (h *fakeHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *fakeObject) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
|
||||
func (h *fakeHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
if o.fin != nil {
|
||||
o.fin()
|
||||
o.fin = nil
|
||||
if h.fin != nil {
|
||||
h.fin()
|
||||
h.fin = nil
|
||||
}
|
||||
}
|
||||
|
||||
553 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go (generated, vendored)
@@ -8,14 +8,32 @@ package cache
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object {
|
||||
obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) {
|
||||
return true, value, charge, fin
|
||||
type releaserFunc struct {
|
||||
fn func()
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (r releaserFunc) Release() {
|
||||
if r.fn != nil {
|
||||
r.fn()
|
||||
}
|
||||
}
|
||||
|
||||
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
|
||||
return ns.Get(key, func() (int, interface{}) {
|
||||
if relf != nil {
|
||||
return charge, releaserFunc{relf, value}
|
||||
} else {
|
||||
return charge, value
|
||||
}
|
||||
})
|
||||
return obj
|
||||
}
|
||||
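The test helper above wraps each value in releaserFunc so the cache can run a finalizer once the node is finally removed; the reworked lru_cache.go calls Release on any cached value that implements util.Releaser. A minimal sketch of the same pattern follows; the type and field names are hypothetical and not part of the diff.

package cacheexample

// finalizedValue is a hypothetical wrapper: because it has a Release()
// method (satisfying util.Releaser), the cache's node finalizer will call
// Release when the node is finally removed, mirroring releaserFunc above.
type finalizedValue struct {
	data string
	fin  func()
}

func (v finalizedValue) Release() {
	if v.fin != nil {
		v.fin()
	}
}

A value like this would be returned from the SetFunc passed to Namespace.Get, for example: return 1, finalizedValue{data: "payload", fin: cleanup}.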
|
||||
func TestCache_HitMiss(t *testing.T) {
|
||||
@@ -43,29 +61,31 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
setfin++
|
||||
}).Release()
|
||||
for j, y := range cases {
|
||||
r, ok := ns.Get(y.key, nil)
|
||||
h := ns.Get(y.key, nil)
|
||||
if j <= i {
|
||||
// should hit
|
||||
if !ok {
|
||||
if h == nil {
|
||||
t.Errorf("case '%d' iteration '%d' is miss", i, j)
|
||||
} else if r.Value().(string) != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
|
||||
} else {
|
||||
if x := h.Value().(releaserFunc).value.(string); x != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// should miss
|
||||
if ok {
|
||||
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string))
|
||||
if h != nil {
|
||||
t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
r.Release()
|
||||
if h != nil {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, x := range cases {
|
||||
finalizerOk := false
|
||||
ns.Delete(x.key, func(exist bool) {
|
||||
ns.Delete(x.key, func(exist, pending bool) {
|
||||
finalizerOk = true
|
||||
})
|
||||
|
||||
@@ -74,22 +94,24 @@ func TestCache_HitMiss(t *testing.T) {
|
||||
}
|
||||
|
||||
for j, y := range cases {
|
||||
r, ok := ns.Get(y.key, nil)
|
||||
h := ns.Get(y.key, nil)
|
||||
if j > i {
|
||||
// should hit
|
||||
if !ok {
|
||||
if h == nil {
|
||||
t.Errorf("case '%d' iteration '%d' is miss", i, j)
|
||||
} else if r.Value().(string) != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
|
||||
} else {
|
||||
if x := h.Value().(releaserFunc).value.(string); x != y.value {
|
||||
t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// should miss
|
||||
if ok {
|
||||
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string))
|
||||
if h != nil {
|
||||
t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
r.Release()
|
||||
if h != nil {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -107,42 +129,42 @@ func TestLRUCache_Eviction(t *testing.T) {
|
||||
set(ns, 3, 3, 1, nil).Release()
|
||||
set(ns, 4, 4, 1, nil).Release()
|
||||
set(ns, 5, 5, 1, nil).Release()
|
||||
if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2
|
||||
r.Release()
|
||||
if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
|
||||
h.Release()
|
||||
}
|
||||
set(ns, 9, 9, 10, nil).Release() // 5,2,9
|
||||
|
||||
for _, x := range []uint64{9, 2, 5, 1} {
|
||||
r, ok := ns.Get(x, nil)
|
||||
if !ok {
|
||||
t.Errorf("miss for key '%d'", x)
|
||||
for _, key := range []uint64{9, 2, 5, 1} {
|
||||
h := ns.Get(key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if r.Value().(int) != int(x) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
r.Release()
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
o1.Release()
|
||||
for _, x := range []uint64{1, 2, 5} {
|
||||
r, ok := ns.Get(x, nil)
|
||||
if !ok {
|
||||
t.Errorf("miss for key '%d'", x)
|
||||
for _, key := range []uint64{1, 2, 5} {
|
||||
h := ns.Get(key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if r.Value().(int) != int(x) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
r.Release()
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
for _, x := range []uint64{3, 4, 9} {
|
||||
r, ok := ns.Get(x, nil)
|
||||
if ok {
|
||||
t.Errorf("hit for key '%d'", x)
|
||||
if r.Value().(int) != int(x) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
|
||||
for _, key := range []uint64{3, 4, 9} {
|
||||
h := ns.Get(key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
r.Release()
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -153,16 +175,15 @@ func TestLRUCache_SetGet(t *testing.T) {
|
||||
for i := 0; i < 200; i++ {
|
||||
n := uint64(rand.Intn(99999) % 20)
|
||||
set(ns, n, n, 1, nil).Release()
|
||||
if p, ok := ns.Get(n, nil); ok {
|
||||
if p.Value() == nil {
|
||||
if h := ns.Get(n, nil); h != nil {
|
||||
if h.Value() == nil {
|
||||
t.Errorf("key '%d' contains nil value", n)
|
||||
} else {
|
||||
got := p.Value().(uint64)
|
||||
if got != n {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got)
|
||||
if x := h.Value().(uint64); x != n {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
|
||||
}
|
||||
}
|
||||
p.Release()
|
||||
h.Release()
|
||||
} else {
|
||||
t.Errorf("key '%d' doesn't exist", n)
|
||||
}
|
||||
@@ -176,31 +197,429 @@ func TestLRUCache_Purge(t *testing.T) {
|
||||
o2 := set(ns1, 2, 2, 1, nil)
|
||||
ns1.Purge(nil)
|
||||
set(ns1, 3, 3, 1, nil).Release()
|
||||
for _, x := range []uint64{1, 2, 3} {
|
||||
r, ok := ns1.Get(x, nil)
|
||||
if !ok {
|
||||
t.Errorf("miss for key '%d'", x)
|
||||
for _, key := range []uint64{1, 2, 3} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h == nil {
|
||||
t.Errorf("miss for key '%d'", key)
|
||||
} else {
|
||||
if r.Value().(int) != int(x) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
r.Release()
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
o1.Release()
|
||||
o2.Release()
|
||||
for _, x := range []uint64{1, 2} {
|
||||
r, ok := ns1.Get(x, nil)
|
||||
if ok {
|
||||
t.Errorf("hit for key '%d'", x)
|
||||
if r.Value().(int) != int(x) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
|
||||
for _, key := range []uint64{1, 2} {
|
||||
h := ns1.Get(key, nil)
|
||||
if h != nil {
|
||||
t.Errorf("hit for key '%d'", key)
|
||||
if x := h.Value().(int); x != int(key) {
|
||||
t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
|
||||
}
|
||||
r.Release()
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testingCacheObjectCounter struct {
|
||||
created uint
|
||||
released uint
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) createOne() {
|
||||
c.created++
|
||||
}
|
||||
|
||||
func (c *testingCacheObjectCounter) releaseOne() {
|
||||
c.released++
|
||||
}
|
||||
|
||||
type testingCacheObject struct {
|
||||
t *testing.T
|
||||
cnt *testingCacheObjectCounter
|
||||
|
||||
ns, key uint64
|
||||
|
||||
releaseCalled bool
|
||||
}
|
||||
|
||||
func (x *testingCacheObject) Release() {
|
||||
if !x.releaseCalled {
|
||||
x.releaseCalled = true
|
||||
x.cnt.releaseOne()
|
||||
} else {
|
||||
x.t.Errorf("duplicate setfin NS#%d KEY#%d", x.ns, x.key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLRUCache_ConcurrentSetGet(t *testing.T) {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
const (
|
||||
N = 2000000
|
||||
M = 4000
|
||||
C = 3
|
||||
)
|
||||
|
||||
var set, get uint32
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
c := NewLRUCache(M / 4)
|
||||
for ni := uint64(0); ni < C; ni++ {
|
||||
r0 := rand.New(rand.NewSource(seed + int64(ni)))
|
||||
r1 := rand.New(rand.NewSource(seed + int64(ni) + 1))
|
||||
ns := c.GetNamespace(ni)
|
||||
|
||||
wg.Add(2)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, func() (int, interface{}) {
|
||||
atomic.AddUint32(&set, 1)
|
||||
return 1, x
|
||||
})
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r0)
|
||||
go func(ns Namespace, r *rand.Rand) {
|
||||
for i := 0; i < N; i++ {
|
||||
x := uint64(r.Int63n(M))
|
||||
o := ns.Get(x, nil)
|
||||
if o != nil {
|
||||
atomic.AddUint32(&get, 1)
|
||||
if v := o.Value().(uint64); v != x {
|
||||
t.Errorf("#%d invalid value, got=%d", x, v)
|
||||
}
|
||||
o.Release()
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}(ns, r1)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
t.Logf("set=%d get=%d", set, get)
|
||||
}
|
||||
|
||||
func TestLRUCache_Finalizer(t *testing.T) {
|
||||
const (
|
||||
capacity = 100
|
||||
goroutines = 100
|
||||
iterations = 10000
|
||||
keymax = 8000
|
||||
)
|
||||
|
||||
cnt := &testingCacheObjectCounter{}
|
||||
|
||||
c := NewLRUCache(capacity)
|
||||
|
||||
type instance struct {
|
||||
seed int64
|
||||
rnd *rand.Rand
|
||||
nsid uint64
|
||||
ns Namespace
|
||||
effective int
|
||||
handles []Handle
|
||||
handlesMap map[uint64]int
|
||||
|
||||
delete bool
|
||||
purge bool
|
||||
zap bool
|
||||
wantDel int
|
||||
delfinCalled int
|
||||
delfinCalledAll int
|
||||
delfinCalledEff int
|
||||
purgefinCalled int
|
||||
}
|
||||
|
||||
instanceGet := func(p *instance, key uint64) {
|
||||
h := p.ns.Get(key, func() (charge int, value interface{}) {
|
||||
to := &testingCacheObject{
|
||||
t: t, cnt: cnt,
|
||||
ns: p.nsid,
|
||||
key: key,
|
||||
}
|
||||
p.effective++
|
||||
cnt.createOne()
|
||||
return 1, releaserFunc{func() {
|
||||
to.Release()
|
||||
p.effective--
|
||||
}, to}
|
||||
})
|
||||
p.handles = append(p.handles, h)
|
||||
p.handlesMap[key] = p.handlesMap[key] + 1
|
||||
}
|
||||
instanceRelease := func(p *instance, i int) {
|
||||
h := p.handles[i]
|
||||
key := h.Value().(releaserFunc).value.(*testingCacheObject).key
|
||||
if n := p.handlesMap[key]; n == 0 {
|
||||
t.Fatal("key ref == 0")
|
||||
} else if n > 1 {
|
||||
p.handlesMap[key] = n - 1
|
||||
} else {
|
||||
delete(p.handlesMap, key)
|
||||
}
|
||||
h.Release()
|
||||
p.handles = append(p.handles[:i], p.handles[i+1:]...)
|
||||
p.handles[len(p.handles) : len(p.handles)+1][0] = nil
|
||||
}
|
||||
|
||||
seed := time.Now().UnixNano()
|
||||
t.Logf("seed=%d", seed)
|
||||
|
||||
instances := make([]*instance, goroutines)
|
||||
for i := range instances {
|
||||
p := &instance{}
|
||||
p.handlesMap = make(map[uint64]int)
|
||||
p.seed = seed + int64(i)
|
||||
p.rnd = rand.New(rand.NewSource(p.seed))
|
||||
p.nsid = uint64(i)
|
||||
p.ns = c.GetNamespace(p.nsid)
|
||||
p.delete = i%6 == 0
|
||||
p.purge = i%8 == 0
|
||||
p.zap = i%12 == 0 || i%3 == 0
|
||||
instances[i] = p
|
||||
}
|
||||
|
||||
runr := rand.New(rand.NewSource(seed - 1))
|
||||
run := func(rnd *rand.Rand, x []*instance, init func(p *instance) bool, fn func(p *instance, i int) bool) {
|
||||
var (
|
||||
rx []*instance
|
||||
rn []int
|
||||
)
|
||||
if init == nil {
|
||||
rx = append([]*instance{}, x...)
|
||||
rn = make([]int, len(x))
|
||||
} else {
|
||||
for _, p := range x {
|
||||
if init(p) {
|
||||
rx = append(rx, p)
|
||||
rn = append(rn, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
for len(rx) > 0 {
|
||||
i := rand.Intn(len(rx))
|
||||
if fn(rx[i], rn[i]) {
|
||||
rn[i]++
|
||||
} else {
|
||||
rx = append(rx[:i], rx[i+1:]...)
|
||||
rn = append(rn[:i], rn[i+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get and release.
|
||||
run(runr, instances, nil, func(p *instance, i int) bool {
|
||||
if i < iterations {
|
||||
if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
|
||||
instanceGet(p, uint64(p.rnd.Intn(keymax)))
|
||||
} else {
|
||||
instanceRelease(p, p.rnd.Intn(len(p.handles)))
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if used, cap := c.Used(), c.Capacity(); used > cap {
|
||||
t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
|
||||
}
|
||||
|
||||
// Check effective objects.
|
||||
for i, p := range instances {
|
||||
if int(p.effective) < len(p.handlesMap) {
|
||||
t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
|
||||
}
|
||||
}
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// First delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.wantDel = p.effective
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
_, wantExist := p.handlesMap[key]
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
p.delfinCalledAll++
|
||||
if exist {
|
||||
p.delfinCalledEff++
|
||||
}
|
||||
})
|
||||
if !gotExist && wantExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d not found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Second delete.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
p.delfinCalled = 0
|
||||
return p.delete
|
||||
}, func(p *instance, i int) bool {
|
||||
key := uint64(i)
|
||||
if key < keymax {
|
||||
gotExist := p.ns.Delete(key, func(exist, pending bool) {
|
||||
if exist && !pending {
|
||||
t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.nsid, key)
|
||||
}
|
||||
p.delfinCalled++
|
||||
})
|
||||
if gotExist {
|
||||
t.Errorf("delete on NS#%d KEY#%d found", p.nsid, key)
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
if p.delfinCalled != keymax {
|
||||
t.Errorf("(2) NS#%d not all delete fin called, diff=%d", p.nsid, keymax-p.delfinCalled)
|
||||
}
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
// Purge.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.purge
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Purge(func(ns, key uint64) {
|
||||
p.purgefinCalled++
|
||||
})
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Release.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return !p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
if len(p.handles) > 0 {
|
||||
instanceRelease(p, len(p.handles)-1)
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
// Zap.
|
||||
run(runr, instances, func(p *instance) bool {
|
||||
return p.zap
|
||||
}, func(p *instance, i int) bool {
|
||||
p.ns.Zap()
|
||||
p.handles = nil
|
||||
p.handlesMap = nil
|
||||
return false
|
||||
})
|
||||
|
||||
if want := int(cnt.created - cnt.released); c.Size() != want {
|
||||
t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
|
||||
}
|
||||
|
||||
if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
|
||||
t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
|
||||
}
|
||||
|
||||
c.Purge(nil)
|
||||
|
||||
for _, p := range instances {
|
||||
if p.delete {
|
||||
if p.delfinCalledAll != keymax {
|
||||
t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.nsid, p.purge, p.zap, keymax-p.delfinCalledAll)
|
||||
}
|
||||
if p.delfinCalledEff != p.wantDel {
|
||||
t.Errorf("#%d not all effective delete fin called, diff=%d", p.nsid, p.wantDel-p.delfinCalledEff)
|
||||
}
|
||||
if p.purge && p.purgefinCalled > 0 {
|
||||
t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.nsid, p.delete, p.zap, p.purgefinCalled)
|
||||
}
|
||||
} else {
|
||||
if p.purge {
|
||||
if p.purgefinCalled != p.wantDel {
|
||||
t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.nsid, p.delete, p.zap, p.wantDel-p.purgefinCalled)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cnt.created != cnt.released {
|
||||
t.Errorf("Some cache object weren't released, created=%d released=%d", cnt.created, cnt.released)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Set(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Get2(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
ns.Get(i, func() (charge int, value interface{}) {
|
||||
return 0, nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_Release(b *testing.B) {
|
||||
c := NewLRUCache(0)
|
||||
ns := c.GetNamespace(0)
|
||||
handles := make([]Handle, b.N)
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
handles[i] = set(ns, i, "", 1, nil)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for _, h := range handles {
|
||||
h.Release()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLRUCache_SetRelease(b *testing.B) {
|
||||
capacity := b.N / 100
|
||||
if capacity <= 0 {
|
||||
@@ -210,7 +629,7 @@ func BenchmarkLRUCache_SetRelease(b *testing.B) {
|
||||
ns := c.GetNamespace(0)
|
||||
b.ResetTimer()
|
||||
for i := uint64(0); i < uint64(b.N); i++ {
|
||||
set(ns, i, nil, 1, nil).Release()
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,10 +646,10 @@ func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
|
||||
nb := b.N - na
|
||||
|
||||
for i := uint64(0); i < uint64(na); i++ {
|
||||
set(ns, i, nil, 1, nil).Release()
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
|
||||
for i := uint64(0); i < uint64(nb); i++ {
|
||||
set(ns, i, nil, 1, nil).Release()
|
||||
set(ns, i, "", 1, nil).Release()
|
||||
}
|
||||
}
|
||||
|
||||
246 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go (generated, vendored)
@@ -1,246 +0,0 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type emptyCache struct {
|
||||
sync.Mutex
|
||||
table map[uint64]*emptyNS
|
||||
}
|
||||
|
||||
// NewEmptyCache creates a new initialized empty cache.
|
||||
func NewEmptyCache() Cache {
|
||||
return &emptyCache{
|
||||
table: make(map[uint64]*emptyNS),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *emptyCache) GetNamespace(id uint64) Namespace {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if ns, ok := c.table[id]; ok {
|
||||
return ns
|
||||
}
|
||||
|
||||
ns := &emptyNS{
|
||||
cache: c,
|
||||
id: id,
|
||||
table: make(map[uint64]*emptyNode),
|
||||
}
|
||||
c.table[id] = ns
|
||||
return ns
|
||||
}
|
||||
|
||||
func (c *emptyCache) Purge(fin PurgeFin) {
|
||||
c.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
func (c *emptyCache) Zap(closed bool) {
|
||||
c.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.zapNB(closed)
|
||||
}
|
||||
c.table = make(map[uint64]*emptyNS)
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
func (*emptyCache) SetCapacity(capacity int) {}
|
||||
|
||||
type emptyNS struct {
|
||||
cache *emptyCache
|
||||
id uint64
|
||||
table map[uint64]*emptyNode
|
||||
state nsState
|
||||
}
|
||||
|
||||
func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) {
|
||||
ns.cache.Lock()
|
||||
|
||||
switch ns.state {
|
||||
case nsZapped:
|
||||
ns.cache.Unlock()
|
||||
if setf == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
var fin func()
|
||||
ok, value, _, fin = setf()
|
||||
if ok {
|
||||
o = &fakeObject{
|
||||
value: value,
|
||||
fin: fin,
|
||||
}
|
||||
}
|
||||
return
|
||||
case nsClosed:
|
||||
ns.cache.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
n, ok := ns.table[key]
|
||||
if ok {
|
||||
n.ref++
|
||||
} else {
|
||||
if setf == nil {
|
||||
ns.cache.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
var fin func()
|
||||
ok, value, _, fin = setf()
|
||||
if !ok {
|
||||
ns.cache.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
n = &emptyNode{
|
||||
ns: ns,
|
||||
key: key,
|
||||
value: value,
|
||||
setfin: fin,
|
||||
ref: 1,
|
||||
}
|
||||
ns.table[key] = n
|
||||
}
|
||||
|
||||
ns.cache.Unlock()
|
||||
o = &emptyObject{node: n}
|
||||
return
|
||||
}
|
||||
|
||||
func (ns *emptyNS) Delete(key uint64, fin DelFin) bool {
|
||||
ns.cache.Lock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
ns.cache.Unlock()
|
||||
if fin != nil {
|
||||
fin(false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
n, ok := ns.table[key]
|
||||
if !ok {
|
||||
ns.cache.Unlock()
|
||||
if fin != nil {
|
||||
fin(false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
n.delfin = fin
|
||||
ns.cache.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
func (ns *emptyNS) purgeNB(fin PurgeFin) {
|
||||
if ns.state != nsEffective {
|
||||
return
|
||||
}
|
||||
for _, n := range ns.table {
|
||||
n.purgefin = fin
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *emptyNS) Purge(fin PurgeFin) {
|
||||
ns.cache.Lock()
|
||||
ns.purgeNB(fin)
|
||||
ns.cache.Unlock()
|
||||
}
|
||||
|
||||
func (ns *emptyNS) zapNB(closed bool) {
|
||||
if ns.state != nsEffective {
|
||||
return
|
||||
}
|
||||
for _, n := range ns.table {
|
||||
n.execFin()
|
||||
}
|
||||
if closed {
|
||||
ns.state = nsClosed
|
||||
} else {
|
||||
ns.state = nsZapped
|
||||
}
|
||||
ns.table = nil
|
||||
}
|
||||
|
||||
func (ns *emptyNS) Zap(closed bool) {
|
||||
ns.cache.Lock()
|
||||
ns.zapNB(closed)
|
||||
delete(ns.cache.table, ns.id)
|
||||
ns.cache.Unlock()
|
||||
}
|
||||
|
||||
type emptyNode struct {
|
||||
ns *emptyNS
|
||||
key uint64
|
||||
value interface{}
|
||||
ref int
|
||||
setfin SetFin
|
||||
delfin DelFin
|
||||
purgefin PurgeFin
|
||||
}
|
||||
|
||||
func (n *emptyNode) execFin() {
|
||||
if n.setfin != nil {
|
||||
n.setfin()
|
||||
n.setfin = nil
|
||||
}
|
||||
if n.purgefin != nil {
|
||||
n.purgefin(n.ns.id, n.key, n.delfin)
|
||||
n.delfin = nil
|
||||
n.purgefin = nil
|
||||
} else if n.delfin != nil {
|
||||
n.delfin(true)
|
||||
n.delfin = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (n *emptyNode) evict() {
|
||||
n.ns.cache.Lock()
|
||||
n.ref--
|
||||
if n.ref == 0 {
|
||||
if n.ns.state == nsEffective {
|
||||
// Remove elem.
|
||||
delete(n.ns.table, n.key)
|
||||
// Execute finalizer.
|
||||
n.execFin()
|
||||
}
|
||||
} else if n.ref < 0 {
|
||||
panic("leveldb/cache: emptyNode: negative node reference")
|
||||
}
|
||||
n.ns.cache.Unlock()
|
||||
}
|
||||
|
||||
type emptyObject struct {
|
||||
node *emptyNode
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (o *emptyObject) Value() interface{} {
|
||||
if atomic.LoadUint32(&o.once) == 0 {
|
||||
return o.node.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *emptyObject) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
o.node.evict()
|
||||
o.node = nil
|
||||
}
|
||||
628 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go (generated, vendored)
@@ -9,16 +9,24 @@ package cache
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
// The LLRB implementation was taken from https://github.com/petar/GoLLRB,
|
||||
// which contains the following header:
|
||||
//
|
||||
// Copyright 2010 Petar Maymounkov. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// lruCache represent a LRU cache state.
|
||||
type lruCache struct {
|
||||
sync.Mutex
|
||||
|
||||
recent lruNode
|
||||
table map[uint64]*lruNs
|
||||
capacity int
|
||||
size int
|
||||
mu sync.Mutex
|
||||
recent lruNode
|
||||
table map[uint64]*lruNs
|
||||
capacity int
|
||||
used, size, alive int
|
||||
}
|
||||
|
||||
// NewLRUCache creates a new initialized LRU cache with the given capacity.
|
||||
@@ -32,245 +40,415 @@ func NewLRUCache(capacity int) Cache {
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *lruCache) Capacity() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.capacity
|
||||
}
|
||||
|
||||
func (c *lruCache) Used() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.used
|
||||
}
|
||||
|
||||
func (c *lruCache) Size() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.size
|
||||
}
|
||||
|
||||
func (c *lruCache) NumObjects() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.alive
|
||||
}
|
||||
|
||||
// SetCapacity set cache capacity.
|
||||
func (c *lruCache) SetCapacity(capacity int) {
|
||||
c.Lock()
|
||||
c.mu.Lock()
|
||||
c.capacity = capacity
|
||||
c.evict()
|
||||
c.Unlock()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// GetNamespace return namespace object for given id.
|
||||
func (c *lruCache) GetNamespace(id uint64) Namespace {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if p, ok := c.table[id]; ok {
|
||||
return p
|
||||
if ns, ok := c.table[id]; ok {
|
||||
return ns
|
||||
}
|
||||
|
||||
p := &lruNs{
|
||||
lru: c,
|
||||
id: id,
|
||||
table: make(map[uint64]*lruNode),
|
||||
ns := &lruNs{lru: c, id: id}
|
||||
c.table[id] = ns
|
||||
return ns
|
||||
}
|
||||
|
||||
func (c *lruCache) ZapNamespace(id uint64) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.zapNB()
|
||||
delete(c.table, id)
|
||||
}
|
||||
c.table[id] = p
|
||||
return p
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
|
||||
c.mu.Lock()
|
||||
if ns, exist := c.table[id]; exist {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Purge purge entire cache.
|
||||
func (c *lruCache) Purge(fin PurgeFin) {
|
||||
c.Lock()
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.purgeNB(fin)
|
||||
}
|
||||
c.Unlock()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) Zap(closed bool) {
|
||||
c.Lock()
|
||||
func (c *lruCache) Zap() {
|
||||
c.mu.Lock()
|
||||
for _, ns := range c.table {
|
||||
ns.zapNB(closed)
|
||||
ns.zapNB()
|
||||
}
|
||||
c.table = make(map[uint64]*lruNs)
|
||||
c.Unlock()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *lruCache) evict() {
|
||||
top := &c.recent
|
||||
for n := c.recent.rPrev; c.size > c.capacity && n != top; {
|
||||
for n := c.recent.rPrev; c.used > c.capacity && n != top; {
|
||||
if n.state != nodeEffective {
|
||||
panic("evicting non effective node")
|
||||
}
|
||||
n.state = nodeEvicted
|
||||
n.rRemove()
|
||||
n.evictNB()
|
||||
c.size -= n.charge
|
||||
n.derefNB()
|
||||
c.used -= n.charge
|
||||
n = c.recent.rPrev
|
||||
}
|
||||
}
|
||||
|
||||
type lruNs struct {
|
||||
lru *lruCache
|
||||
id uint64
|
||||
table map[uint64]*lruNode
|
||||
state nsState
|
||||
lru *lruCache
|
||||
id uint64
|
||||
rbRoot *lruNode
|
||||
state nsState
|
||||
}
|
||||
|
||||
func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) {
|
||||
lru := ns.lru
|
||||
lru.Lock()
|
||||
|
||||
switch ns.state {
|
||||
case nsZapped:
|
||||
lru.Unlock()
|
||||
if setf == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
var fin func()
|
||||
ok, value, _, fin = setf()
|
||||
if ok {
|
||||
o = &fakeObject{
|
||||
value: value,
|
||||
fin: fin,
|
||||
}
|
||||
}
|
||||
return
|
||||
case nsClosed:
|
||||
lru.Unlock()
|
||||
return
|
||||
func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
n = &lruNode{ns: ns, key: key}
|
||||
return n, n
|
||||
}
|
||||
|
||||
n, ok := ns.table[key]
|
||||
if ok {
|
||||
switch n.state {
|
||||
case nodeEvicted:
|
||||
// Insert to recent list.
|
||||
n.state = nodeEffective
|
||||
n.ref++
|
||||
lru.size += n.charge
|
||||
lru.evict()
|
||||
fallthrough
|
||||
case nodeEffective:
|
||||
// Bump to front
|
||||
n.rRemove()
|
||||
n.rInsert(&lru.recent)
|
||||
if key < h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
|
||||
if hn != nil {
|
||||
h.rbLeft = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
} else if key > h.key {
|
||||
hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
|
||||
if hn != nil {
|
||||
h.rbRight = hn
|
||||
} else {
|
||||
return nil, n
|
||||
}
|
||||
n.ref++
|
||||
} else {
|
||||
if setf == nil {
|
||||
lru.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
var charge int
|
||||
var fin func()
|
||||
ok, value, charge, fin = setf()
|
||||
if !ok {
|
||||
lru.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
n = &lruNode{
|
||||
ns: ns,
|
||||
key: key,
|
||||
value: value,
|
||||
charge: charge,
|
||||
setfin: fin,
|
||||
ref: 2,
|
||||
}
|
||||
ns.table[key] = n
|
||||
n.rInsert(&lru.recent)
|
||||
|
||||
lru.size += charge
|
||||
lru.evict()
|
||||
return nil, h
|
||||
}
|
||||
|
||||
lru.Unlock()
|
||||
o = &lruObject{node: n}
|
||||
return
|
||||
if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
return h, n
|
||||
}
|
||||
|
||||
func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
|
||||
hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
|
||||
if hn != nil {
|
||||
ns.rbRoot = hn
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbGetNode(key uint64) *lruNode {
|
||||
h := ns.rbRoot
|
||||
for h != nil {
|
||||
switch {
|
||||
case key < h.key:
|
||||
h = h.rbLeft
|
||||
case key > h.key:
|
||||
h = h.rbRight
|
||||
default:
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *lruNs) getNode(key uint64) *lruNode {
|
||||
return ns.rbGetNode(key)
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
|
||||
if h == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if key < h.key {
|
||||
if h.rbLeft == nil { // key not present. Nothing to delete
|
||||
return h
|
||||
}
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
|
||||
} else {
|
||||
if rbIsRed(h.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
// If @key equals @h.key and no right children at @h
|
||||
if h.key == key && h.rbRight == nil {
|
||||
return nil
|
||||
}
|
||||
if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
|
||||
h = rbMoveRight(h)
|
||||
}
|
||||
// If @key equals @h.key, and (from above) 'h.Right != nil'
|
||||
if h.key == key {
|
||||
var x *lruNode
|
||||
h.rbRight, x = rbDeleteMin(h.rbRight)
|
||||
if x == nil {
|
||||
panic("logic")
|
||||
}
|
||||
x.rbLeft, h.rbLeft = h.rbLeft, nil
|
||||
x.rbRight, h.rbRight = h.rbRight, nil
|
||||
x.rbBlack = h.rbBlack
|
||||
h = x
|
||||
} else { // Else, @key is bigger than @h.key
|
||||
h.rbRight = ns.rbDeleteNode(h.rbRight, key)
|
||||
}
|
||||
}
|
||||
|
||||
return rbFixup(h)
|
||||
}
|
||||
|
||||
func (ns *lruNs) deleteNode(key uint64) {
|
||||
ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
|
||||
if ns.rbRoot != nil {
|
||||
ns.rbRoot.rbBlack = true
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
|
||||
if h == nil {
|
||||
return true
|
||||
}
|
||||
if h.key >= pivot {
|
||||
if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
|
||||
return false
|
||||
}
|
||||
if !iter(h) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ns.rbIterateNodes(h.rbRight, pivot, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
|
||||
ns.rbIterateNodes(ns.rbRoot, 0, iter)
|
||||
}
|
||||
|
||||
func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
return nil
|
||||
}
|
||||
|
||||
var n *lruNode
|
||||
if setf == nil {
|
||||
n = ns.getNode(key)
|
||||
if n == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
n = ns.getOrCreateNode(key)
|
||||
}
|
||||
switch n.state {
|
||||
case nodeZero:
|
||||
charge, value := setf()
|
||||
if value == nil {
|
||||
ns.deleteNode(key)
|
||||
return nil
|
||||
}
|
||||
if charge < 0 {
|
||||
charge = 0
|
||||
}
|
||||
|
||||
n.value = value
|
||||
n.charge = charge
|
||||
n.state = nodeEvicted
|
||||
|
||||
ns.lru.size += charge
|
||||
ns.lru.alive++
|
||||
|
||||
fallthrough
|
||||
case nodeEvicted:
|
||||
if n.charge == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Insert to recent list.
|
||||
n.state = nodeEffective
|
||||
n.ref++
|
||||
ns.lru.used += n.charge
|
||||
ns.lru.evict()
|
||||
|
||||
fallthrough
|
||||
case nodeEffective:
|
||||
// Bump to front.
|
||||
n.rRemove()
|
||||
n.rInsert(&ns.lru.recent)
|
||||
case nodeDeleted:
|
||||
// Do nothing.
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
n.ref++
|
||||
|
||||
return &lruHandle{node: n}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
|
||||
lru := ns.lru
|
||||
lru.Lock()
|
||||
ns.lru.mu.Lock()
|
||||
defer ns.lru.mu.Unlock()
|
||||
|
||||
if ns.state != nsEffective {
|
||||
lru.Unlock()
|
||||
if fin != nil {
|
||||
fin(false)
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
n, ok := ns.table[key]
|
||||
if !ok {
|
||||
lru.Unlock()
|
||||
n := ns.getNode(key)
|
||||
if n == nil {
|
||||
if fin != nil {
|
||||
fin(false)
|
||||
fin(false, false)
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
|
||||
n.delfin = fin
|
||||
switch n.state {
|
||||
case nodeRemoved:
|
||||
lru.Unlock()
|
||||
return false
|
||||
case nodeEffective:
|
||||
lru.size -= n.charge
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
n.rRemove()
|
||||
n.evictNB()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.delfin = fin
|
||||
case nodeDeleted:
|
||||
if fin != nil {
|
||||
fin(true, true)
|
||||
}
|
||||
return false
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
n.state = nodeRemoved
|
||||
|
||||
lru.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
func (ns *lruNs) purgeNB(fin PurgeFin) {
|
||||
lru := ns.lru
|
||||
if ns.state != nsEffective {
|
||||
return
|
||||
}
|
||||
|
||||
for _, n := range ns.table {
|
||||
n.purgefin = fin
|
||||
if n.state == nodeEffective {
|
||||
lru.size -= n.charge
|
||||
n.rRemove()
|
||||
n.evictNB()
|
||||
if ns.state == nsEffective {
|
||||
var nodes []*lruNode
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
nodes = append(nodes, n)
|
||||
return true
|
||||
})
|
||||
for _, n := range nodes {
|
||||
switch n.state {
|
||||
case nodeEffective:
|
||||
ns.lru.used -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
n.rRemove()
|
||||
n.derefNB()
|
||||
case nodeEvicted:
|
||||
n.state = nodeDeleted
|
||||
n.purgefin = fin
|
||||
case nodeDeleted:
|
||||
default:
|
||||
panic("invalid state")
|
||||
}
|
||||
}
|
||||
n.state = nodeRemoved
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *lruNs) Purge(fin PurgeFin) {
|
||||
ns.lru.Lock()
|
||||
ns.lru.mu.Lock()
|
||||
ns.purgeNB(fin)
|
||||
ns.lru.Unlock()
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ns *lruNs) zapNB(closed bool) {
|
||||
lru := ns.lru
|
||||
if ns.state != nsEffective {
|
||||
return
|
||||
}
|
||||
|
||||
if closed {
|
||||
ns.state = nsClosed
|
||||
} else {
|
||||
func (ns *lruNs) zapNB() {
|
||||
if ns.state == nsEffective {
|
||||
ns.state = nsZapped
|
||||
|
||||
ns.iterateNodes(func(n *lruNode) bool {
|
||||
if n.state == nodeEffective {
|
||||
ns.lru.used -= n.charge
|
||||
n.rRemove()
|
||||
}
|
||||
ns.lru.size -= n.charge
|
||||
n.state = nodeDeleted
|
||||
n.fin()
|
||||
|
||||
return true
|
||||
})
|
||||
ns.rbRoot = nil
|
||||
}
|
||||
for _, n := range ns.table {
|
||||
if n.state == nodeEffective {
|
||||
lru.size -= n.charge
|
||||
n.rRemove()
|
||||
}
|
||||
n.state = nodeRemoved
|
||||
n.execFin()
|
||||
}
|
||||
ns.table = nil
|
||||
}
|
||||
|
||||
func (ns *lruNs) Zap(closed bool) {
|
||||
ns.lru.Lock()
|
||||
ns.zapNB(closed)
|
||||
func (ns *lruNs) Zap() {
|
||||
ns.lru.mu.Lock()
|
||||
ns.zapNB()
|
||||
delete(ns.lru.table, ns.id)
|
||||
ns.lru.Unlock()
|
||||
ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruNode struct {
|
||||
ns *lruNs
|
||||
|
||||
rNext, rPrev *lruNode
|
||||
rNext, rPrev *lruNode
|
||||
rbLeft, rbRight *lruNode
|
||||
rbBlack bool
|
||||
|
||||
key uint64
|
||||
value interface{}
|
||||
charge int
|
||||
ref int
|
||||
state nodeState
|
||||
setfin SetFin
|
||||
delfin DelFin
|
||||
purgefin PurgeFin
|
||||
}
|
||||
@@ -284,7 +462,6 @@ func (n *lruNode) rInsert(at *lruNode) {
|
||||
}
|
||||
|
||||
func (n *lruNode) rRemove() bool {
|
||||
// only remove if not already removed
|
||||
if n.rPrev == nil {
|
||||
return false
|
||||
}
|
||||
@@ -297,58 +474,149 @@ func (n *lruNode) rRemove() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lruNode) execFin() {
|
||||
if n.setfin != nil {
|
||||
n.setfin()
|
||||
n.setfin = nil
|
||||
func (n *lruNode) fin() {
|
||||
if r, ok := n.value.(util.Releaser); ok {
|
||||
r.Release()
|
||||
}
|
||||
if n.purgefin != nil {
|
||||
n.purgefin(n.ns.id, n.key, n.delfin)
|
||||
n.delfin = nil
|
||||
if n.delfin != nil {
|
||||
panic("conflicting delete and purge fin")
|
||||
}
|
||||
n.purgefin(n.ns.id, n.key)
|
||||
n.purgefin = nil
|
||||
} else if n.delfin != nil {
|
||||
n.delfin(true)
|
||||
n.delfin(true, false)
|
||||
n.delfin = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) evictNB() {
|
||||
func (n *lruNode) derefNB() {
|
||||
n.ref--
|
||||
if n.ref == 0 {
|
||||
if n.ns.state == nsEffective {
|
||||
// remove elem
|
||||
delete(n.ns.table, n.key)
|
||||
// execute finalizer
|
||||
n.execFin()
|
||||
// Remove element.
|
||||
n.ns.deleteNode(n.key)
|
||||
n.ns.lru.size -= n.charge
|
||||
n.ns.lru.alive--
|
||||
n.fin()
|
||||
}
|
||||
n.value = nil
|
||||
} else if n.ref < 0 {
|
||||
panic("leveldb/cache: lruCache: negative node reference")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lruNode) evict() {
|
||||
n.ns.lru.Lock()
|
||||
n.evictNB()
|
||||
n.ns.lru.Unlock()
|
||||
func (n *lruNode) deref() {
|
||||
n.ns.lru.mu.Lock()
|
||||
n.derefNB()
|
||||
n.ns.lru.mu.Unlock()
|
||||
}
|
||||
|
||||
type lruObject struct {
|
||||
type lruHandle struct {
|
||||
node *lruNode
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (o *lruObject) Value() interface{} {
|
||||
if atomic.LoadUint32(&o.once) == 0 {
|
||||
return o.node.value
|
||||
func (h *lruHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.node.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *lruObject) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
|
||||
func (h *lruHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
|
||||
o.node.evict()
|
||||
o.node = nil
|
||||
h.node.deref()
|
||||
h.node = nil
|
||||
}
|
||||
|
||||
func rbIsRed(h *lruNode) bool {
|
||||
if h == nil {
|
||||
return false
|
||||
}
|
||||
return !h.rbBlack
|
||||
}
|
||||
|
||||
func rbRotLeft(h *lruNode) *lruNode {
|
||||
x := h.rbRight
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbRight = x.rbLeft
|
||||
x.rbLeft = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbRotRight(h *lruNode) *lruNode {
|
||||
x := h.rbLeft
|
||||
if x.rbBlack {
|
||||
panic("rotating a black link")
|
||||
}
|
||||
h.rbLeft = x.rbRight
|
||||
x.rbRight = h
|
||||
x.rbBlack = h.rbBlack
|
||||
h.rbBlack = false
|
||||
return x
|
||||
}
|
||||
|
||||
func rbFlip(h *lruNode) {
|
||||
h.rbBlack = !h.rbBlack
|
||||
h.rbLeft.rbBlack = !h.rbLeft.rbBlack
|
||||
h.rbRight.rbBlack = !h.rbRight.rbBlack
|
||||
}
|
||||
|
||||
func rbMoveLeft(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbRight.rbLeft) {
|
||||
h.rbRight = rbRotRight(h.rbRight)
|
||||
h = rbRotLeft(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbMoveRight(h *lruNode) *lruNode {
|
||||
rbFlip(h)
|
||||
if rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
rbFlip(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func rbFixup(h *lruNode) *lruNode {
|
||||
if rbIsRed(h.rbRight) {
|
||||
h = rbRotLeft(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbRotRight(h)
|
||||
}
|
||||
|
||||
if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
|
||||
rbFlip(h)
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
|
||||
if h == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if h.rbLeft == nil {
|
||||
return nil, h
|
||||
}
|
||||
|
||||
if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
|
||||
h = rbMoveLeft(h)
|
||||
}
|
||||
|
||||
h.rbLeft, n = rbDeleteMin(h.rbLeft)
|
||||
|
||||
return rbFixup(h), n
|
||||
}
|
||||
|
||||
40 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go (generated, vendored)
@@ -1,40 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
const (
|
||||
kNumLevels = 7
|
||||
|
||||
// Level-0 compaction is started when we hit this many files.
|
||||
kL0_CompactionTrigger float64 = 4
|
||||
|
||||
// Soft limit on number of level-0 files. We slow down writes at this point.
|
||||
kL0_SlowdownWritesTrigger = 8
|
||||
|
||||
// Maximum number of level-0 files. We stop writes at this point.
|
||||
kL0_StopWritesTrigger = 12
|
||||
|
||||
// Maximum level to which a new compacted memdb is pushed if it
|
||||
// does not create overlap. We try to push to level 2 to avoid the
|
||||
// relatively expensive level 0=>1 compactions and to avoid some
|
||||
// expensive manifest file operations. We do not push all the way to
|
||||
// the largest level since that can generate a lot of wasted disk
|
||||
// space if the same key space is being repeatedly overwritten.
|
||||
kMaxMemCompactLevel = 2
|
||||
|
||||
// Maximum size of a table.
|
||||
kMaxTableSize = 2 * 1048576
|
||||
|
||||
// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
|
||||
// stop building a single file in a level->level+1 compaction.
|
||||
kMaxGrandParentOverlapBytes = 10 * kMaxTableSize
|
||||
|
||||
// Maximum number of bytes in all compacted files. We avoid expanding
|
||||
// the lower level file set of a compaction if it would make the
|
||||
// total compaction cover more than this many bytes.
|
||||
kExpCompactionMaxBytes = 25 * kMaxTableSize
|
||||
)
|
||||
492 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (generated, vendored)
@@ -7,15 +7,17 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"container/list"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
@@ -30,41 +32,46 @@ type DB struct {
|
||||
// Need 64-bit alignment.
|
||||
seq uint64
|
||||
|
||||
// Session.
|
||||
s *session
|
||||
|
||||
// MemDB
|
||||
// MemDB.
|
||||
memMu sync.RWMutex
|
||||
mem *memdb.DB
|
||||
frozenMem *memdb.DB
|
||||
memPool chan *memdb.DB
|
||||
mem, frozenMem *memDB
|
||||
journal *journal.Writer
|
||||
journalWriter storage.Writer
|
||||
journalFile storage.File
|
||||
frozenJournalFile storage.File
|
||||
frozenSeq uint64
|
||||
|
||||
// Snapshot
|
||||
// Snapshot.
|
||||
snapsMu sync.Mutex
|
||||
snapsRoot snapshotElement
|
||||
snapsList *list.List
|
||||
|
||||
// Write
|
||||
// Stats.
|
||||
aliveSnaps, aliveIters int32
|
||||
|
||||
// Write.
|
||||
writeC chan *Batch
|
||||
writeMergedC chan bool
|
||||
writeLockC chan struct{}
|
||||
writeAckC chan error
|
||||
writeDelay time.Duration
|
||||
writeDelayN int
|
||||
journalC chan *Batch
|
||||
journalAckC chan error
|
||||
|
||||
// Compaction
|
||||
tcompCmdC chan cCmd
|
||||
tcompPauseC chan chan<- struct{}
|
||||
tcompTriggerC chan struct{}
|
||||
mcompCmdC chan cCmd
|
||||
mcompTriggerC chan struct{}
|
||||
compErrC chan error
|
||||
compErrSetC chan error
|
||||
compStats [kNumLevels]cStats
|
||||
// Compaction.
|
||||
tcompCmdC chan cCmd
|
||||
tcompPauseC chan chan<- struct{}
|
||||
mcompCmdC chan cCmd
|
||||
compErrC chan error
|
||||
compPerErrC chan error
|
||||
compErrSetC chan error
|
||||
compStats []cStats
|
||||
|
||||
// Close
|
||||
// Close.
|
||||
closeW sync.WaitGroup
|
||||
closeC chan struct{}
|
||||
closed uint32
|
||||
@@ -77,7 +84,11 @@ func openDB(s *session) (*DB, error) {
|
||||
db := &DB{
|
||||
s: s,
|
||||
// Initial sequence
|
||||
seq: s.stSeq,
|
||||
seq: s.stSeqNum,
|
||||
// MemDB
|
||||
memPool: make(chan *memdb.DB, 1),
|
||||
// Snapshot
|
||||
snapsList: list.New(),
|
||||
// Write
|
||||
writeC: make(chan *Batch),
|
||||
writeMergedC: make(chan bool),
|
||||
@@ -86,17 +97,16 @@ func openDB(s *session) (*DB, error) {
|
||||
journalC: make(chan *Batch),
|
||||
journalAckC: make(chan error),
|
||||
// Compaction
|
||||
tcompCmdC: make(chan cCmd),
|
||||
tcompPauseC: make(chan chan<- struct{}),
|
||||
tcompTriggerC: make(chan struct{}, 1),
|
||||
mcompCmdC: make(chan cCmd),
|
||||
mcompTriggerC: make(chan struct{}, 1),
|
||||
compErrC: make(chan error),
|
||||
compErrSetC: make(chan error),
|
||||
tcompCmdC: make(chan cCmd),
|
||||
tcompPauseC: make(chan chan<- struct{}),
|
||||
mcompCmdC: make(chan cCmd),
|
||||
compErrC: make(chan error),
|
||||
compPerErrC: make(chan error),
|
||||
compErrSetC: make(chan error),
|
||||
compStats: make([]cStats, s.o.GetNumLevel()),
|
||||
// Close
|
||||
closeC: make(chan struct{}),
|
||||
}
|
||||
db.initSnapshot()
|
||||
|
||||
if err := db.recoverJournal(); err != nil {
|
||||
return nil, err
|
||||
@@ -112,8 +122,9 @@ func openDB(s *session) (*DB, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Don't include compaction error goroutine into wait group.
|
||||
// Doesn't need to be included in the wait group.
|
||||
go db.compactionError()
|
||||
go db.mpoolDrain()
|
||||
|
||||
db.closeW.Add(3)
|
||||
go db.tCompaction()
|
||||
@@ -135,9 +146,10 @@ func openDB(s *session) (*DB, error) {
|
||||
// detected in the DB. Corrupted DB can be recovered with Recover
|
||||
// function.
|
||||
//
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
s, err := newSession(p, o)
|
||||
func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
s, err := newSession(stor, o)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -177,6 +189,7 @@ func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
// detected in the DB. Corrupted DB can be recovered with Recover
|
||||
// function.
|
||||
//
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
stor, err := storage.OpenFile(path)
|
||||
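Several of these doc comments stress that the returned DB instance is goroutine-safe and must be closed after use. Here is a minimal, hypothetical usage sketch against the public goleveldb API; the path, keys and values are illustrative and not taken from the diff.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Open (or create) a database backed by the file system.
	db, err := leveldb.OpenFile("/tmp/example-db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close() // the DB must be closed after use

	if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
	val, err := db.Get([]byte("key"), nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key = %s", val)
}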
@@ -197,9 +210,10 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
// The DB must already exist or it will return an error.
|
||||
// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
|
||||
//
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
s, err := newSession(p, o)
|
||||
func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
s, err := newSession(stor, o)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -225,6 +239,7 @@ func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
|
||||
// RecoverFile uses standard file-system backed storage implementation as described
|
||||
// in the leveldb/storage package.
|
||||
//
|
||||
// The returned DB instance is goroutine-safe.
|
||||
// The DB must be closed after use, by calling Close method.
|
||||
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
stor, err := storage.OpenFile(path)
|
||||
@@ -241,16 +256,28 @@ func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
|
||||
}
|
||||
|
||||
func recoverTable(s *session, o *opt.Options) error {
|
||||
ff0, err := s.getFiles(storage.TypeTable)
|
||||
o = dupOptions(o)
|
||||
// Mask StrictReader, let StrictRecovery do its job.
|
||||
o.Strict &= ^opt.StrictReader
|
||||
|
||||
// Get all tables and sort them by file number.
|
||||
tableFiles_, err := s.getFiles(storage.TypeTable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ff1 := files(ff0)
|
||||
ff1.sort()
|
||||
tableFiles := files(tableFiles_)
|
||||
tableFiles.sort()
|
||||
|
||||
var mSeq uint64
|
||||
var good, corrupted int
|
||||
rec := new(sessionRecord)
|
||||
var (
|
||||
mSeq uint64
|
||||
recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
|
||||
|
||||
// We will drop corrupted table.
|
||||
strict = o.GetStrict(opt.StrictRecovery)
|
||||
|
||||
rec = &sessionRecord{numLevel: o.GetNumLevel()}
|
||||
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
|
||||
)
|
||||
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
|
||||
tmp = s.newTemp()
|
||||
writer, err := tmp.Create()
|
||||
@@ -264,8 +291,9 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
tmp = nil
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy entries.
|
||||
tw := table.NewWriter(writer, o)
|
||||
// Copy records.
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
if validIkey(key) {
|
||||
@@ -297,44 +325,65 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Get file size.
|
||||
size, err := reader.Seek(0, 2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var tSeq uint64
|
||||
var tgood, tcorrupted, blockerr int
|
||||
var min, max []byte
|
||||
tr := table.NewReader(reader, size, nil, o)
|
||||
|
||||
var (
|
||||
tSeq uint64
|
||||
tgoodKey, tcorruptedKey, tcorruptedBlock int
|
||||
imin, imax []byte
|
||||
)
|
||||
tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
iter := tr.NewIterator(nil, nil)
|
||||
iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
|
||||
s.logf("table@recovery found error @%d %q", file.Num(), err)
|
||||
blockerr++
|
||||
if errors.IsCorrupted(err) {
|
||||
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
|
||||
tcorruptedBlock++
|
||||
}
|
||||
})
|
||||
|
||||
// Scan the table.
|
||||
for iter.Next() {
|
||||
key := iter.Key()
|
||||
_, seq, _, ok := parseIkey(key)
|
||||
if !ok {
|
||||
tcorrupted++
|
||||
_, seq, _, kerr := parseIkey(key)
|
||||
if kerr != nil {
|
||||
tcorruptedKey++
|
||||
continue
|
||||
}
|
||||
tgood++
|
||||
tgoodKey++
|
||||
if seq > tSeq {
|
||||
tSeq = seq
|
||||
}
|
||||
if min == nil {
|
||||
min = append([]byte{}, key...)
|
||||
if imin == nil {
|
||||
imin = append([]byte{}, key...)
|
||||
}
|
||||
max = append(max[:0], key...)
|
||||
imax = append(imax[:0], key...)
|
||||
}
|
||||
if err := iter.Error(); err != nil {
|
||||
iter.Release()
|
||||
return err
|
||||
}
|
||||
iter.Release()
|
||||
if tgood > 0 {
|
||||
if tcorrupted > 0 || blockerr > 0 {
|
||||
|
||||
goodKey += tgoodKey
|
||||
corruptedKey += tcorruptedKey
|
||||
corruptedBlock += tcorruptedBlock
|
||||
|
||||
if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
|
||||
droppedTable++
|
||||
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
return nil
|
||||
}
|
||||
|
||||
if tgoodKey > 0 {
|
||||
if tcorruptedKey > 0 || tcorruptedBlock > 0 {
|
||||
// Rebuild the table.
|
||||
s.logf("table@recovery rebuilding @%d", file.Num())
|
||||
iter := tr.NewIterator(nil, nil)
|
||||
@@ -352,53 +401,67 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
if tSeq > mSeq {
|
||||
mSeq = tSeq
|
||||
}
|
||||
recoveredKey += tgoodKey
|
||||
// Add table to level 0.
|
||||
rec.addTable(0, file.Num(), uint64(size), min, max)
|
||||
s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq)
|
||||
rec.addTable(0, file.Num(), uint64(size), imin, imax)
|
||||
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
|
||||
} else {
|
||||
s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size)
|
||||
droppedTable++
|
||||
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
|
||||
}
|
||||
|
||||
good += tgood
|
||||
corrupted += tcorrupted
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recover all tables.
|
||||
if len(ff1) > 0 {
|
||||
s.logf("table@recovery F·%d", len(ff1))
|
||||
s.markFileNum(ff1[len(ff1)-1].Num())
|
||||
for _, file := range ff1 {
|
||||
if len(tableFiles) > 0 {
|
||||
s.logf("table@recovery F·%d", len(tableFiles))
|
||||
|
||||
// Mark file number as used.
|
||||
s.markFileNum(tableFiles[len(tableFiles)-1].Num())
|
||||
|
||||
for _, file := range tableFiles {
|
||||
if err := recoverTable(file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(ff1), good, corrupted, mSeq)
|
||||
|
||||
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, mSeq)
|
||||
}
|
||||
|
||||
// Set sequence number.
|
||||
rec.setSeq(mSeq + 1)
|
||||
rec.setSeqNum(mSeq + 1)
|
||||
|
||||
// Create new manifest.
|
||||
if err := s.create(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit.
|
||||
return s.commit(rec)
|
||||
}
|
||||
|
||||
func (d *DB) recoverJournal() error {
|
||||
s := d.s
|
||||
|
||||
ff0, err := s.getFiles(storage.TypeJournal)
|
||||
func (db *DB) recoverJournal() error {
|
||||
// Get all journal files and sort them by file number.
|
||||
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ff1 := files(ff0)
|
||||
ff1.sort()
|
||||
ff2 := make([]storage.File, 0, len(ff1))
|
||||
for _, file := range ff1 {
|
||||
if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
|
||||
s.markFileNum(file.Num())
|
||||
ff2 = append(ff2, file)
|
||||
journalFiles := files(journalFiles_)
|
||||
journalFiles.sort()
|
||||
|
||||
// Discard older journal.
|
||||
prev := -1
|
||||
for i, file := range journalFiles {
|
||||
if file.Num() >= db.s.stJournalNum {
|
||||
if prev >= 0 {
|
||||
i--
|
||||
journalFiles[i] = journalFiles[prev]
|
||||
}
|
||||
journalFiles = journalFiles[i:]
|
||||
break
|
||||
} else if file.Num() == db.s.stPrevJournalNum {
|
||||
prev = i
|
||||
}
|
||||
}
|
||||
|
||||
@@ -406,38 +469,43 @@ func (d *DB) recoverJournal() error {
|
||||
var of storage.File
|
||||
var mem *memdb.DB
|
||||
batch := new(Batch)
|
||||
cm := newCMem(s)
|
||||
cm := newCMem(db.s)
|
||||
buf := new(util.Buffer)
|
||||
// Options.
|
||||
strict := s.o.GetStrict(opt.StrictJournal)
|
||||
checksum := s.o.GetStrict(opt.StrictJournalChecksum)
|
||||
writeBuffer := s.o.GetWriteBuffer()
|
||||
strict := db.s.o.GetStrict(opt.StrictJournal)
|
||||
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
|
||||
writeBuffer := db.s.o.GetWriteBuffer()
|
||||
recoverJournal := func(file storage.File) error {
|
||||
s.logf("journal@recovery recovering @%d", file.Num())
|
||||
db.logf("journal@recovery recovering @%d", file.Num())
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Create/reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
|
||||
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(reader, dropper{s, file}, strict, checksum)
|
||||
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
|
||||
}
|
||||
|
||||
// Flush memdb and remove obsolete journal file.
|
||||
if of != nil {
|
||||
if mem.Len() > 0 {
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := cm.commit(file.Num(), d.seq); err != nil {
|
||||
if err := cm.commit(file.Num(), db.seq); err != nil {
|
||||
return err
|
||||
}
|
||||
cm.reset()
|
||||
of.Remove()
|
||||
of = nil
|
||||
}
|
||||
// Reset memdb.
|
||||
|
||||
// Replay journal to memdb.
|
||||
mem.Reset()
|
||||
for {
|
||||
r, err := jr.Next()
|
||||
@@ -445,43 +513,58 @@ func (d *DB) recoverJournal() error {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
return errors.SetFile(err, file)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
if _, err := buf.ReadFrom(r); err != nil {
|
||||
if strict {
|
||||
return err
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// This error is returned due to corruption when strict == false.
|
||||
continue
|
||||
} else {
|
||||
return errors.SetFile(err, file)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := batch.decode(buf.Bytes()); err != nil {
|
||||
return err
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
|
||||
if strict || !errors.IsCorrupted(err) {
|
||||
return errors.SetFile(err, file)
|
||||
} else {
|
||||
db.s.logf("journal error: %v (skipped)", err)
|
||||
// We won't apply sequence number as it might be corrupted.
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err := batch.memReplay(mem); err != nil {
|
||||
return err
|
||||
}
|
||||
d.seq = batch.seq + uint64(batch.len())
|
||||
|
||||
// Save sequence number.
|
||||
db.seq = batch.seq + uint64(batch.Len())
|
||||
|
||||
// Flush it if large enough.
|
||||
if mem.Size() >= writeBuffer {
|
||||
// Large enough, flush it.
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
// Reset memdb.
|
||||
mem.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
of = file
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recover all journals.
|
||||
if len(ff2) > 0 {
|
||||
s.logf("journal@recovery F·%d", len(ff2))
|
||||
mem = memdb.New(s.icmp, writeBuffer)
|
||||
for _, file := range ff2 {
|
||||
if len(journalFiles) > 0 {
|
||||
db.logf("journal@recovery F·%d", len(journalFiles))
|
||||
|
||||
// Mark file number as used.
|
||||
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
|
||||
|
||||
mem = memdb.New(db.s.icmp, writeBuffer)
|
||||
for _, file := range journalFiles {
|
||||
if err := recoverJournal(file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the last journal.
|
||||
if mem.Len() > 0 {
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
@@ -489,51 +572,64 @@ func (d *DB) recoverJournal() error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new journal.
|
||||
if _, err := d.newMem(0); err != nil {
|
||||
if _, err := db.newMem(0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit.
|
||||
if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
|
||||
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
|
||||
// Close journal.
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Remove the last journal.
|
||||
|
||||
// Remove the last obsolete journal file.
|
||||
if of != nil {
|
||||
of.Remove()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
s := d.s
|
||||
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
ikey := newIkey(key, seq, ktSeek)
|
||||
|
||||
ikey := newIKey(key, seq, tSeek)
|
||||
|
||||
em, fm := d.getMems()
|
||||
for _, m := range [...]*memdb.DB{em, fm} {
|
||||
em, fm := db.getMems()
|
||||
for _, m := range [...]*memDB{em, fm} {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
mk, mv, me := m.Find(ikey)
|
||||
defer m.decref()
|
||||
|
||||
mk, mv, me := m.mdb.Find(ikey)
|
||||
if me == nil {
|
||||
ukey, _, t, ok := parseIkey(mk)
|
||||
if ok && s.icmp.uCompare(ukey, key) == 0 {
|
||||
if t == tDel {
|
||||
ukey, _, kt, kerr := parseIkey(mk)
|
||||
if kerr != nil {
|
||||
// Shouldn't have happened.
|
||||
panic(kerr)
|
||||
}
|
||||
if db.s.icmp.uCompare(ukey, key) == 0 {
|
||||
if kt == ktDel {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
return mv, nil
|
||||
return append([]byte{}, mv...), nil
|
||||
}
|
||||
} else if me != ErrNotFound {
|
||||
return nil, me
|
||||
}
|
||||
}
|
||||
|
||||
v := s.version()
|
||||
v := db.s.version()
|
||||
value, cSched, err := v.get(ikey, ro)
|
||||
v.release()
|
||||
if cSched {
|
||||
// Trigger table compaction.
|
||||
d.compTrigger(d.tcompTriggerC)
|
||||
db.compSendTrigger(db.tcompCmdC)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -541,15 +637,18 @@ func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err
|
||||
// Get gets the value for the given key. It returns ErrNotFound if the
|
||||
// DB does not contain the key.
|
||||
//
|
||||
// The caller should not modify the contents of the returned slice, but
|
||||
// it is safe to modify the contents of the argument after Get returns.
|
||||
func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
err = d.ok()
|
||||
// The returned slice is its own copy; it is safe to modify the contents
|
||||
// of the returned slice.
|
||||
// It is safe to modify the contents of the argument after Get returns.
|
||||
func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
err = db.ok()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return d.get(key, d.getSeq(), ro)
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
return db.get(key, se.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the latest snapshot of the
|
||||
@@ -568,14 +667,16 @@ func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
// The iterator must be released after use, by calling Release method.
|
||||
//
|
||||
// Also read Iterator documentation of the leveldb/iterator package.
|
||||
func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
if err := d.ok(); err != nil {
|
||||
func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
if err := db.ok(); err != nil {
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
|
||||
p := d.newSnapshot()
|
||||
defer p.Release()
|
||||
return p.NewIterator(slice, ro)
|
||||
se := db.acquireSnapshot()
|
||||
defer db.releaseSnapshot(se)
|
||||
// Iterator holds the 'version' lock; 'version' is immutable, so the snapshot
// can be released after the iterator is created.
|
||||
return db.newIterator(se.seq, slice, ro)
|
||||
}
|
||||
|
||||
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
|
||||
@@ -583,25 +684,35 @@ func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterat
|
||||
// content of the snapshot is guaranteed to be consistent.
|
||||
//
|
||||
// The snapshot must be released after use, by calling Release method.
|
||||
func (d *DB) GetSnapshot() (*Snapshot, error) {
|
||||
if err := d.ok(); err != nil {
|
||||
func (db *DB) GetSnapshot() (*Snapshot, error) {
|
||||
if err := db.ok(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return d.newSnapshot(), nil
|
||||
return db.newSnapshot(), nil
|
||||
}
|
||||
|
||||
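The read-path changes above (Get returning a private copy, Get and NewIterator pinning a sequence via acquireSnapshot) are easiest to see from the caller's side. A minimal sketch, with a placeholder path and made-up keys:

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/exampledb", nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Get now returns its own copy of the value, so the caller may modify it.
	if v, err := db.Get([]byte("alpha"), nil); err == nil {
		if len(v) > 0 {
			v[0] = 'X' // safe: does not affect the stored value
		}
		fmt.Println(string(v))
	}

	// NewIterator pins the current sequence number, so the view stays
	// consistent until the iterator is released.
	iter := db.NewIterator(nil, nil)
	for iter.Next() {
		fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		log.Fatal(err)
	}
}
```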
// GetProperty returns the value of the given property name.
|
||||
//
|
||||
// Property names:
|
||||
// leveldb.num-files-at-level{n}
|
||||
// Returns the number of filer at level 'n'.
|
||||
// Returns the number of files at level 'n'.
|
||||
// leveldb.stats
|
||||
// Returns statistics of the underlying DB.
|
||||
// leveldb.sstables
|
||||
// Returns sstables list for each level.
|
||||
func (d *DB) GetProperty(name string) (value string, err error) {
|
||||
err = d.ok()
|
||||
// leveldb.blockpool
|
||||
// Returns block pool stats.
|
||||
// leveldb.cachedblock
|
||||
// Returns size of cached block.
|
||||
// leveldb.openedtables
|
||||
// Returns number of opened tables.
|
||||
// leveldb.alivesnaps
|
||||
// Returns number of alive snapshots.
|
||||
// leveldb.aliveiters
|
||||
// Returns number of alive iterators.
|
||||
func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
err = db.ok()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -610,19 +721,18 @@ func (d *DB) GetProperty(name string) (value string, err error) {
|
||||
if !strings.HasPrefix(name, prefix) {
|
||||
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
|
||||
}
|
||||
|
||||
p := name[len(prefix):]
|
||||
|
||||
s := d.s
|
||||
v := s.version()
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
numFilesPrefix := "num-files-at-level"
|
||||
switch {
|
||||
case strings.HasPrefix(p, "num-files-at-level"):
|
||||
case strings.HasPrefix(p, numFilesPrefix):
|
||||
var level uint
|
||||
var rest string
|
||||
n, _ := fmt.Scanf("%d%s", &level, &rest)
|
||||
if n != 1 || level >= kNumLevels {
|
||||
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
|
||||
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
|
||||
err = errors.New("leveldb: GetProperty: invalid property: " + name)
|
||||
} else {
|
||||
value = fmt.Sprint(v.tLen(int(level)))
|
||||
@@ -631,22 +741,36 @@ func (d *DB) GetProperty(name string) (value string, err error) {
|
||||
value = "Compactions\n" +
|
||||
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
|
||||
"-------+------------+---------------+---------------+---------------+---------------\n"
|
||||
for level, tt := range v.tables {
|
||||
duration, read, write := d.compStats[level].get()
|
||||
if len(tt) == 0 && duration == 0 {
|
||||
for level, tables := range v.tables {
|
||||
duration, read, write := db.compStats[level].get()
|
||||
if len(tables) == 0 && duration == 0 {
|
||||
continue
|
||||
}
|
||||
value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
|
||||
level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
|
||||
level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
|
||||
float64(read)/1048576.0, float64(write)/1048576.0)
|
||||
}
|
||||
case p == "sstables":
|
||||
for level, tt := range v.tables {
|
||||
for level, tables := range v.tables {
|
||||
value += fmt.Sprintf("--- level %d ---\n", level)
|
||||
for _, t := range tt {
|
||||
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.min, t.max)
|
||||
for _, t := range tables {
|
||||
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
|
||||
}
|
||||
}
|
||||
case p == "blockpool":
|
||||
value = fmt.Sprintf("%v", db.s.tops.bpool)
|
||||
case p == "cachedblock":
|
||||
if bc := db.s.o.GetBlockCache(); bc != nil {
|
||||
value = fmt.Sprintf("%d", bc.Size())
|
||||
} else {
|
||||
value = "<nil>"
|
||||
}
|
||||
case p == "openedtables":
|
||||
value = fmt.Sprintf("%d", db.s.tops.cache.Size())
|
||||
case p == "alivesnaps":
|
||||
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
|
||||
case p == "aliveiters":
|
||||
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
|
||||
default:
|
||||
err = errors.New("leveldb: GetProperty: unknown property: " + name)
|
||||
}
|
||||
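The property names in the sketch below are the ones listed in the doc comment above; the helper name dumpProperties and the package name are illustrative only.

```go
package example

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

// dumpProperties prints a few of the properties documented above.
// Unknown or malformed names return an error instead of a value.
func dumpProperties(db *leveldb.DB) {
	for _, name := range []string{
		"leveldb.stats",
		"leveldb.sstables",
		"leveldb.blockpool",
		"leveldb.openedtables",
		"leveldb.aliveiters",
		"leveldb.alivesnaps",
		"leveldb.num-files-at-level0",
	} {
		value, err := db.GetProperty(name)
		if err != nil {
			log.Printf("%s: %v", name, err)
			continue
		}
		log.Printf("%s:\n%s", name, value)
	}
}
```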
@@ -660,23 +784,23 @@ func (d *DB) GetProperty(name string) (value string, err error) {
|
||||
// data compresses by a factor of ten, the returned sizes will be one-tenth
|
||||
// the size of the corresponding user data size.
|
||||
// The results may not include the sizes of recently written data.
|
||||
func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
if err := d.ok(); err != nil {
|
||||
func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
if err := db.ok(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v := d.s.version()
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
sizes := make(Sizes, 0, len(ranges))
|
||||
for _, r := range ranges {
|
||||
min := newIKey(r.Start, kMaxSeq, tSeek)
|
||||
max := newIKey(r.Limit, kMaxSeq, tSeek)
|
||||
start, err := v.offsetOf(min)
|
||||
imin := newIkey(r.Start, kMaxSeq, ktSeek)
|
||||
imax := newIkey(r.Limit, kMaxSeq, ktSeek)
|
||||
start, err := v.offsetOf(imin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
limit, err := v.offsetOf(max)
|
||||
limit, err := v.offsetOf(imax)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -690,61 +814,67 @@ func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
|
||||
return sizes, nil
|
||||
}
|
||||
|
||||
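A short usage sketch of SizeOf as documented above; the ranges and the helper name printSizes are placeholders, and, as the doc notes, the results reflect compressed file sizes and may exclude recently written data.

```go
package example

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// printSizes estimates the approximate on-disk size of two key ranges.
func printSizes(db *leveldb.DB) {
	ranges := []util.Range{
		{Start: []byte("a"), Limit: []byte("m")},
		{Start: []byte("m"), Limit: []byte("z")},
	}
	sizes, err := db.SizeOf(ranges)
	if err != nil {
		log.Print(err)
		return
	}
	for i, size := range sizes {
		log.Printf("range %d: ~%d bytes", i, size)
	}
}
```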
// Close closes the DB. This will also release any outstanding snapshot.
|
||||
// Close closes the DB. This will also release any outstanding snapshot and
|
||||
// abort any in-flight compaction.
|
||||
//
|
||||
// It is not safe to close a DB until all outstanding iterators are released.
|
||||
// It is valid to call Close multiple times. Other methods should not be
|
||||
// called after the DB has been closed.
|
||||
func (d *DB) Close() error {
|
||||
if !d.setClosed() {
|
||||
func (db *DB) Close() error {
|
||||
if !db.setClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
s := d.s
|
||||
start := time.Now()
|
||||
s.log("db@close closing")
|
||||
db.log("db@close closing")
|
||||
|
||||
// Clear the finalizer.
|
||||
runtime.SetFinalizer(d, nil)
|
||||
runtime.SetFinalizer(db, nil)
|
||||
|
||||
// Get compaction error.
|
||||
var err error
|
||||
select {
|
||||
case err = <-d.compErrC:
|
||||
case err = <-db.compErrC:
|
||||
default:
|
||||
}
|
||||
|
||||
close(d.closeC)
|
||||
// Signal all goroutines.
|
||||
close(db.closeC)
|
||||
|
||||
// Wait for the close WaitGroup.
|
||||
d.closeW.Wait()
|
||||
// Wait for all goroutines to exit.
|
||||
db.closeW.Wait()
|
||||
|
||||
// Close journal.
|
||||
if d.journal != nil {
|
||||
d.journal.Close()
|
||||
d.journalWriter.Close()
|
||||
// Lock writer and close journal.
|
||||
db.writeLockC <- struct{}{}
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
}
|
||||
|
||||
if db.writeDelayN > 0 {
|
||||
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
|
||||
}
|
||||
|
||||
// Close session.
|
||||
s.close()
|
||||
s.logf("db@close done T·%v", time.Since(start))
|
||||
s.release()
|
||||
db.s.close()
|
||||
db.logf("db@close done T·%v", time.Since(start))
|
||||
db.s.release()
|
||||
|
||||
if d.closer != nil {
|
||||
if err1 := d.closer.Close(); err == nil {
|
||||
if db.closer != nil {
|
||||
if err1 := db.closer.Close(); err == nil {
|
||||
err = err1
|
||||
}
|
||||
}
|
||||
|
||||
d.s = nil
|
||||
d.mem = nil
|
||||
d.frozenMem = nil
|
||||
d.journal = nil
|
||||
d.journalWriter = nil
|
||||
d.journalFile = nil
|
||||
d.frozenJournalFile = nil
|
||||
d.snapsRoot = snapshotElement{}
|
||||
d.closer = nil
|
||||
// NIL'ing pointers.
|
||||
db.s = nil
|
||||
db.mem = nil
|
||||
db.frozenMem = nil
|
||||
db.journal = nil
|
||||
db.journalWriter = nil
|
||||
db.journalFile = nil
|
||||
db.frozenJournalFile = nil
|
||||
db.closer = nil
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
789 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go (generated, vendored); file diff suppressed because it is too large
80 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go (generated, vendored)
@@ -9,6 +9,8 @@ package leveldb
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@@ -19,46 +21,61 @@ var (
|
||||
errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
|
||||
)
|
||||
|
||||
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
s := db.s
|
||||
type memdbReleaser struct {
|
||||
once sync.Once
|
||||
m *memDB
|
||||
}
|
||||
|
||||
func (mr *memdbReleaser) Release() {
|
||||
mr.once.Do(func() {
|
||||
mr.m.decref()
|
||||
})
|
||||
}
|
||||
|
||||
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
em, fm := db.getMems()
|
||||
v := s.version()
|
||||
v := db.s.version()
|
||||
|
||||
ti := v.getIterators(slice, ro)
|
||||
n := len(ti) + 2
|
||||
i := make([]iterator.Iterator, 0, n)
|
||||
i = append(i, em.NewIterator(slice))
|
||||
emi := em.mdb.NewIterator(slice)
|
||||
emi.SetReleaser(&memdbReleaser{m: em})
|
||||
i = append(i, emi)
|
||||
if fm != nil {
|
||||
i = append(i, fm.NewIterator(slice))
|
||||
fmi := fm.mdb.NewIterator(slice)
|
||||
fmi.SetReleaser(&memdbReleaser{m: fm})
|
||||
i = append(i, fmi)
|
||||
}
|
||||
i = append(i, ti...)
|
||||
strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
|
||||
mi := iterator.NewMergedIterator(i, s.icmp, strict)
|
||||
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
|
||||
mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
|
||||
mi.SetReleaser(&versionReleaser{v: v})
|
||||
return mi
|
||||
}
|
||||
|
||||
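newRawIterator above stacks memdb iterators and table iterators into one merged view. The standalone sketch below merges two plain memdbs with iterator.NewMergedIterator; the memdb and iterator calls exist in this vendored snapshot, but treat the whole thing as an illustration (user keys instead of internal keys, no releasers) rather than what the DB itself does.

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// Two small in-memory tables standing in for the effective and frozen
	// memdbs; capacities are arbitrary.
	m1 := memdb.New(comparer.DefaultComparer, 1<<10)
	m2 := memdb.New(comparer.DefaultComparer, 1<<10)
	m1.Put([]byte("a"), []byte("1"))
	m1.Put([]byte("c"), []byte("3"))
	m2.Put([]byte("b"), []byte("2"))

	iters := []iterator.Iterator{m1.NewIterator(nil), m2.NewIterator(nil)}
	merged := iterator.NewMergedIterator(iters, comparer.DefaultComparer, true)
	defer merged.Release()

	// Prints a=1, b=2, c=3: the merged iterator yields keys in sorted order
	// across both sources.
	for merged.Next() {
		fmt.Printf("%s = %s\n", merged.Key(), merged.Value())
	}
}
```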
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
|
||||
var slice_ *util.Range
|
||||
var islice *util.Range
|
||||
if slice != nil {
|
||||
slice_ = &util.Range{}
|
||||
islice = &util.Range{}
|
||||
if slice.Start != nil {
|
||||
slice_.Start = newIKey(slice.Start, kMaxSeq, tSeek)
|
||||
islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
|
||||
}
|
||||
if slice.Limit != nil {
|
||||
slice_.Limit = newIKey(slice.Limit, kMaxSeq, tSeek)
|
||||
islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
|
||||
}
|
||||
}
|
||||
rawIter := db.newRawIterator(slice_, ro)
|
||||
rawIter := db.newRawIterator(islice, ro)
|
||||
iter := &dbIter{
|
||||
db: db,
|
||||
icmp: db.s.icmp,
|
||||
iter: rawIter,
|
||||
seq: seq,
|
||||
strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator),
|
||||
strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
|
||||
key: make([]byte, 0),
|
||||
value: make([]byte, 0),
|
||||
}
|
||||
atomic.AddInt32(&db.aliveIters, 1)
|
||||
runtime.SetFinalizer(iter, (*dbIter).Release)
|
||||
return iter
|
||||
}
|
||||
@@ -75,6 +92,7 @@ const (
|
||||
|
||||
// dbIter represents iterator state over a database session.
|
||||
type dbIter struct {
|
||||
db *DB
|
||||
icmp *iComparer
|
||||
iter iterator.Iterator
|
||||
seq uint64
|
||||
@@ -144,7 +162,7 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
ikey := newIKey(key, i.seq, tSeek)
|
||||
ikey := newIkey(key, i.seq, ktSeek)
|
||||
if i.iter.Seek(ikey) {
|
||||
i.dir = dirSOI
|
||||
return i.next()
|
||||
@@ -156,15 +174,14 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||
|
||||
func (i *dbIter) next() bool {
|
||||
for {
|
||||
ukey, seq, t, ok := parseIkey(i.iter.Key())
|
||||
if ok {
|
||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if seq <= i.seq {
|
||||
switch t {
|
||||
case tDel:
|
||||
switch kt {
|
||||
case ktDel:
|
||||
// Skip deleted key.
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.dir = dirForward
|
||||
case tVal:
|
||||
case ktVal:
|
||||
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.value = append(i.value[:0], i.iter.Value()...)
|
||||
@@ -174,7 +191,7 @@ func (i *dbIter) next() bool {
|
||||
}
|
||||
}
|
||||
} else if i.strict {
|
||||
i.setErr(errInvalidIkey)
|
||||
i.setErr(kerr)
|
||||
break
|
||||
}
|
||||
if !i.iter.Next() {
|
||||
@@ -207,20 +224,19 @@ func (i *dbIter) prev() bool {
|
||||
del := true
|
||||
if i.iter.Valid() {
|
||||
for {
|
||||
ukey, seq, t, ok := parseIkey(i.iter.Key())
|
||||
if ok {
|
||||
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if seq <= i.seq {
|
||||
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
|
||||
return true
|
||||
}
|
||||
del = (t == tDel)
|
||||
del = (kt == ktDel)
|
||||
if !del {
|
||||
i.key = append(i.key[:0], ukey...)
|
||||
i.value = append(i.value[:0], i.iter.Value()...)
|
||||
}
|
||||
}
|
||||
} else if i.strict {
|
||||
i.setErr(errInvalidIkey)
|
||||
i.setErr(kerr)
|
||||
return false
|
||||
}
|
||||
if !i.iter.Prev() {
|
||||
@@ -249,13 +265,12 @@ func (i *dbIter) Prev() bool {
|
||||
return i.Last()
|
||||
case dirForward:
|
||||
for i.iter.Prev() {
|
||||
ukey, _, _, ok := parseIkey(i.iter.Key())
|
||||
if ok {
|
||||
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
|
||||
if i.icmp.uCompare(ukey, i.key) < 0 {
|
||||
goto cont
|
||||
}
|
||||
} else if i.strict {
|
||||
i.setErr(errInvalidIkey)
|
||||
i.setErr(kerr)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -289,6 +304,7 @@ func (i *dbIter) Release() {
|
||||
|
||||
if i.releaser != nil {
|
||||
i.releaser.Release()
|
||||
i.releaser = nil
|
||||
}
|
||||
|
||||
i.dir = dirReleased
|
||||
@@ -296,13 +312,19 @@ func (i *dbIter) Release() {
|
||||
i.value = nil
|
||||
i.iter.Release()
|
||||
i.iter = nil
|
||||
atomic.AddInt32(&i.db.aliveIters, -1)
|
||||
i.db = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (i *dbIter) SetReleaser(releaser util.Releaser) {
|
||||
if i.dir != dirReleased {
|
||||
i.releaser = releaser
|
||||
if i.dir == dirReleased {
|
||||
panic(util.ErrReleased)
|
||||
}
|
||||
if i.releaser != nil && releaser != nil {
|
||||
panic(util.ErrHasReleaser)
|
||||
}
|
||||
i.releaser = releaser
|
||||
}
|
||||
|
||||
func (i *dbIter) Error() error {
|
||||
|
||||
126 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go (generated, vendored)
@@ -7,8 +7,10 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@@ -18,51 +20,41 @@ import (
|
||||
type snapshotElement struct {
|
||||
seq uint64
|
||||
ref int
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
next, prev *snapshotElement
|
||||
}
|
||||
|
||||
// Initialize the snapshot.
|
||||
func (db *DB) initSnapshot() {
|
||||
db.snapsRoot.next = &db.snapsRoot
|
||||
db.snapsRoot.prev = &db.snapsRoot
|
||||
e *list.Element
|
||||
}
|
||||
|
||||
// Acquires a snapshot, based on latest sequence.
|
||||
func (db *DB) acquireSnapshot() *snapshotElement {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
|
||||
seq := db.getSeq()
|
||||
elem := db.snapsRoot.prev
|
||||
if elem == &db.snapsRoot || elem.seq != seq {
|
||||
at := db.snapsRoot.prev
|
||||
next := at.next
|
||||
elem = &snapshotElement{
|
||||
seq: seq,
|
||||
prev: at,
|
||||
next: next,
|
||||
|
||||
if e := db.snapsList.Back(); e != nil {
|
||||
se := e.Value.(*snapshotElement)
|
||||
if se.seq == seq {
|
||||
se.ref++
|
||||
return se
|
||||
} else if seq < se.seq {
|
||||
panic("leveldb: sequence number is not increasing")
|
||||
}
|
||||
at.next = elem
|
||||
next.prev = elem
|
||||
}
|
||||
elem.ref++
|
||||
db.snapsMu.Unlock()
|
||||
return elem
|
||||
se := &snapshotElement{seq: seq, ref: 1}
|
||||
se.e = db.snapsList.PushBack(se)
|
||||
return se
|
||||
}
|
||||
|
||||
// Releases given snapshot element.
|
||||
func (db *DB) releaseSnapshot(elem *snapshotElement) {
|
||||
if !db.isClosed() {
|
||||
db.snapsMu.Lock()
|
||||
elem.ref--
|
||||
if elem.ref == 0 {
|
||||
elem.prev.next = elem.next
|
||||
elem.next.prev = elem.prev
|
||||
elem.next = nil
|
||||
elem.prev = nil
|
||||
} else if elem.ref < 0 {
|
||||
panic("leveldb: Snapshot: negative element reference")
|
||||
}
|
||||
db.snapsMu.Unlock()
|
||||
func (db *DB) releaseSnapshot(se *snapshotElement) {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
|
||||
se.ref--
|
||||
if se.ref == 0 {
|
||||
db.snapsList.Remove(se.e)
|
||||
se.e = nil
|
||||
} else if se.ref < 0 {
|
||||
panic("leveldb: Snapshot: negative element reference")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,10 +62,11 @@ func (db *DB) releaseSnapshot(elem *snapshotElement) {
|
||||
func (db *DB) minSeq() uint64 {
|
||||
db.snapsMu.Lock()
|
||||
defer db.snapsMu.Unlock()
|
||||
elem := db.snapsRoot.prev
|
||||
if elem != &db.snapsRoot {
|
||||
return elem.seq
|
||||
|
||||
if e := db.snapsList.Front(); e != nil {
|
||||
return e.Value.(*snapshotElement).seq
|
||||
}
|
||||
|
||||
return db.getSeq()
|
||||
}
|
||||
|
||||
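The change above replaces the hand-rolled doubly linked list with container/list plus a per-element reference count, and minSeq simply reads the front of the list. A minimal standalone sketch of the same bookkeeping (names are illustrative, and the real code additionally guards everything with snapsMu and panics on a decreasing sequence):

```go
package example

import "container/list"

// snap is one acquired snapshot: the sequence number it pins, a reference
// count, and its position in the list.
type snap struct {
	seq uint64
	ref int
	e   *list.Element
}

type snapTracker struct {
	seq   uint64     // latest sequence number
	snaps *list.List // ordered oldest (front) to newest (back)
}

func newSnapTracker() *snapTracker {
	return &snapTracker{snaps: list.New()}
}

// acquire reuses the newest element when it already pins the current
// sequence number, otherwise it appends a new one.
func (t *snapTracker) acquire() *snap {
	if e := t.snaps.Back(); e != nil {
		s := e.Value.(*snap)
		if s.seq == t.seq {
			s.ref++
			return s
		}
	}
	s := &snap{seq: t.seq, ref: 1}
	s.e = t.snaps.PushBack(s)
	return s
}

// release drops one reference and removes the element once unused.
func (t *snapTracker) release(s *snap) {
	s.ref--
	if s.ref == 0 {
		t.snaps.Remove(s.e)
		s.e = nil
	}
}

// minSeq is the oldest pinned sequence number, or the latest one when no
// snapshot is held; compaction must keep entries at or above this value.
func (t *snapTracker) minSeq() uint64 {
	if e := t.snaps.Front(); e != nil {
		return e.Value.(*snap).seq
	}
	return t.seq
}
```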
@@ -81,18 +74,19 @@ func (db *DB) minSeq() uint64 {
|
||||
type Snapshot struct {
|
||||
db *DB
|
||||
elem *snapshotElement
|
||||
mu sync.Mutex
|
||||
mu sync.RWMutex
|
||||
released bool
|
||||
}
|
||||
|
||||
// Creates new snapshot object.
|
||||
func (db *DB) newSnapshot() *Snapshot {
|
||||
p := &Snapshot{
|
||||
snap := &Snapshot{
|
||||
db: db,
|
||||
elem: db.acquireSnapshot(),
|
||||
}
|
||||
runtime.SetFinalizer(p, (*Snapshot).Release)
|
||||
return p
|
||||
atomic.AddInt32(&db.aliveSnaps, 1)
|
||||
runtime.SetFinalizer(snap, (*Snapshot).Release)
|
||||
return snap
|
||||
}
|
||||
|
||||
// Get gets the value for the given key. It returns ErrNotFound if
|
||||
@@ -100,19 +94,18 @@ func (db *DB) newSnapshot() *Snapshot {
|
||||
//
|
||||
// The caller should not modify the contents of the returned slice, but
|
||||
// it is safe to modify the contents of the argument after Get returns.
|
||||
func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
db := p.db
|
||||
err = db.ok()
|
||||
func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
err = snap.db.ok()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.released {
|
||||
snap.mu.RLock()
|
||||
defer snap.mu.RUnlock()
|
||||
if snap.released {
|
||||
err = ErrSnapshotReleased
|
||||
return
|
||||
}
|
||||
return db.get(key, p.elem.seq, ro)
|
||||
return snap.db.get(key, snap.elem.seq, ro)
|
||||
}
|
||||
|
||||
// NewIterator returns an iterator for the snapshot of the underlying DB.
|
||||
@@ -132,17 +125,18 @@ func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error
|
||||
// iterator will still be valid until released.
|
||||
//
|
||||
// Also read Iterator documentation of the leveldb/iterator package.
|
||||
func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
db := p.db
|
||||
if err := db.ok(); err != nil {
|
||||
func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
|
||||
if err := snap.db.ok(); err != nil {
|
||||
return iterator.NewEmptyIterator(err)
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.released {
|
||||
snap.mu.Lock()
|
||||
defer snap.mu.Unlock()
|
||||
if snap.released {
|
||||
return iterator.NewEmptyIterator(ErrSnapshotReleased)
|
||||
}
|
||||
return db.newIterator(p.elem.seq, slice, ro)
|
||||
// Since the iterator already holds a version ref, it doesn't need to
// hold a snapshot ref.
|
||||
return snap.db.newIterator(snap.elem.seq, slice, ro)
|
||||
}
|
||||
|
||||
// Release releases the snapshot. This will not release any returned
|
||||
@@ -150,16 +144,18 @@ func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.
|
||||
// underlying DB is closed.
|
||||
//
|
||||
// Other methods should not be called after the snapshot has been released.
|
||||
func (p *Snapshot) Release() {
|
||||
p.mu.Lock()
|
||||
if !p.released {
|
||||
// Clear the finalizer.
|
||||
runtime.SetFinalizer(p, nil)
|
||||
func (snap *Snapshot) Release() {
|
||||
snap.mu.Lock()
|
||||
defer snap.mu.Unlock()
|
||||
|
||||
p.released = true
|
||||
p.db.releaseSnapshot(p.elem)
|
||||
p.db = nil
|
||||
p.elem = nil
|
||||
if !snap.released {
|
||||
// Clear the finalizer.
|
||||
runtime.SetFinalizer(snap, nil)
|
||||
|
||||
snap.released = true
|
||||
snap.db.releaseSnapshot(snap.elem)
|
||||
atomic.AddInt32(&snap.db.aliveSnaps, -1)
|
||||
snap.db = nil
|
||||
snap.elem = nil
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
198 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go (generated, vendored)
@@ -8,106 +8,194 @@ package leveldb
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
type memDB struct {
|
||||
db *DB
|
||||
mdb *memdb.DB
|
||||
ref int32
|
||||
}
|
||||
|
||||
func (m *memDB) incref() {
|
||||
atomic.AddInt32(&m.ref, 1)
|
||||
}
|
||||
|
||||
func (m *memDB) decref() {
|
||||
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
|
||||
// Only put back memdb with std capacity.
|
||||
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
|
||||
m.mdb.Reset()
|
||||
m.db.mpoolPut(m.mdb)
|
||||
}
|
||||
m.db = nil
|
||||
m.mdb = nil
|
||||
} else if ref < 0 {
|
||||
panic("negative memdb ref")
|
||||
}
|
||||
}
|
||||
|
||||
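The memDB wrapper above reference-counts the underlying memdb so it can be reset and returned to the pool once the last user lets go. A minimal illustration of the acquire/release discipline expected from callers; the names here (resource, use) are illustrative, not the vendored API:

```go
package example

import "sync/atomic"

// resource stands in for the pooled memdb; release runs once the last
// reference is dropped.
type resource struct {
	ref     int32
	release func()
}

func (r *resource) incref() { atomic.AddInt32(&r.ref, 1) }

func (r *resource) decref() {
	if n := atomic.AddInt32(&r.ref, -1); n == 0 {
		r.release()
	} else if n < 0 {
		panic("negative resource ref")
	}
}

// use shows the caller-side discipline: whatever hands out the resource
// has already bumped the count (as getMems does), so every user pairs it
// with a deferred decref, mirroring db.get above.
func use(r *resource) {
	defer r.decref()
	// ... read from the resource ...
}
```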
// Get latest sequence number.
|
||||
func (d *DB) getSeq() uint64 {
|
||||
return atomic.LoadUint64(&d.seq)
|
||||
func (db *DB) getSeq() uint64 {
|
||||
return atomic.LoadUint64(&db.seq)
|
||||
}
|
||||
|
||||
// Atomically adds delta to seq.
|
||||
func (d *DB) addSeq(delta uint64) {
|
||||
atomic.AddUint64(&d.seq, delta)
|
||||
func (db *DB) addSeq(delta uint64) {
|
||||
atomic.AddUint64(&db.seq, delta)
|
||||
}
|
||||
|
||||
func (db *DB) mpoolPut(mem *memdb.DB) {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
select {
|
||||
case db.memPool <- mem:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) mpoolGet() *memdb.DB {
|
||||
select {
|
||||
case mem := <-db.memPool:
|
||||
return mem
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) mpoolDrain() {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
select {
|
||||
case <-db.memPool:
|
||||
default:
|
||||
}
|
||||
case _, _ = <-db.closeC:
|
||||
close(db.memPool)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
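mpoolPut, mpoolGet and mpoolDrain above implement a small object pool: a one-slot buffered channel, non-blocking send/receive, and a ticker that slowly evicts idle entries. A stripped-down sketch of the same pattern, pooling plain byte slices instead of *memdb.DB (all names are illustrative):

```go
package example

import "time"

// bufPool mirrors the memdb pool pattern used above.
type bufPool struct {
	pool    chan []byte
	closing chan struct{}
}

func newBufPool() *bufPool {
	p := &bufPool{pool: make(chan []byte, 1), closing: make(chan struct{})}
	go p.drain()
	return p
}

// put offers a buffer back; if the pool is full the buffer is dropped.
func (p *bufPool) put(b []byte) {
	select {
	case p.pool <- b:
	default:
	}
}

// get reuses a pooled buffer when it is large enough, else allocates.
func (p *bufPool) get(n int) []byte {
	select {
	case b := <-p.pool:
		if cap(b) >= n {
			return b[:0]
		}
	default:
	}
	return make([]byte, 0, n)
}

// drain discards one pooled entry every 30 seconds so an idle pool does
// not pin memory forever, mirroring mpoolDrain.
func (p *bufPool) drain() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			select {
			case <-p.pool:
			default:
			}
		case <-p.closing:
			return
		}
	}
}
```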
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
|
||||
func (d *DB) newMem(n int) (mem *memdb.DB, err error) {
|
||||
s := d.s
|
||||
|
||||
num := s.allocFileNum()
|
||||
file := s.getJournalFile(num)
|
||||
func (db *DB) newMem(n int) (mem *memDB, err error) {
|
||||
num := db.s.allocFileNum()
|
||||
file := db.s.getJournalFile(num)
|
||||
w, err := file.Create()
|
||||
if err != nil {
|
||||
s.reuseFileNum(num)
|
||||
db.s.reuseFileNum(num)
|
||||
return
|
||||
}
|
||||
d.memMu.Lock()
|
||||
if d.journal == nil {
|
||||
d.journal = journal.NewWriter(w)
|
||||
} else {
|
||||
d.journal.Reset(w)
|
||||
d.journalWriter.Close()
|
||||
d.frozenJournalFile = d.journalFile
|
||||
|
||||
db.memMu.Lock()
|
||||
defer db.memMu.Unlock()
|
||||
|
||||
if db.frozenMem != nil {
|
||||
panic("still has frozen mem")
|
||||
}
|
||||
d.journalWriter = w
|
||||
d.journalFile = file
|
||||
d.frozenMem = d.mem
|
||||
d.mem = memdb.New(s.icmp, maxInt(d.s.o.GetWriteBuffer(), n))
|
||||
mem = d.mem
|
||||
// The seq only incremented by the writer.
|
||||
d.frozenSeq = d.seq
|
||||
d.memMu.Unlock()
|
||||
|
||||
if db.journal == nil {
|
||||
db.journal = journal.NewWriter(w)
|
||||
} else {
|
||||
db.journal.Reset(w)
|
||||
db.journalWriter.Close()
|
||||
db.frozenJournalFile = db.journalFile
|
||||
}
|
||||
db.journalWriter = w
|
||||
db.journalFile = file
|
||||
db.frozenMem = db.mem
|
||||
mdb := db.mpoolGet()
|
||||
if mdb == nil || mdb.Capacity() < n {
|
||||
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
|
||||
}
|
||||
mem = &memDB{
|
||||
db: db,
|
||||
mdb: mdb,
|
||||
ref: 2,
|
||||
}
|
||||
db.mem = mem
|
||||
// The seq is only incremented by the writer. And whoever calls newMem
// should hold the write lock, so no additional synchronization is needed here.
|
||||
db.frozenSeq = db.seq
|
||||
return
|
||||
}
|
||||
|
||||
// Get all memdbs.
|
||||
func (d *DB) getMems() (e *memdb.DB, f *memdb.DB) {
|
||||
d.memMu.RLock()
|
||||
defer d.memMu.RUnlock()
|
||||
return d.mem, d.frozenMem
|
||||
func (db *DB) getMems() (e, f *memDB) {
|
||||
db.memMu.RLock()
|
||||
defer db.memMu.RUnlock()
|
||||
if db.mem == nil {
|
||||
panic("nil effective mem")
|
||||
}
|
||||
db.mem.incref()
|
||||
if db.frozenMem != nil {
|
||||
db.frozenMem.incref()
|
||||
}
|
||||
return db.mem, db.frozenMem
|
||||
}
|
||||
|
||||
// Get effective memdb.
|
||||
func (d *DB) getEffectiveMem() *memdb.DB {
|
||||
d.memMu.RLock()
|
||||
defer d.memMu.RUnlock()
|
||||
return d.mem
|
||||
func (db *DB) getEffectiveMem() *memDB {
|
||||
db.memMu.RLock()
|
||||
defer db.memMu.RUnlock()
|
||||
if db.mem == nil {
|
||||
panic("nil effective mem")
|
||||
}
|
||||
db.mem.incref()
|
||||
return db.mem
|
||||
}
|
||||
|
||||
// Check whether we have a frozen memdb.
|
||||
func (d *DB) hasFrozenMem() bool {
|
||||
d.memMu.RLock()
|
||||
defer d.memMu.RUnlock()
|
||||
return d.frozenMem != nil
|
||||
func (db *DB) hasFrozenMem() bool {
|
||||
db.memMu.RLock()
|
||||
defer db.memMu.RUnlock()
|
||||
return db.frozenMem != nil
|
||||
}
|
||||
|
||||
// Get frozen memdb.
|
||||
func (d *DB) getFrozenMem() *memdb.DB {
|
||||
d.memMu.RLock()
|
||||
defer d.memMu.RUnlock()
|
||||
return d.frozenMem
|
||||
func (db *DB) getFrozenMem() *memDB {
|
||||
db.memMu.RLock()
|
||||
defer db.memMu.RUnlock()
|
||||
if db.frozenMem != nil {
|
||||
db.frozenMem.incref()
|
||||
}
|
||||
return db.frozenMem
|
||||
}
|
||||
|
||||
// Drop frozen memdb; assume that frozen memdb isn't nil.
|
||||
func (d *DB) dropFrozenMem() {
|
||||
d.memMu.Lock()
|
||||
if err := d.frozenJournalFile.Remove(); err != nil {
|
||||
d.s.logf("journal@remove removing @%d %q", d.frozenJournalFile.Num(), err)
|
||||
func (db *DB) dropFrozenMem() {
|
||||
db.memMu.Lock()
|
||||
if err := db.frozenJournalFile.Remove(); err != nil {
|
||||
db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
|
||||
} else {
|
||||
d.s.logf("journal@remove removed @%d", d.frozenJournalFile.Num())
|
||||
db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
|
||||
}
|
||||
d.frozenJournalFile = nil
|
||||
d.frozenMem = nil
|
||||
d.memMu.Unlock()
|
||||
db.frozenJournalFile = nil
|
||||
db.frozenMem.decref()
|
||||
db.frozenMem = nil
|
||||
db.memMu.Unlock()
|
||||
}
|
||||
|
||||
// Set closed flag; return true if not already closed.
|
||||
func (d *DB) setClosed() bool {
|
||||
return atomic.CompareAndSwapUint32(&d.closed, 0, 1)
|
||||
func (db *DB) setClosed() bool {
|
||||
return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
|
||||
}
|
||||
|
||||
// Check whether DB was closed.
|
||||
func (d *DB) isClosed() bool {
|
||||
return atomic.LoadUint32(&d.closed) != 0
|
||||
func (db *DB) isClosed() bool {
|
||||
return atomic.LoadUint32(&db.closed) != 0
|
||||
}
|
||||
|
||||
// Check read ok status.
|
||||
func (d *DB) ok() error {
|
||||
if d.isClosed() {
|
||||
func (db *DB) ok() error {
|
||||
if db.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
return nil
|
||||
|
||||
750 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (generated, vendored)
@@ -7,6 +7,10 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
@@ -20,6 +24,7 @@ import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@@ -148,25 +153,29 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
|
||||
t := h.t
|
||||
db := h.db
|
||||
|
||||
var res uint64
|
||||
var (
|
||||
maxOverlaps uint64
|
||||
maxLevel int
|
||||
)
|
||||
v := db.s.version()
|
||||
for i, tt := range v.tables[1 : len(v.tables)-1] {
|
||||
level := i + 1
|
||||
next := v.tables[level+1]
|
||||
for _, t := range tt {
|
||||
var r tFiles
|
||||
min, max := t.min.ukey(), t.max.ukey()
|
||||
next.getOverlaps(min, max, &r, true, db.s.icmp.ucmp)
|
||||
r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
|
||||
sum := r.size()
|
||||
if sum > res {
|
||||
res = sum
|
||||
if sum > maxOverlaps {
|
||||
maxOverlaps = sum
|
||||
maxLevel = level
|
||||
}
|
||||
}
|
||||
}
|
||||
v.release()
|
||||
|
||||
if res > want {
|
||||
t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res)
|
||||
if maxOverlaps > want {
|
||||
t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
|
||||
} else {
|
||||
t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,7 +248,7 @@ func (h *dbHarness) allEntriesFor(key, want string) {
|
||||
db := h.db
|
||||
s := db.s
|
||||
|
||||
ikey := newIKey([]byte(key), kMaxSeq, tVal)
|
||||
ikey := newIkey([]byte(key), kMaxSeq, ktVal)
|
||||
iter := db.newRawIterator(nil, nil)
|
||||
if !iter.Seek(ikey) && iter.Error() != nil {
|
||||
t.Error("AllEntries: error during seek, err: ", iter.Error())
|
||||
@@ -248,19 +257,18 @@ func (h *dbHarness) allEntriesFor(key, want string) {
|
||||
res := "[ "
|
||||
first := true
|
||||
for iter.Valid() {
|
||||
rkey := iKey(iter.Key())
|
||||
if _, t, ok := rkey.parseNum(); ok {
|
||||
if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
|
||||
if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
|
||||
if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
|
||||
break
|
||||
}
|
||||
if !first {
|
||||
res += ", "
|
||||
}
|
||||
first = false
|
||||
switch t {
|
||||
case tVal:
|
||||
switch kt {
|
||||
case ktVal:
|
||||
res += string(iter.Value())
|
||||
case tDel:
|
||||
case ktDel:
|
||||
res += "DEL"
|
||||
}
|
||||
} else {
|
||||
@@ -325,6 +333,8 @@ func (h *dbHarness) compactMem() {
|
||||
t := h.t
|
||||
db := h.db
|
||||
|
||||
t.Log("starting memdb compaction")
|
||||
|
||||
db.writeLockC <- struct{}{}
|
||||
defer func() {
|
||||
<-db.writeLockC
|
||||
@@ -340,6 +350,8 @@ func (h *dbHarness) compactMem() {
|
||||
if h.totalTables() == 0 {
|
||||
t.Error("zero tables after mem compaction")
|
||||
}
|
||||
|
||||
t.Log("memdb compaction done")
|
||||
}
|
||||
|
||||
func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
|
||||
@@ -354,6 +366,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
|
||||
_max = []byte(max)
|
||||
}
|
||||
|
||||
t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
|
||||
|
||||
if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
|
||||
if wanterr {
|
||||
t.Log("CompactRangeAt: got error (expected): ", err)
|
||||
@@ -363,6 +377,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
|
||||
} else if wanterr {
|
||||
t.Error("CompactRangeAt: expect error")
|
||||
}
|
||||
|
||||
t.Log("table range compaction done")
|
||||
}
|
||||
|
||||
func (h *dbHarness) compactRangeAt(level int, min, max string) {
|
||||
@@ -373,6 +389,8 @@ func (h *dbHarness) compactRange(min, max string) {
|
||||
t := h.t
|
||||
db := h.db
|
||||
|
||||
t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
|
||||
|
||||
var r util.Range
|
||||
if min != "" {
|
||||
r.Start = []byte(min)
|
||||
@@ -383,6 +401,8 @@ func (h *dbHarness) compactRange(min, max string) {
|
||||
if err := db.CompactRange(r); err != nil {
|
||||
t.Error("CompactRange: got error: ", err)
|
||||
}
|
||||
|
||||
t.Log("DB range compaction done")
|
||||
}
|
||||
|
||||
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
|
||||
@@ -504,10 +524,10 @@ func Test_FieldsAligned(t *testing.T) {
|
||||
p1 := new(DB)
|
||||
testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
|
||||
p2 := new(session)
|
||||
testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum))
|
||||
testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
|
||||
testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
|
||||
testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
|
||||
testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq))
|
||||
testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
|
||||
}
|
||||
|
||||
func TestDb_Locking(t *testing.T) {
|
||||
@@ -943,7 +963,7 @@ func TestDb_RepeatedWritesToSameKey(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
|
||||
defer h.close()
|
||||
|
||||
maxTables := kNumLevels + kL0_StopWritesTrigger
|
||||
maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
|
||||
|
||||
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
|
||||
for i := 0; i < 5*maxTables; i++ {
|
||||
@@ -961,7 +981,7 @@ func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
|
||||
|
||||
h.reopenDB()
|
||||
|
||||
maxTables := kNumLevels + kL0_StopWritesTrigger
|
||||
maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
|
||||
|
||||
value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
|
||||
for i := 0; i < 5*maxTables; i++ {
|
||||
@@ -977,7 +997,7 @@ func TestDb_SparseMerge(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
|
||||
defer h.close()
|
||||
|
||||
h.putMulti(kNumLevels, "A", "Z")
|
||||
h.putMulti(h.o.GetNumLevel(), "A", "Z")
|
||||
|
||||
// Suppose there is:
|
||||
// small amount of data with prefix A
|
||||
@@ -1001,6 +1021,7 @@ func TestDb_SparseMerge(t *testing.T) {
|
||||
h.put("C", "vc2")
|
||||
h.compactMem()
|
||||
|
||||
h.waitCompaction()
|
||||
h.maxNextLevelOverlappingBytes(20 * 1048576)
|
||||
h.compactRangeAt(0, "", "")
|
||||
h.waitCompaction()
|
||||
@@ -1127,13 +1148,51 @@ func TestDb_Snapshot(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestDb_SnapshotList(t *testing.T) {
|
||||
db := &DB{snapsList: list.New()}
|
||||
e0a := db.acquireSnapshot()
|
||||
e0b := db.acquireSnapshot()
|
||||
db.seq = 1
|
||||
e1 := db.acquireSnapshot()
|
||||
db.seq = 2
|
||||
e2 := db.acquireSnapshot()
|
||||
|
||||
if db.minSeq() != 0 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
db.releaseSnapshot(e0a)
|
||||
if db.minSeq() != 0 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
db.releaseSnapshot(e2)
|
||||
if db.minSeq() != 0 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
db.releaseSnapshot(e0b)
|
||||
if db.minSeq() != 1 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
e2 = db.acquireSnapshot()
|
||||
if db.minSeq() != 1 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
db.releaseSnapshot(e1)
|
||||
if db.minSeq() != 2 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
db.releaseSnapshot(e2)
|
||||
if db.minSeq() != 2 {
|
||||
t.Fatalf("invalid sequence number, got=%d", db.minSeq())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDb_HiddenValuesAreRemoved(t *testing.T) {
|
||||
trun(t, func(h *dbHarness) {
|
||||
s := h.db.s
|
||||
|
||||
h.put("foo", "v1")
|
||||
h.compactMem()
|
||||
m := kMaxMemCompactLevel
|
||||
m := h.o.GetMaxMemCompationLevel()
|
||||
v := s.version()
|
||||
num := v.tLen(m)
|
||||
v.release()
|
||||
@@ -1177,7 +1236,7 @@ func TestDb_DeletionMarkers2(t *testing.T) {
|
||||
|
||||
h.put("foo", "v1")
|
||||
h.compactMem()
|
||||
m := kMaxMemCompactLevel
|
||||
m := h.o.GetMaxMemCompationLevel()
|
||||
v := s.version()
|
||||
num := v.tLen(m)
|
||||
v.release()
|
||||
@@ -1212,7 +1271,7 @@ func TestDb_DeletionMarkers2(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDb_CompactionTableOpenError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{MaxOpenFiles: 0})
|
||||
h := newDbHarnessWopt(t, &opt.Options{CachedOpenFiles: -1})
|
||||
defer h.close()
|
||||
|
||||
im := 10
|
||||
@@ -1230,14 +1289,14 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
|
||||
t.Errorf("total tables is %d, want %d", n, im)
|
||||
}
|
||||
|
||||
h.stor.SetOpenErr(storage.TypeTable)
|
||||
h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
|
||||
go h.db.CompactRange(util.Range{})
|
||||
if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
|
||||
t.Log("compaction error: ", err)
|
||||
}
|
||||
h.closeDB0()
|
||||
h.openDB()
|
||||
h.stor.SetOpenErr(0)
|
||||
h.stor.SetEmuErr(0, tsOpOpen)
|
||||
|
||||
for i := 0; i < im; i++ {
|
||||
for j := 0; j < jm; j++ {
|
||||
@@ -1248,7 +1307,7 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
|
||||
|
||||
func TestDb_OverlapInLevel0(t *testing.T) {
|
||||
trun(t, func(h *dbHarness) {
|
||||
if kMaxMemCompactLevel != 2 {
|
||||
if h.o.GetMaxMemCompationLevel() != 2 {
|
||||
t.Fatal("fix test to reflect the config")
|
||||
}
|
||||
|
||||
@@ -1368,23 +1427,23 @@ func TestDb_ManifestWriteError(t *testing.T) {
|
||||
h.compactMem()
|
||||
h.getVal("foo", "bar")
|
||||
v := h.db.s.version()
|
||||
if n := v.tLen(kMaxMemCompactLevel); n != 1 {
|
||||
if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 {
|
||||
t.Errorf("invalid total tables, want=1 got=%d", n)
|
||||
}
|
||||
v.release()
|
||||
|
||||
if i == 0 {
|
||||
h.stor.SetWriteErr(storage.TypeManifest)
|
||||
h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite)
|
||||
} else {
|
||||
h.stor.SetSyncErr(storage.TypeManifest)
|
||||
h.stor.SetEmuErr(storage.TypeManifest, tsOpSync)
|
||||
}
|
||||
|
||||
// Merging compaction (will fail)
|
||||
h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true)
|
||||
h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true)
|
||||
|
||||
h.db.Close()
|
||||
h.stor.SetWriteErr(0)
|
||||
h.stor.SetSyncErr(0)
|
||||
h.stor.SetEmuErr(0, tsOpWrite)
|
||||
h.stor.SetEmuErr(0, tsOpSync)
|
||||
|
||||
// Should not lose data
|
||||
h.openDB()
|
||||
@@ -1534,7 +1593,7 @@ func TestDb_ManualCompaction(t *testing.T) {
|
||||
h := newDbHarness(t)
|
||||
defer h.close()
|
||||
|
||||
if kMaxMemCompactLevel != 2 {
|
||||
if h.o.GetMaxMemCompationLevel() != 2 {
|
||||
t.Fatal("fix test to reflect the config")
|
||||
}
|
||||
|
||||
@@ -1579,7 +1638,7 @@ func TestDb_BloomFilter(t *testing.T) {
|
||||
return fmt.Sprintf("key%06d", i)
|
||||
}
|
||||
|
||||
n := 10000
|
||||
const n = 10000
|
||||
|
||||
// Populate multiple layers
|
||||
for i := 0; i < n; i++ {
|
||||
@@ -1818,7 +1877,7 @@ func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDb_LeveldbIssue178(t *testing.T) {
|
||||
nKeys := (kMaxTableSize / 30) * 5
|
||||
nKeys := (opt.DefaultCompactionTableSize / 30) * 5
|
||||
key1 := func(i int) string {
|
||||
return fmt.Sprintf("my_key_%d", i)
|
||||
}
|
||||
@@ -1886,3 +1945,624 @@ func TestDb_LeveldbIssue200(t *testing.T) {
|
||||
iter.Next()
|
||||
assertBytes(t, []byte("5"), iter.Key())
|
||||
}
|
||||
|
||||
func TestDb_GoleveldbIssue74(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 1 * opt.MiB,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
const n, dur = 10000, 5 * time.Second
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
until := time.Now().Add(dur)
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(2)
|
||||
var done uint32
|
||||
go func() {
|
||||
var i int
|
||||
defer func() {
|
||||
t.Logf("WRITER DONE #%d", i)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
b := new(Batch)
|
||||
for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
|
||||
iv := fmt.Sprintf("VAL%010d", i)
|
||||
for k := 0; k < n; k++ {
|
||||
key := fmt.Sprintf("KEY%06d", k)
|
||||
b.Put([]byte(key), []byte(key+iv))
|
||||
b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
|
||||
}
|
||||
h.write(b)
|
||||
|
||||
b.Reset()
|
||||
snap := h.getSnapshot()
|
||||
iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
|
||||
var k int
|
||||
for ; iter.Next(); k++ {
|
||||
ptrKey := iter.Key()
|
||||
key := iter.Value()
|
||||
|
||||
if _, err := snap.Get(ptrKey, nil); err != nil {
|
||||
t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
|
||||
}
|
||||
if value, err := snap.Get(key, nil); err != nil {
|
||||
t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
|
||||
} else if string(value) != string(key)+iv {
|
||||
t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
|
||||
}
|
||||
|
||||
b.Delete(key)
|
||||
b.Delete(ptrKey)
|
||||
}
|
||||
h.write(b)
|
||||
iter.Release()
|
||||
snap.Release()
|
||||
if k != n {
|
||||
t.Fatalf("#%d %d != %d", i, k, n)
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
var i int
|
||||
defer func() {
|
||||
t.Logf("READER DONE #%d", i)
|
||||
atomic.StoreUint32(&done, 1)
|
||||
wg.Done()
|
||||
}()
|
||||
for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
|
||||
snap := h.getSnapshot()
|
||||
iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
|
||||
var prevValue string
|
||||
var k int
|
||||
for ; iter.Next(); k++ {
|
||||
ptrKey := iter.Key()
|
||||
key := iter.Value()
|
||||
|
||||
if _, err := snap.Get(ptrKey, nil); err != nil {
|
||||
t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
|
||||
}
|
||||
|
||||
if value, err := snap.Get(key, nil); err != nil {
|
||||
t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
|
||||
} else if prevValue != "" && string(value) != string(key)+prevValue {
|
||||
t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
|
||||
} else {
|
||||
prevValue = string(value[len(key):])
|
||||
}
|
||||
}
|
||||
iter.Release()
|
||||
snap.Release()
|
||||
if k > 0 && k != n {
|
||||
t.Fatalf("#%d %d != %d", i, k, n)
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestDb_GetProperties(t *testing.T) {
|
||||
h := newDbHarness(t)
|
||||
defer h.close()
|
||||
|
||||
_, err := h.db.GetProperty("leveldb.num-files-at-level")
|
||||
if err == nil {
|
||||
t.Error("GetProperty() failed to detect missing level")
|
||||
}
|
||||
|
||||
_, err = h.db.GetProperty("leveldb.num-files-at-level0")
|
||||
if err != nil {
|
||||
t.Error("got unexpected error", err)
|
||||
}
|
||||
|
||||
_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
|
||||
if err == nil {
|
||||
t.Error("GetProperty() failed to detect invalid level")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDb_GoleveldbIssue72and83(t *testing.T) {
	h := newDbHarnessWopt(t, &opt.Options{
		WriteBuffer: 1 * opt.MiB,
		CachedOpenFiles: 3,
	})
	defer h.close()

	const n, wn, dur = 10000, 100, 30 * time.Second

	runtime.GOMAXPROCS(runtime.NumCPU())

	randomData := func(prefix byte, i int) []byte {
		data := make([]byte, 1+4+32+64+32)
		_, err := crand.Reader.Read(data[1 : len(data)-4])
		if err != nil {
			panic(err)
		}
		data[0] = prefix
		binary.LittleEndian.PutUint32(data[len(data)-4:], uint32(i))
		return data
	}

	keys := make([][]byte, n)
	for i := range keys {
		keys[i] = randomData(1, 0)
	}

	until := time.Now().Add(dur)
	wg := new(sync.WaitGroup)
	wg.Add(3)
	var done uint32
	go func() {
		i := 0
		defer func() {
			t.Logf("WRITER DONE #%d", i)
			wg.Done()
		}()

		b := new(Batch)
		for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
			b.Reset()
			for _, k1 := range keys {
				k2 := randomData(2, i)
				b.Put(k2, randomData(42, i))
				b.Put(k1, k2)
			}
			if err := h.db.Write(b, h.wo); err != nil {
				atomic.StoreUint32(&done, 1)
				t.Fatalf("WRITER #%d db.Write: %v", i, err)
			}
		}
	}()
	go func() {
		var i int
		defer func() {
			t.Logf("READER0 DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			snap := h.getSnapshot()
			seq := snap.elem.seq
			if seq == 0 {
				snap.Release()
				continue
			}
			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
			writei := int(snap.elem.seq/(n*2) - 1)
			var k int
			for ; iter.Next(); k++ {
				k1 := iter.Key()
				k2 := iter.Value()
				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-4:]))
				if writei != kwritei {
					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
				}
				if _, err := snap.Get(k2, nil); err != nil {
					t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
				}
			}
			if err := iter.Error(); err != nil {
				t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
			}
			iter.Release()
			snap.Release()
			if k > 0 && k != n {
				t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
			}
		}
	}()
	go func() {
		var i int
		defer func() {
			t.Logf("READER1 DONE #%d", i)
			atomic.StoreUint32(&done, 1)
			wg.Done()
		}()
		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
			iter := h.db.NewIterator(nil, nil)
			seq := iter.(*dbIter).seq
			if seq == 0 {
				iter.Release()
				continue
			}
			writei := int(seq/(n*2) - 1)
			var k int
			for ok := iter.Last(); ok; ok = iter.Prev() {
				k++
			}
			if err := iter.Error(); err != nil {
				t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
			}
			iter.Release()
			if m := (writei+1)*n + n; k != m {
				t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
			}
		}
	}()

	wg.Wait()
}

func TestDb_TransientError(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 128 * opt.KiB,
|
||||
CachedOpenFiles: 3,
|
||||
DisableCompactionBackoff: true,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
const (
|
||||
nSnap = 20
|
||||
nKey = 10000
|
||||
)
|
||||
|
||||
var (
|
||||
snaps [nSnap]*Snapshot
|
||||
b = &Batch{}
|
||||
)
|
||||
for i := range snaps {
|
||||
vtail := fmt.Sprintf("VAL%030d", i)
|
||||
b.Reset()
|
||||
for k := 0; k < nKey; k++ {
|
||||
key := fmt.Sprintf("KEY%8d", k)
|
||||
b.Put([]byte(key), []byte(key+vtail))
|
||||
}
|
||||
h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
|
||||
if err := h.db.Write(b, nil); err != nil {
|
||||
t.Logf("WRITE #%d error: %v", i, err)
|
||||
h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite)
|
||||
for {
|
||||
if err := h.db.Write(b, nil); err == nil {
|
||||
break
|
||||
} else if errors.IsCorrupted(err) {
|
||||
t.Fatalf("WRITE #%d corrupted: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
snaps[i] = h.db.newSnapshot()
|
||||
b.Reset()
|
||||
for k := 0; k < nKey; k++ {
|
||||
key := fmt.Sprintf("KEY%8d", k)
|
||||
b.Delete([]byte(key))
|
||||
}
|
||||
h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
|
||||
if err := h.db.Write(b, nil); err != nil {
|
||||
t.Logf("WRITE #%d error: %v", i, err)
|
||||
h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
|
||||
for {
|
||||
if err := h.db.Write(b, nil); err == nil {
|
||||
break
|
||||
} else if errors.IsCorrupted(err) {
|
||||
t.Fatalf("WRITE #%d corrupted: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
rnd := rand.New(rand.NewSource(0xecafdaed))
|
||||
wg := &sync.WaitGroup{}
|
||||
for i, snap := range snaps {
|
||||
wg.Add(2)
|
||||
|
||||
go func(i int, snap *Snapshot, sk []int) {
|
||||
defer wg.Done()
|
||||
|
||||
vtail := fmt.Sprintf("VAL%030d", i)
|
||||
for _, k := range sk {
|
||||
key := fmt.Sprintf("KEY%8d", k)
|
||||
xvalue, err := snap.Get([]byte(key), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
|
||||
}
|
||||
value := key + vtail
|
||||
if !bytes.Equal([]byte(value), xvalue) {
|
||||
t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
|
||||
}
|
||||
}
|
||||
}(i, snap, rnd.Perm(nKey))
|
||||
|
||||
go func(i int, snap *Snapshot) {
|
||||
defer wg.Done()
|
||||
|
||||
vtail := fmt.Sprintf("VAL%030d", i)
|
||||
iter := snap.NewIterator(nil, nil)
|
||||
defer iter.Release()
|
||||
for k := 0; k < nKey; k++ {
|
||||
if !iter.Next() {
|
||||
if err := iter.Error(); err != nil {
|
||||
t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err)
|
||||
} else {
|
||||
t.Fatalf("READER_ITER #%d K%d eoi", i, k)
|
||||
}
|
||||
}
|
||||
key := fmt.Sprintf("KEY%8d", k)
|
||||
xkey := iter.Key()
|
||||
if !bytes.Equal([]byte(key), xkey) {
|
||||
t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey)
|
||||
}
|
||||
value := key + vtail
|
||||
xvalue := iter.Value()
|
||||
if !bytes.Equal([]byte(value), xvalue) {
|
||||
t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue)
|
||||
}
|
||||
}
|
||||
}(i, snap)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestDb_UkeyShouldntHopAcrossTable(t *testing.T) {
|
||||
h := newDbHarnessWopt(t, &opt.Options{
|
||||
WriteBuffer: 112 * opt.KiB,
|
||||
CompactionTableSize: 90 * opt.KiB,
|
||||
CompactionExpandLimitFactor: 1,
|
||||
})
|
||||
defer h.close()
|
||||
|
||||
const (
|
||||
nSnap = 190
|
||||
nKey = 140
|
||||
)
|
||||
|
||||
var (
|
||||
snaps [nSnap]*Snapshot
|
||||
b = &Batch{}
|
||||
)
|
||||
for i := range snaps {
|
||||
vtail := fmt.Sprintf("VAL%030d", i)
|
||||
b.Reset()
|
||||
for k := 0; k < nKey; k++ {
|
||||
key := fmt.Sprintf("KEY%08d", k)
|
||||
b.Put([]byte(key), []byte(key+vtail))
|
||||
}
|
||||
if err := h.db.Write(b, nil); err != nil {
|
||||
t.Fatalf("WRITE #%d error: %v", i, err)
|
||||
}
|
||||
|
||||
snaps[i] = h.db.newSnapshot()
|
||||
b.Reset()
|
||||
for k := 0; k < nKey; k++ {
|
||||
key := fmt.Sprintf("KEY%08d", k)
|
||||
b.Delete([]byte(key))
|
||||
}
|
||||
if err := h.db.Write(b, nil); err != nil {
|
||||
t.Fatalf("WRITE #%d error: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
h.compactMem()
|
||||
|
||||
h.waitCompaction()
|
||||
for level, tables := range h.db.s.stVersion.tables {
|
||||
for _, table := range tables {
|
||||
t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
|
||||
}
|
||||
}
|
||||
|
||||
h.compactRangeAt(0, "", "")
|
||||
h.waitCompaction()
|
||||
for level, tables := range h.db.s.stVersion.tables {
|
||||
for _, table := range tables {
|
||||
t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
|
||||
}
|
||||
}
|
||||
h.compactRangeAt(1, "", "")
|
||||
h.waitCompaction()
|
||||
for level, tables := range h.db.s.stVersion.tables {
|
||||
for _, table := range tables {
|
||||
t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
|
||||
}
|
||||
}
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
for i, snap := range snaps {
|
||||
wg.Add(1)
|
||||
|
||||
go func(i int, snap *Snapshot) {
|
||||
defer wg.Done()
|
||||
|
||||
vtail := fmt.Sprintf("VAL%030d", i)
|
||||
for k := 0; k < nKey; k++ {
|
||||
key := fmt.Sprintf("KEY%08d", k)
|
||||
xvalue, err := snap.Get([]byte(key), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
|
||||
}
|
||||
value := key + vtail
|
||||
if !bytes.Equal([]byte(value), xvalue) {
|
||||
t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
|
||||
}
|
||||
}
|
||||
}(i, snap)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestDb_TableCompactionBuilder(t *testing.T) {
|
||||
stor := newTestStorage(t)
|
||||
defer stor.Close()
|
||||
|
||||
const nSeq = 99
|
||||
|
||||
o := &opt.Options{
|
||||
WriteBuffer: 112 * opt.KiB,
|
||||
CompactionTableSize: 43 * opt.KiB,
|
||||
CompactionExpandLimitFactor: 1,
|
||||
CompactionGPOverlapsFactor: 1,
|
||||
BlockCache: opt.NoCache,
|
||||
}
|
||||
s, err := newSession(stor, o)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.create(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer s.close()
|
||||
var (
|
||||
seq uint64
|
||||
targetSize = 5 * o.CompactionTableSize
|
||||
value = bytes.Repeat([]byte{'0'}, 100)
|
||||
)
|
||||
for i := 0; i < 2; i++ {
|
||||
tw, err := s.tops.create()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for k := 0; tw.tw.BytesLen() < targetSize; k++ {
|
||||
key := []byte(fmt.Sprintf("%09d", k))
|
||||
seq += nSeq - 1
|
||||
for x := uint64(0); x < nSeq; x++ {
|
||||
if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
tf, err := tw.finish()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
rec.addTableFile(i, tf)
|
||||
if err := s.commit(rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Build grandparent.
|
||||
v := s.version()
|
||||
c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
|
||||
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
b := &tableCompactionBuilder{
|
||||
s: s,
|
||||
c: c,
|
||||
rec: rec,
|
||||
stat1: new(cStatsStaging),
|
||||
minSeq: 0,
|
||||
strict: true,
|
||||
tableSize: o.CompactionTableSize/3 + 961,
|
||||
}
|
||||
if err := b.run(new(compactionTransactCounter)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, t := range c.tables[0] {
|
||||
rec.delTable(c.level, t.file.Num())
|
||||
}
|
||||
if err := s.commit(rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.release()
|
||||
|
||||
// Build level-1.
|
||||
v = s.version()
|
||||
c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
|
||||
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
b = &tableCompactionBuilder{
|
||||
s: s,
|
||||
c: c,
|
||||
rec: rec,
|
||||
stat1: new(cStatsStaging),
|
||||
minSeq: 0,
|
||||
strict: true,
|
||||
tableSize: o.CompactionTableSize,
|
||||
}
|
||||
if err := b.run(new(compactionTransactCounter)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, t := range c.tables[0] {
|
||||
rec.delTable(c.level, t.file.Num())
|
||||
}
|
||||
// Move grandparent to level-3
|
||||
for _, t := range v.tables[2] {
|
||||
rec.delTable(2, t.file.Num())
|
||||
rec.addTableFile(3, t)
|
||||
}
|
||||
if err := s.commit(rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.release()
|
||||
|
||||
v = s.version()
|
||||
for level, want := range []bool{false, true, false, true, false} {
|
||||
got := len(v.tables[level]) > 0
|
||||
if want != got {
|
||||
t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got)
|
||||
}
|
||||
}
|
||||
for i, f := range v.tables[1][:len(v.tables[1])-1] {
|
||||
nf := v.tables[1][i+1]
|
||||
if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) {
|
||||
t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num())
|
||||
}
|
||||
}
|
||||
v.release()
|
||||
|
||||
// Compaction with transient error.
|
||||
v = s.version()
|
||||
c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
|
||||
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
b = &tableCompactionBuilder{
|
||||
s: s,
|
||||
c: c,
|
||||
rec: rec,
|
||||
stat1: new(cStatsStaging),
|
||||
minSeq: 0,
|
||||
strict: true,
|
||||
tableSize: o.CompactionTableSize,
|
||||
}
|
||||
stor.SetEmuErrOnce(storage.TypeTable, tsOpSync)
|
||||
stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite)
|
||||
stor.SetEmuRandErrProb(0xf0)
|
||||
for {
|
||||
if err := b.run(new(compactionTransactCounter)); err != nil {
|
||||
t.Logf("(expected) b.run: %v", err)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := s.commit(rec); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.release()
|
||||
|
||||
stor.SetEmuErrOnce(0, tsOpSync)
|
||||
stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite)
|
||||
|
||||
v = s.version()
|
||||
if len(v.tables[1]) != len(v.tables[2]) {
|
||||
t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2]))
|
||||
}
|
||||
for i, f0 := range v.tables[1] {
|
||||
f1 := v.tables[2][i]
|
||||
iter0 := s.tops.newIterator(f0, nil, nil)
|
||||
iter1 := s.tops.newIterator(f1, nil, nil)
|
||||
for j := 0; true; j++ {
|
||||
next0 := iter0.Next()
|
||||
next1 := iter1.Next()
|
||||
if next0 != next1 {
|
||||
t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1)
|
||||
}
|
||||
key0 := iter0.Key()
|
||||
key1 := iter1.Key()
|
||||
if !bytes.Equal(key0, key1) {
|
||||
t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1)
|
||||
}
|
||||
if next0 == false {
|
||||
break
|
||||
}
|
||||
}
|
||||
iter0.Release()
|
||||
iter1.Release()
|
||||
}
|
||||
v.release()
|
||||
}
|
||||
|
||||
53  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go  (generated, vendored)
@@ -7,8 +7,7 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
@@ -32,40 +31,44 @@ func (p Sizes) Sum() (n uint64) {
|
||||
return n
|
||||
}
|
||||
|
||||
// Check and clean files.
|
||||
func (d *DB) checkAndCleanFiles() error {
|
||||
s := d.s
|
||||
// Logging.
|
||||
func (db *DB) log(v ...interface{}) { db.s.log(v...) }
|
||||
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
|
||||
|
||||
v := s.version_NB()
|
||||
tables := make(map[uint64]bool)
|
||||
for _, tt := range v.tables {
|
||||
for _, t := range tt {
|
||||
tables[t.file.Num()] = false
|
||||
// Check and clean files.
|
||||
func (db *DB) checkAndCleanFiles() error {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
|
||||
tablesMap := make(map[uint64]bool)
|
||||
for _, tables := range v.tables {
|
||||
for _, t := range tables {
|
||||
tablesMap[t.file.Num()] = false
|
||||
}
|
||||
}
|
||||
|
||||
ff, err := s.getFiles(storage.TypeAll)
|
||||
files, err := db.s.getFiles(storage.TypeAll)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var nTables int
|
||||
var rem []storage.File
|
||||
for _, f := range ff {
|
||||
for _, f := range files {
|
||||
keep := true
|
||||
switch f.Type() {
|
||||
case storage.TypeManifest:
|
||||
keep = f.Num() >= s.manifestFile.Num()
|
||||
keep = f.Num() >= db.s.manifestFile.Num()
|
||||
case storage.TypeJournal:
|
||||
if d.frozenJournalFile != nil {
|
||||
keep = f.Num() >= d.frozenJournalFile.Num()
|
||||
if db.frozenJournalFile != nil {
|
||||
keep = f.Num() >= db.frozenJournalFile.Num()
|
||||
} else {
|
||||
keep = f.Num() >= d.journalFile.Num()
|
||||
keep = f.Num() >= db.journalFile.Num()
|
||||
}
|
||||
case storage.TypeTable:
|
||||
_, keep = tables[f.Num()]
|
||||
_, keep = tablesMap[f.Num()]
|
||||
if keep {
|
||||
tables[f.Num()] = true
|
||||
tablesMap[f.Num()] = true
|
||||
nTables++
|
||||
}
|
||||
}
|
||||
@@ -75,18 +78,20 @@ func (d *DB) checkAndCleanFiles() error {
|
||||
}
|
||||
}
|
||||
|
||||
if nTables != len(tables) {
|
||||
for num, present := range tables {
|
||||
if nTables != len(tablesMap) {
|
||||
var missing []*storage.FileInfo
|
||||
for num, present := range tablesMap {
|
||||
if !present {
|
||||
s.logf("db@janitor table missing @%d", num)
|
||||
missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
|
||||
db.logf("db@janitor table missing @%d", num)
|
||||
}
|
||||
}
|
||||
return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")}
|
||||
return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
|
||||
}
|
||||
|
||||
s.logf("db@janitor F·%d G·%d", len(ff), len(rem))
|
||||
db.logf("db@janitor F·%d G·%d", len(files), len(rem))
|
||||
for _, f := range rem {
|
||||
s.logf("db@janitor removing %s-%d", f.Type(), f.Num())
|
||||
db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
|
||||
if err := f.Remove(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
180  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go  (generated, vendored)
@@ -14,84 +14,93 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
func (d *DB) writeJournal(b *Batch) error {
|
||||
w, err := d.journal.Next()
|
||||
func (db *DB) writeJournal(b *Batch) error {
|
||||
w, err := db.journal.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(b.encode()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.journal.Flush(); err != nil {
|
||||
if err := db.journal.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.sync {
|
||||
return d.journalWriter.Sync()
|
||||
return db.journalWriter.Sync()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DB) jWriter() {
|
||||
defer d.closeW.Done()
|
||||
func (db *DB) jWriter() {
|
||||
defer db.closeW.Done()
|
||||
for {
|
||||
select {
|
||||
case b := <-d.journalC:
|
||||
case b := <-db.journalC:
|
||||
if b != nil {
|
||||
d.journalAckC <- d.writeJournal(b)
|
||||
db.journalAckC <- db.writeJournal(b)
|
||||
}
|
||||
case _, _ = <-d.closeC:
|
||||
case _, _ = <-db.closeC:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DB) rotateMem(n int) (mem *memdb.DB, err error) {
|
||||
func (db *DB) rotateMem(n int) (mem *memDB, err error) {
|
||||
// Wait for pending memdb compaction.
|
||||
err = d.compSendIdle(d.mcompCmdC)
|
||||
err = db.compSendIdle(db.mcompCmdC)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Create new memdb and journal.
|
||||
mem, err = d.newMem(n)
|
||||
mem, err = db.newMem(n)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Schedule memdb compaction.
|
||||
d.compTrigger(d.mcompTriggerC)
|
||||
db.compSendTrigger(db.mcompCmdC)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
|
||||
s := d.s
|
||||
|
||||
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
|
||||
delayed := false
|
||||
flush := func() bool {
|
||||
v := s.version()
|
||||
flush := func() (retry bool) {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
mem = d.getEffectiveMem()
|
||||
nn = mem.Free()
|
||||
mem = db.getEffectiveMem()
|
||||
defer func() {
|
||||
if retry {
|
||||
mem.decref()
|
||||
mem = nil
|
||||
}
|
||||
}()
|
||||
nn = mem.mdb.Free()
|
||||
switch {
|
||||
case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
|
||||
case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
|
||||
delayed = true
|
||||
time.Sleep(time.Millisecond)
|
||||
case nn >= n:
|
||||
return false
|
||||
case v.tLen(0) >= kL0_StopWritesTrigger:
|
||||
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
|
||||
delayed = true
|
||||
err = d.compSendIdle(d.tcompCmdC)
|
||||
err = db.compSendIdle(db.tcompCmdC)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
// Allow memdb to grow if it has no entry.
|
||||
if mem.Len() == 0 {
|
||||
if mem.mdb.Len() == 0 {
|
||||
nn = n
|
||||
return false
|
||||
} else {
|
||||
mem.decref()
|
||||
mem, err = db.rotateMem(n)
|
||||
if err == nil {
|
||||
nn = mem.mdb.Free()
|
||||
} else {
|
||||
nn = 0
|
||||
}
|
||||
}
|
||||
mem, err = d.rotateMem(n)
|
||||
nn = mem.Free()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
@@ -100,7 +109,12 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
|
||||
for flush() {
|
||||
}
|
||||
if delayed {
|
||||
s.logf("db@write delayed T·%v", time.Since(start))
|
||||
db.writeDelay += time.Since(start)
|
||||
db.writeDelayN++
|
||||
} else if db.writeDelayN > 0 {
|
||||
db.writeDelay = 0
|
||||
db.writeDelayN = 0
|
||||
db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -109,39 +123,45 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
|
||||
// sequentially.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Write returns.
|
||||
func (d *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
err = d.ok()
|
||||
if err != nil || b == nil || b.len() == 0 {
|
||||
func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
err = db.ok()
|
||||
if err != nil || b == nil || b.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
b.init(wo.GetSync())
|
||||
|
||||
// The write happen synchronously.
|
||||
retry:
|
||||
select {
|
||||
case d.writeC <- b:
|
||||
if <-d.writeMergedC {
|
||||
return <-d.writeAckC
|
||||
case db.writeC <- b:
|
||||
if <-db.writeMergedC {
|
||||
return <-db.writeAckC
|
||||
}
|
||||
goto retry
|
||||
case d.writeLockC <- struct{}{}:
|
||||
case _, _ = <-d.closeC:
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case err = <-db.compPerErrC:
|
||||
return
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
merged := 0
|
||||
danglingMerge := false
|
||||
defer func() {
|
||||
<-d.writeLockC
|
||||
if danglingMerge {
|
||||
db.writeMergedC <- false
|
||||
} else {
|
||||
<-db.writeLockC
|
||||
}
|
||||
for i := 0; i < merged; i++ {
|
||||
d.writeAckC <- err
|
||||
db.writeAckC <- err
|
||||
}
|
||||
}()
|
||||
|
||||
mem, memFree, err := d.flush(b.size())
|
||||
mem, memFree, err := db.flush(b.size())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer mem.decref()
|
||||
|
||||
// Calculate maximum size of the batch.
|
||||
m := 1 << 20
|
||||
@@ -154,13 +174,13 @@ retry:
|
||||
drain:
|
||||
for b.size() < m && !b.sync {
|
||||
select {
|
||||
case nb := <-d.writeC:
|
||||
case nb := <-db.writeC:
|
||||
if b.size()+nb.size() <= m {
|
||||
b.append(nb)
|
||||
d.writeMergedC <- true
|
||||
db.writeMergedC <- true
|
||||
merged++
|
||||
} else {
|
||||
d.writeMergedC <- false
|
||||
danglingMerge = true
|
||||
break drain
|
||||
}
|
||||
default:
|
||||
@@ -169,44 +189,52 @@ drain:
|
||||
}
|
||||
|
||||
// Set batch first seq number relative from last seq.
|
||||
b.seq = d.seq + 1
|
||||
b.seq = db.seq + 1
|
||||
|
||||
// Write journal concurrently if it is large enough.
|
||||
if b.size() >= (128 << 10) {
|
||||
// Push the write batch to the journal writer
|
||||
select {
|
||||
case _, _ = <-d.closeC:
|
||||
case db.journalC <- b:
|
||||
// Write into memdb
|
||||
if berr := b.memReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
case err = <-db.compPerErrC:
|
||||
return
|
||||
case _, _ = <-db.closeC:
|
||||
err = ErrClosed
|
||||
return
|
||||
case d.journalC <- b:
|
||||
// Write into memdb
|
||||
b.memReplay(mem)
|
||||
}
|
||||
// Wait for journal writer
|
||||
select {
|
||||
case _, _ = <-d.closeC:
|
||||
err = ErrClosed
|
||||
return
|
||||
case err = <-d.journalAckC:
|
||||
case err = <-db.journalAckC:
|
||||
if err != nil {
|
||||
// Revert memdb if error detected
|
||||
b.revertMemReplay(mem)
|
||||
if berr := b.revertMemReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
return
|
||||
}
|
||||
case _, _ = <-db.closeC:
|
||||
err = ErrClosed
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = d.writeJournal(b)
|
||||
err = db.writeJournal(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b.memReplay(mem)
|
||||
if berr := b.memReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
}
|
||||
|
||||
// Set last seq number.
|
||||
d.addSeq(uint64(b.len()))
|
||||
db.addSeq(uint64(b.Len()))
|
||||
|
||||
if b.size() >= memFree {
|
||||
d.rotateMem(0)
|
||||
db.rotateMem(0)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -215,20 +243,20 @@ drain:
|
||||
// for that key; a DB is not a multi-map.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Put returns.
|
||||
func (d *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
|
||||
func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
|
||||
b := new(Batch)
|
||||
b.Put(key, value)
|
||||
return d.Write(b, wo)
|
||||
return db.Write(b, wo)
|
||||
}
|
||||
|
||||
// Delete deletes the value for the given key. It returns ErrNotFound if
|
||||
// the DB does not contain the key.
|
||||
//
|
||||
// It is safe to modify the contents of the arguments after Delete returns.
|
||||
func (d *DB) Delete(key []byte, wo *opt.WriteOptions) error {
|
||||
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
|
||||
b := new(Batch)
|
||||
b.Delete(key)
|
||||
return d.Write(b, wo)
|
||||
return db.Write(b, wo)
|
||||
}
|
||||
|
||||
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
|
||||
@@ -247,33 +275,37 @@ func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
|
||||
// A nil Range.Start is treated as a key before all keys in the DB.
|
||||
// And a nil Range.Limit is treated as a key after all keys in the DB.
|
||||
// Therefore if both is nil then it will compact entire DB.
|
||||
func (d *DB) CompactRange(r util.Range) error {
|
||||
if err := d.ok(); err != nil {
|
||||
func (db *DB) CompactRange(r util.Range) error {
|
||||
if err := db.ok(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Lock writer.
|
||||
select {
|
||||
case d.writeLockC <- struct{}{}:
|
||||
case _, _ = <-d.closeC:
|
||||
case db.writeLockC <- struct{}{}:
|
||||
case err := <-db.compPerErrC:
|
||||
return err
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
// Check for overlaps in memdb.
|
||||
mem := d.getEffectiveMem()
|
||||
if isMemOverlaps(d.s.icmp, mem, r.Start, r.Limit) {
|
||||
mem := db.getEffectiveMem()
|
||||
defer mem.decref()
|
||||
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
|
||||
// Memdb compaction.
|
||||
if _, err := d.rotateMem(0); err != nil {
|
||||
<-d.writeLockC
|
||||
if _, err := db.rotateMem(0); err != nil {
|
||||
<-db.writeLockC
|
||||
return err
|
||||
}
|
||||
<-d.writeLockC
|
||||
if err := d.compSendIdle(d.mcompCmdC); err != nil {
|
||||
<-db.writeLockC
|
||||
if err := db.compSendIdle(db.mcompCmdC); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
<-d.writeLockC
|
||||
<-db.writeLockC
|
||||
}
|
||||
|
||||
// Table compaction.
|
||||
return d.compSendRange(d.tcompCmdC, -1, r.Start, r.Limit)
|
||||
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
|
||||
}
|
||||
|
||||
10  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go  (generated, vendored)
@@ -37,6 +37,16 @@
// err = iter.Error()
// ...
//
// Iterate over subset of database content with a particular prefix:
// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
// for iter.Next() {
//     // Use key/value.
//     ...
// }
// iter.Release()
// err = iter.Error()
// ...
//
// Seek-then-Iterate:
//
// iter := db.NewIterator(nil, nil)

@@ -7,32 +7,12 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotFound = util.ErrNotFound
|
||||
ErrNotFound = errors.ErrNotFound
|
||||
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
|
||||
ErrIterReleased = errors.New("leveldb: iterator released")
|
||||
ErrClosed = errors.New("leveldb: closed")
|
||||
)
|
||||
|
||||
type CorruptionType int
|
||||
|
||||
const (
|
||||
CorruptedManifest CorruptionType = iota
|
||||
MissingFiles
|
||||
)
|
||||
|
||||
// ErrCorrupted is the type that wraps errors that indicate corruption in
|
||||
// the database.
|
||||
type ErrCorrupted struct {
|
||||
Type CorruptionType
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e ErrCorrupted) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
76  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go  (generated, vendored, new file)
@@ -0,0 +1,76 @@
|
||||
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package errors provides common error types used throughout leveldb.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotFound = New("leveldb: not found")
|
||||
ErrReleased = util.ErrReleased
|
||||
ErrHasReleaser = util.ErrHasReleaser
|
||||
)
|
||||
|
||||
// New returns an error that formats as the given text.
|
||||
func New(text string) error {
|
||||
return errors.New(text)
|
||||
}
|
||||
|
||||
// ErrCorrupted is the type that wraps errors that indicate corruption in
|
||||
// the database.
|
||||
type ErrCorrupted struct {
|
||||
File *storage.FileInfo
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *ErrCorrupted) Error() string {
|
||||
if e.File != nil {
|
||||
return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
|
||||
} else {
|
||||
return e.Err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
// NewErrCorrupted creates new ErrCorrupted error.
|
||||
func NewErrCorrupted(f storage.File, err error) error {
|
||||
return &ErrCorrupted{storage.NewFileInfo(f), err}
|
||||
}
|
||||
|
||||
// IsCorrupted returns a boolean indicating whether the error is indicating
|
||||
// a corruption.
|
||||
func IsCorrupted(err error) bool {
|
||||
switch err.(type) {
|
||||
case *ErrCorrupted:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ErrMissingFiles is the type that indicating a corruption due to missing
|
||||
// files.
|
||||
type ErrMissingFiles struct {
|
||||
Files []*storage.FileInfo
|
||||
}
|
||||
|
||||
func (e *ErrMissingFiles) Error() string { return "file missing" }
|
||||
|
||||
// SetFile sets 'file info' of the given error with the given file.
|
||||
// Currently only ErrCorrupted is supported, otherwise will do nothing.
|
||||
func SetFile(err error, f storage.File) error {
|
||||
switch x := err.(type) {
|
||||
case *ErrCorrupted:
|
||||
x.File = storage.NewFileInfo(f)
|
||||
return x
|
||||
}
|
||||
return err
|
||||
}
|
||||
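For illustration only, a minimal sketch of how a caller might use the new errors helpers above to tell corruption apart from ordinary failures; the function name mustGet, the main wiring, and the database path are assumptions, not part of the vendored code.

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
)

// mustGet is a hypothetical helper: it treats corruption as fatal,
// missing keys as an ordinary condition, and anything else as a hard failure.
func mustGet(db *leveldb.DB, key []byte) []byte {
	value, err := db.Get(key, nil)
	switch {
	case err == nil:
		return value
	case errors.IsCorrupted(err):
		// errors.IsCorrupted reports *errors.ErrCorrupted values, e.g. the
		// missing-table-files error built via errors.NewErrCorrupted above.
		log.Fatalf("database corrupted: %v", err)
	case err == leveldb.ErrNotFound:
		return nil
	default:
		log.Fatalf("get %q: %v", key, err)
	}
	return nil
}

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	_ = mustGet(db, []byte("some-key"))
}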
11  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go  (generated, vendored)
@@ -21,7 +21,7 @@ var _ = testutil.Defer(func() {
|
||||
BlockRestartInterval: 5,
|
||||
BlockSize: 50,
|
||||
Compression: opt.NoCompression,
|
||||
MaxOpenFiles: 0,
|
||||
CachedOpenFiles: -1,
|
||||
Strict: opt.StrictAll,
|
||||
WriteBuffer: 1000,
|
||||
}
|
||||
@@ -36,22 +36,21 @@ var _ = testutil.Defer(func() {
|
||||
testutil.DoDBTesting(&t)
|
||||
db.TestClose()
|
||||
done <- true
|
||||
}, 9.0)
|
||||
}, 20.0)
|
||||
})
|
||||
|
||||
Describe("read test", func() {
|
||||
testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
|
||||
testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
|
||||
// Building the DB.
|
||||
db := newTestingDB(o, nil, nil)
|
||||
kv.IterateShuffled(nil, func(i int, key, value []byte) {
|
||||
err := db.TestPut(key, value)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
testutil.Defer("teardown", func() {
|
||||
db.TestClose()
|
||||
})
|
||||
|
||||
return db
|
||||
}, func(db testutil.DB) {
|
||||
db.(*testingDB).TestClose()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
58  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go  (generated, vendored, new file)
@@ -0,0 +1,58 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build go1.3
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkDBReadConcurrent(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
defer p.close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(116)
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
for pb.Next() && iter.Next() {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkDBReadConcurrent2(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
defer p.close()
|
||||
|
||||
b.ResetTimer()
|
||||
b.SetBytes(116)
|
||||
|
||||
var dir uint32
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
if atomic.AddUint32(&dir, 1)%2 == 0 {
|
||||
for pb.Next() && iter.Next() {
|
||||
}
|
||||
} else {
|
||||
if pb.Next() && iter.Last() {
|
||||
for pb.Next() && iter.Prev() {
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
30  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go  (generated, vendored)
@@ -40,13 +40,19 @@ type basicArrayIterator struct {
|
||||
util.BasicReleaser
|
||||
array BasicArray
|
||||
pos int
|
||||
err error
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Valid() bool {
|
||||
return i.pos >= 0 && i.pos < i.array.Len()
|
||||
return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) First() bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if i.array.Len() == 0 {
|
||||
i.pos = -1
|
||||
return false
|
||||
@@ -56,6 +62,11 @@ func (i *basicArrayIterator) First() bool {
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Last() bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
n := i.array.Len()
|
||||
if n == 0 {
|
||||
i.pos = 0
|
||||
@@ -66,6 +77,11 @@ func (i *basicArrayIterator) Last() bool {
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Seek(key []byte) bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
n := i.array.Len()
|
||||
if n == 0 {
|
||||
i.pos = 0
|
||||
@@ -79,6 +95,11 @@ func (i *basicArrayIterator) Seek(key []byte) bool {
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Next() bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
i.pos++
|
||||
if n := i.array.Len(); i.pos >= n {
|
||||
i.pos = n
|
||||
@@ -88,6 +109,11 @@ func (i *basicArrayIterator) Next() bool {
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Prev() bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
i.pos--
|
||||
if i.pos < 0 {
|
||||
i.pos = -1
|
||||
@@ -96,7 +122,7 @@ func (i *basicArrayIterator) Prev() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (i *basicArrayIterator) Error() error { return nil }
|
||||
func (i *basicArrayIterator) Error() error { return i.err }
|
||||
|
||||
type arrayIterator struct {
|
||||
basicArrayIterator
|
||||
|
||||
73  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go  (generated, vendored)
@@ -7,6 +7,7 @@
|
||||
package iterator
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
@@ -22,13 +23,13 @@ type IteratorIndexer interface {
|
||||
|
||||
type indexedIterator struct {
|
||||
util.BasicReleaser
|
||||
index IteratorIndexer
|
||||
strict bool
|
||||
strictGet bool
|
||||
index IteratorIndexer
|
||||
strict bool
|
||||
|
||||
data Iterator
|
||||
err error
|
||||
errf func(err error)
|
||||
data Iterator
|
||||
err error
|
||||
errf func(err error)
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (i *indexedIterator) setData() {
|
||||
@@ -36,11 +37,6 @@ func (i *indexedIterator) setData() {
|
||||
i.data.Release()
|
||||
}
|
||||
i.data = i.index.Get()
|
||||
if i.strictGet {
|
||||
if err := i.data.Error(); err != nil {
|
||||
i.err = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (i *indexedIterator) clearData() {
|
||||
@@ -50,14 +46,21 @@ func (i *indexedIterator) clearData() {
|
||||
i.data = nil
|
||||
}
|
||||
|
||||
func (i *indexedIterator) dataErr() bool {
|
||||
if i.errf != nil {
|
||||
if err := i.data.Error(); err != nil {
|
||||
func (i *indexedIterator) indexErr() {
|
||||
if err := i.index.Error(); err != nil {
|
||||
if i.errf != nil {
|
||||
i.errf(err)
|
||||
}
|
||||
i.err = err
|
||||
}
|
||||
if i.strict {
|
||||
if err := i.data.Error(); err != nil {
|
||||
}
|
||||
|
||||
func (i *indexedIterator) dataErr() bool {
|
||||
if err := i.data.Error(); err != nil {
|
||||
if i.errf != nil {
|
||||
i.errf(err)
|
||||
}
|
||||
if i.strict || !errors.IsCorrupted(err) {
|
||||
i.err = err
|
||||
return true
|
||||
}
|
||||
@@ -72,9 +75,13 @@ func (i *indexedIterator) Valid() bool {
|
||||
func (i *indexedIterator) First() bool {
|
||||
if i.err != nil {
|
||||
return false
|
||||
} else if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if !i.index.First() {
|
||||
i.indexErr()
|
||||
i.clearData()
|
||||
return false
|
||||
}
|
||||
@@ -85,9 +92,13 @@ func (i *indexedIterator) First() bool {
|
||||
func (i *indexedIterator) Last() bool {
|
||||
if i.err != nil {
|
||||
return false
|
||||
} else if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if !i.index.Last() {
|
||||
i.indexErr()
|
||||
i.clearData()
|
||||
return false
|
||||
}
|
||||
@@ -105,9 +116,13 @@ func (i *indexedIterator) Last() bool {
|
||||
func (i *indexedIterator) Seek(key []byte) bool {
|
||||
if i.err != nil {
|
||||
return false
|
||||
} else if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if !i.index.Seek(key) {
|
||||
i.indexErr()
|
||||
i.clearData()
|
||||
return false
|
||||
}
|
||||
@@ -125,6 +140,9 @@ func (i *indexedIterator) Seek(key []byte) bool {
|
||||
func (i *indexedIterator) Next() bool {
|
||||
if i.err != nil {
|
||||
return false
|
||||
} else if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -136,6 +154,7 @@ func (i *indexedIterator) Next() bool {
|
||||
fallthrough
|
||||
case i.data == nil:
|
||||
if !i.index.Next() {
|
||||
i.indexErr()
|
||||
return false
|
||||
}
|
||||
i.setData()
|
||||
@@ -147,6 +166,9 @@ func (i *indexedIterator) Next() bool {
|
||||
func (i *indexedIterator) Prev() bool {
|
||||
if i.err != nil {
|
||||
return false
|
||||
} else if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -158,6 +180,7 @@ func (i *indexedIterator) Prev() bool {
|
||||
fallthrough
|
||||
case i.data == nil:
|
||||
if !i.index.Prev() {
|
||||
i.indexErr()
|
||||
return false
|
||||
}
|
||||
i.setData()
|
||||
@@ -206,16 +229,14 @@ func (i *indexedIterator) SetErrorCallback(f func(err error)) {
|
||||
i.errf = f
|
||||
}
|
||||
|
||||
// NewIndexedIterator returns an indexed iterator. An index is iterator
|
||||
// that returns another iterator, a data iterator. A data iterator is the
|
||||
// NewIndexedIterator returns an 'indexed iterator'. An index is iterator
|
||||
// that returns another iterator, a 'data iterator'. A 'data iterator' is the
|
||||
// iterator that contains actual key/value pairs.
|
||||
//
|
||||
// If strict is true then error yield by data iterator will halt the indexed
|
||||
// iterator, on contrary if strict is false then the indexed iterator will
|
||||
// ignore those error and move on to the next index. If strictGet is true and
|
||||
// index.Get() yield an 'error iterator' then the indexed iterator will be halted.
|
||||
// An 'error iterator' is iterator which its Error() method always return non-nil
|
||||
// even before any 'seeks method' is called.
|
||||
func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator {
|
||||
return &indexedIterator{index: index, strict: strict, strictGet: strictGet}
|
||||
// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true)
|
||||
// won't be ignored and will halt 'indexed iterator', otherwise the iterator will
|
||||
// continue to the next 'data iterator'. Corruption on 'index iterator' will not be
|
||||
// ignored and will halt the iterator.
|
||||
func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
|
||||
return &indexedIterator{index: index, strict: strict}
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ var _ = testutil.Defer(func() {
|
||||
// Test the iterator.
|
||||
t := testutil.IteratorTesting{
|
||||
KeyValue: kv.Clone(),
|
||||
Iter: NewIndexedIterator(NewArrayIndexer(index), true, true),
|
||||
Iter: NewIndexedIterator(NewArrayIndexer(index), true),
|
||||
}
|
||||
testutil.DoIteratorTesting(&t)
|
||||
done <- true
|
||||
|
||||
27  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go  (generated, vendored)
@@ -14,6 +14,10 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrIterReleased = errors.New("leveldb/iterator: iterator released")
|
||||
)
|
||||
|
||||
// IteratorSeeker is the interface that wraps the 'seeks method'.
|
||||
type IteratorSeeker interface {
|
||||
// First moves the iterator to the first key/value pair. If the iterator
|
||||
@@ -100,28 +104,13 @@ type ErrorCallbackSetter interface {
|
||||
}
|
||||
|
||||
type emptyIterator struct {
|
||||
releaser util.Releaser
|
||||
released bool
|
||||
err error
|
||||
util.BasicReleaser
|
||||
err error
|
||||
}
|
||||
|
||||
func (i *emptyIterator) rErr() {
|
||||
if i.err == nil && i.released {
|
||||
i.err = errors.New("leveldb/iterator: iterator released")
|
||||
}
|
||||
}
|
||||
|
||||
func (i *emptyIterator) Release() {
|
||||
if i.releaser != nil {
|
||||
i.releaser.Release()
|
||||
i.releaser = nil
|
||||
}
|
||||
i.released = true
|
||||
}
|
||||
|
||||
func (i *emptyIterator) SetReleaser(releaser util.Releaser) {
|
||||
if !i.released {
|
||||
i.releaser = releaser
|
||||
if i.err == nil && i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
29  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go  (generated, vendored)
@@ -7,16 +7,11 @@
|
||||
package iterator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrIterReleased = errors.New("leveldb/iterator: iterator released")
|
||||
)
|
||||
|
||||
type dir int
|
||||
|
||||
const (
|
||||
@@ -48,13 +43,11 @@ func assertKey(key []byte) []byte {
|
||||
}
|
||||
|
||||
func (i *mergedIterator) iterErr(iter Iterator) bool {
|
||||
if i.errf != nil {
|
||||
if err := iter.Error(); err != nil {
|
||||
if err := iter.Error(); err != nil {
|
||||
if i.errf != nil {
|
||||
i.errf(err)
|
||||
}
|
||||
}
|
||||
if i.strict {
|
||||
if err := iter.Error(); err != nil {
|
||||
if i.strict || !errors.IsCorrupted(err) {
|
||||
i.err = err
|
||||
return true
|
||||
}
|
||||
@@ -274,9 +267,13 @@ func (i *mergedIterator) Release() {
|
||||
}
|
||||
|
||||
func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
|
||||
if i.dir != dirReleased {
|
||||
i.releaser = releaser
|
||||
if i.dir == dirReleased {
|
||||
panic(util.ErrReleased)
|
||||
}
|
||||
if i.releaser != nil && releaser != nil {
|
||||
panic(util.ErrHasReleaser)
|
||||
}
|
||||
i.releaser = releaser
|
||||
}
|
||||
|
||||
func (i *mergedIterator) Error() error {
|
||||
@@ -294,9 +291,9 @@ func (i *mergedIterator) SetErrorCallback(f func(err error)) {
|
||||
// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
|
||||
// None of the iters may be nil.
|
||||
//
|
||||
// If strict is true then error yield by any iterators will halt the merged
|
||||
// iterator, on contrary if strict is false then the merged iterator will
|
||||
// ignore those error and move on to the next iterator.
|
||||
// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true)
|
||||
// won't be ignored and will halt 'merged iterator', otherwise the iterator will
|
||||
// continue to the next 'input iterator'.
|
||||
func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
|
||||
return &mergedIterator{
|
||||
iters: iters,
|
||||
|
||||
123  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go  (generated, vendored)
@@ -79,10 +79,10 @@ package journal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
@@ -103,18 +103,18 @@ type flusher interface {
|
||||
Flush() error
|
||||
}
|
||||
|
||||
// DroppedError is the error type that passed to Dropper.Drop method.
|
||||
type DroppedError struct {
|
||||
// ErrCorrupted is the error type that generated by corrupted block or chunk.
|
||||
type ErrCorrupted struct {
|
||||
Size int
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e DroppedError) Error() string {
|
||||
return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason)
|
||||
func (e *ErrCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
|
||||
}
|
||||
|
||||
// Dropper is the interface that wrap simple Drop method. The Drop
|
||||
// method will be called when the journal reader dropping a chunk.
|
||||
// method will be called when the journal reader dropping a block or chunk.
|
||||
type Dropper interface {
|
||||
Drop(err error)
|
||||
}
|
||||
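For illustration only, a minimal sketch of a Dropper implementation that simply logs every block or chunk the journal reader drops, passed to NewReader as shown in the hunk that follows; the type name logDropper and the journal file name are assumptions, not part of the vendored code.

package main

import (
	"log"
	"os"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

// logDropper is a hypothetical Dropper: it records every dropped block or
// chunk, e.g. on a checksum mismatch or a zero header.
type logDropper struct{}

func (logDropper) Drop(err error) {
	log.Printf("journal: dropped: %v", err)
}

func main() {
	f, err := os.Open("000001.log") // hypothetical journal file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// strict=false skips corrupted chunks, checksum=true verifies CRCs.
	r := journal.NewReader(f, logDropper{}, false, true)
	for {
		// Each call returns a reader for one journal record (ignored here).
		if _, err := r.Next(); err != nil {
			break // io.EOF or a hard error
		}
	}
}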
@@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
|
||||
}
|
||||
}
|
||||
|
||||
var errSkip = errors.New("leveldb/journal: skipped")
|
||||
|
||||
func (r *Reader) corrupt(n int, reason string, skip bool) error {
|
||||
if r.dropper != nil {
|
||||
r.dropper.Drop(&ErrCorrupted{n, reason})
|
||||
}
|
||||
if r.strict && !skip {
|
||||
r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
|
||||
return r.err
|
||||
}
|
||||
return errSkip
|
||||
}
|
||||
|
||||
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
|
||||
// next block into the buffer if necessary.
|
||||
func (r *Reader) nextChunk(wantFirst, skip bool) error {
|
||||
func (r *Reader) nextChunk(first bool) error {
|
||||
for {
|
||||
if r.j+headerSize <= r.n {
|
||||
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
|
||||
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
|
||||
chunkType := r.buf[r.j+6]
|
||||
|
||||
var err error
|
||||
if checksum == 0 && length == 0 && chunkType == 0 {
|
||||
// Drop entire block.
|
||||
err = DroppedError{r.n - r.j, "zero header"}
|
||||
m := r.n - r.j
|
||||
r.i = r.n
|
||||
r.j = r.n
|
||||
return r.corrupt(m, "zero header", false)
|
||||
} else {
|
||||
m := r.n - r.j
|
||||
r.i = r.j + headerSize
|
||||
r.j = r.j + headerSize + int(length)
|
||||
if r.j > r.n {
|
||||
// Drop entire block.
|
||||
err = DroppedError{m, "chunk length overflows block"}
|
||||
r.i = r.n
|
||||
r.j = r.n
|
||||
return r.corrupt(m, "chunk length overflows block", false)
|
||||
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
|
||||
// Drop entire block.
|
||||
err = DroppedError{m, "checksum mismatch"}
|
||||
r.i = r.n
|
||||
r.j = r.n
|
||||
return r.corrupt(m, "checksum mismatch", false)
|
||||
}
|
||||
}
|
||||
if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
|
||||
if skip {
|
||||
// The chunk are intentionally skipped.
|
||||
if chunkType == lastChunkType {
|
||||
skip = false
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
// Drop the chunk.
|
||||
err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
|
||||
}
|
||||
if first && chunkType != fullChunkType && chunkType != firstChunkType {
|
||||
m := r.j - r.i
|
||||
r.i = r.j
|
||||
// Report the error, but skip it.
|
||||
return r.corrupt(m+headerSize, "orphan chunk", true)
|
||||
}
|
||||
if err == nil {
|
||||
r.last = chunkType == fullChunkType || chunkType == lastChunkType
|
||||
} else {
|
||||
if r.dropper != nil {
|
||||
r.dropper.Drop(err)
|
||||
}
|
||||
if r.strict {
|
||||
r.err = err
|
||||
}
|
||||
r.last = chunkType == fullChunkType || chunkType == lastChunkType
|
||||
return nil
|
||||
}
|
||||
|
||||
// The last block.
|
||||
if r.n < blockSize && r.n > 0 {
|
||||
if !first {
|
||||
return r.corrupt(0, "missing chunk part", false)
|
||||
}
|
||||
r.err = io.EOF
|
||||
return r.err
|
||||
}
|
||||
|
||||
// Read block.
|
||||
n, err := io.ReadFull(r.r, r.buf[:])
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if r.n < blockSize && r.n > 0 {
|
||||
// This is the last block.
|
||||
if r.j != r.n {
|
||||
r.err = io.ErrUnexpectedEOF
|
||||
} else {
|
||||
r.err = io.EOF
|
||||
}
|
||||
return r.err
|
||||
}
|
||||
n, err := io.ReadFull(r.r, r.buf[:])
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
r.err = err
|
||||
return r.err
|
||||
}
|
||||
if n == 0 {
|
||||
if !first {
|
||||
return r.corrupt(0, "missing chunk part", false)
|
||||
}
|
||||
r.err = io.EOF
|
||||
return r.err
|
||||
}
|
||||
@@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
|
||||
|
||||
// Next returns a reader for the next journal. It returns io.EOF if there are no
|
||||
// more journals. The reader returned becomes stale after the next Next call,
|
||||
// and should no longer be used.
|
||||
// and should no longer be used. If strict is false, the reader will returns
|
||||
// io.ErrUnexpectedEOF error when found corrupted journal.
|
||||
func (r *Reader) Next() (io.Reader, error) {
|
||||
r.seq++
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
skip := !r.last
|
||||
r.i = r.j
|
||||
for {
|
||||
r.i = r.j
|
||||
if r.nextChunk(true, skip) != nil {
|
||||
// So that 'orphan chunk' drop will be reported.
|
||||
skip = false
|
||||
} else {
|
||||
if err := r.nextChunk(true); err == nil {
|
||||
break
|
||||
}
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
} else if err != errSkip {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &singleReader{r, r.seq, nil}, nil
|
||||
}
|
||||
|
||||
// Reset resets the journal reader, allows reuse of the journal reader.
|
||||
// Reset resets the journal reader, allows reuse of the journal reader. Reset returns
|
||||
// last accumulated error.
|
||||
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
|
||||
r.seq++
|
||||
err := r.err
|
||||
@@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
|
||||
if r.last {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if x.err = r.nextChunk(false, false); x.err != nil {
|
||||
x.err = r.nextChunk(false)
|
||||
if x.err != nil {
|
||||
if x.err == errSkip {
|
||||
x.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, x.err
|
||||
}
|
||||
}
|
||||
@@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
|
||||
if r.last {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if x.err = r.nextChunk(false, false); x.err != nil {
|
||||
x.err = r.nextChunk(false)
|
||||
if x.err != nil {
|
||||
if x.err == errSkip {
|
||||
x.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, x.err
|
||||
}
|
||||
}
|
||||
|
||||
490  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go  (generated, vendored)
@@ -12,6 +12,7 @@ package journal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -326,3 +327,492 @@ func TestStaleWriter(t *testing.T) {
|
||||
t.Fatalf("stale write #1: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_MissingLastBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Cut the last block.
|
||||
b := buf.Bytes()[:blockSize]
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read.
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if n != blockSize-1024 {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
|
||||
}
|
||||
|
||||
// Second read.
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #0.
|
||||
for i := 0; i < 1024; i++ {
|
||||
b[i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (third record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 2; n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #1.
|
||||
for i := 0; i < 1024; i++ {
|
||||
b[blockSize+i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #2: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 2; n != want {
|
||||
t.Fatalf("read #2: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_CorruptedLastBlock(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Fourth record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
|
||||
t.Fatalf("write #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting block #3.
|
||||
for i := len(b) - 1; i > len(b)-1024; i-- {
|
||||
b[i] = '1'
|
||||
}
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize - headerSize); n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Third read (third record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #2: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #2: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Fourth read (fourth record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #3: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting record #1.
|
||||
x := blockSize
|
||||
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (second record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("read #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
w := NewWriter(buf)
|
||||
|
||||
// First record.
|
||||
ww, err := w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
|
||||
t.Fatalf("write #0: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Second record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
|
||||
t.Fatalf("write #1: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Third record.
|
||||
ww, err = w.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
|
||||
t.Fatalf("write #2: unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := buf.Bytes()
|
||||
// Corrupting record #1.
|
||||
x := blockSize/2 + headerSize
|
||||
binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
|
||||
|
||||
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
|
||||
|
||||
// First read (first record).
|
||||
rr, err := r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #0: %v", err)
|
||||
}
|
||||
if want := int64(blockSize / 2); n != want {
|
||||
t.Fatalf("read #0: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
// Second read (third record).
|
||||
rr, err = r.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = io.Copy(ioutil.Discard, rr)
|
||||
if err != nil {
|
||||
t.Fatalf("read #1: %v", err)
|
||||
}
|
||||
if want := int64(blockSize-headerSize) + 1; n != want {
|
||||
t.Fatalf("read #1: got %d bytes want %d", n, want)
|
||||
}
|
||||
|
||||
if _, err := r.Next(); err != io.EOF {
|
||||
t.Fatalf("last next: unexpected error: %v", err)
|
||||
}
|
||||
}
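The corruption tests above exercise goleveldb's journal format, which packs record chunks into fixed-size blocks and, in non-strict mode, reports damaged chunks through the Dropper callback rather than failing the read. Below is a minimal, hedged sketch of writing and reading back records with this vendored journal package; it assumes Dropper is satisfied by a single Drop(err error) method and that the constructor signatures match the ones used in the tests (NewWriter(w) and NewReader(r, dropper, strict, checksum)).

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

type logDropper struct{}

// Drop is called for each chunk the reader skips over in non-strict mode.
func (logDropper) Drop(err error) { fmt.Println("dropped:", err) }

func main() {
	buf := new(bytes.Buffer)

	// Write two records; a record may span several fixed-size blocks.
	w := journal.NewWriter(buf)
	for _, rec := range [][]byte{[]byte("first record"), []byte("second record")} {
		ww, err := w.Next()
		if err != nil {
			panic(err)
		}
		if _, err := ww.Write(rec); err != nil {
			panic(err)
		}
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Read the records back; strict=false drops corrupted chunks,
	// checksum=true verifies per-chunk checksums.
	r := journal.NewReader(bytes.NewReader(buf.Bytes()), logDropper{}, false, true)
	for {
		rr, err := r.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		data, _ := ioutil.ReadAll(rr)
		fmt.Printf("record: %q\n", data)
	}
}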
|
||||
|
||||
133  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go  (generated, vendored)
@@ -9,15 +9,30 @@ package leveldb
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
)
|
||||
|
||||
type vType int
|
||||
type ErrIkeyCorrupted struct {
|
||||
Ikey []byte
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (t vType) String() string {
|
||||
switch t {
|
||||
case tDel:
|
||||
func (e *ErrIkeyCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
|
||||
}
|
||||
|
||||
func newErrIkeyCorrupted(ikey []byte, reason string) error {
|
||||
return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
|
||||
}
|
||||
|
||||
type kType int
|
||||
|
||||
func (kt kType) String() string {
|
||||
switch kt {
|
||||
case ktDel:
|
||||
return "d"
|
||||
case tVal:
|
||||
case ktVal:
|
||||
return "v"
|
||||
}
|
||||
return "x"
|
||||
@@ -26,16 +41,16 @@ func (t vType) String() string {
|
||||
// Value types encoded as the last component of internal keys.
|
||||
// Don't modify; this value are saved to disk.
|
||||
const (
|
||||
tDel vType = iota
|
||||
tVal
|
||||
ktDel kType = iota
|
||||
ktVal
|
||||
)
|
||||
|
||||
// tSeek defines the vType that should be passed when constructing an
|
||||
// ktSeek defines the kType that should be passed when constructing an
|
||||
// internal key for seeking to a particular sequence number (since we
|
||||
// sort sequence numbers in decreasing order and the value type is
|
||||
// embedded as the low 8 bits in the sequence number in internal keys,
|
||||
// we need to use the highest-numbered ValueType, not the lowest).
|
||||
const tSeek = tVal
|
||||
const ktSeek = ktVal
|
||||
|
||||
const (
|
||||
// Maximum value possible for sequence number; the 8-bits are
|
||||
@@ -43,7 +58,7 @@ const (
|
||||
// 64-bit integer.
|
||||
kMaxSeq uint64 = (uint64(1) << 56) - 1
|
||||
// Maximum value possible for packed sequence number and type.
|
||||
kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
|
||||
kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
|
||||
)
|
||||
|
||||
// Maximum number encoded in bytes.
|
||||
@@ -55,85 +70,73 @@ func init() {
|
||||
|
||||
type iKey []byte
|
||||
|
||||
func newIKey(ukey []byte, seq uint64, t vType) iKey {
|
||||
if seq > kMaxSeq || t > tVal {
|
||||
panic("invalid seq number or value type")
|
||||
func newIkey(ukey []byte, seq uint64, kt kType) iKey {
|
||||
if seq > kMaxSeq {
|
||||
panic("leveldb: invalid sequence number")
|
||||
} else if kt > ktVal {
|
||||
panic("leveldb: invalid type")
|
||||
}
|
||||
|
||||
b := make(iKey, len(ukey)+8)
|
||||
copy(b, ukey)
|
||||
binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t))
|
||||
return b
|
||||
ik := make(iKey, len(ukey)+8)
|
||||
copy(ik, ukey)
|
||||
binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
|
||||
return ik
|
||||
}
|
||||
|
||||
func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) {
|
||||
if len(p) < 8 {
|
||||
return
|
||||
func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
|
||||
if len(ik) < 8 {
|
||||
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
|
||||
}
|
||||
num := binary.LittleEndian.Uint64(p[len(p)-8:])
|
||||
seq, t = uint64(num>>8), vType(num&0xff)
|
||||
if t > tVal {
|
||||
return
|
||||
num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
||||
seq, kt = uint64(num>>8), kType(num&0xff)
|
||||
if kt > ktVal {
|
||||
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
|
||||
}
|
||||
ukey = p[:len(p)-8]
|
||||
ok = true
|
||||
ukey = ik[:len(ik)-8]
|
||||
return
|
||||
}
|
||||
|
||||
func validIkey(p []byte) bool {
|
||||
_, _, _, ok := parseIkey(p)
|
||||
return ok
|
||||
func validIkey(ik []byte) bool {
|
||||
_, _, _, err := parseIkey(ik)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (p iKey) assert() {
|
||||
if p == nil {
|
||||
panic("nil iKey")
|
||||
func (ik iKey) assert() {
|
||||
if ik == nil {
|
||||
panic("leveldb: nil iKey")
|
||||
}
|
||||
if len(p) < 8 {
|
||||
panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p)))
|
||||
if len(ik) < 8 {
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", ik, len(ik)))
|
||||
}
|
||||
}
|
||||
|
||||
func (p iKey) ok() bool {
|
||||
if len(p) < 8 {
|
||||
return false
|
||||
}
|
||||
_, _, ok := p.parseNum()
|
||||
return ok
|
||||
func (ik iKey) ukey() []byte {
|
||||
ik.assert()
|
||||
return ik[:len(ik)-8]
|
||||
}
|
||||
|
||||
func (p iKey) ukey() []byte {
|
||||
p.assert()
|
||||
return p[:len(p)-8]
|
||||
func (ik iKey) num() uint64 {
|
||||
ik.assert()
|
||||
return binary.LittleEndian.Uint64(ik[len(ik)-8:])
|
||||
}
|
||||
|
||||
func (p iKey) num() uint64 {
|
||||
p.assert()
|
||||
return binary.LittleEndian.Uint64(p[len(p)-8:])
|
||||
}
|
||||
|
||||
func (p iKey) parseNum() (seq uint64, t vType, ok bool) {
|
||||
if p == nil {
|
||||
panic("nil iKey")
|
||||
func (ik iKey) parseNum() (seq uint64, kt kType) {
|
||||
num := ik.num()
|
||||
seq, kt = uint64(num>>8), kType(num&0xff)
|
||||
if kt > ktVal {
|
||||
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", ik, len(ik), kt))
|
||||
}
|
||||
if len(p) < 8 {
|
||||
return
|
||||
}
|
||||
num := p.num()
|
||||
seq, t = uint64(num>>8), vType(num&0xff)
|
||||
if t > tVal {
|
||||
return 0, 0, false
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func (p iKey) String() string {
|
||||
if len(p) == 0 {
|
||||
func (ik iKey) String() string {
|
||||
if ik == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
if seq, t, ok := p.parseNum(); ok {
|
||||
return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq)
|
||||
|
||||
if ukey, seq, kt, err := parseIkey(ik); err == nil {
|
||||
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
|
||||
} else {
|
||||
return "<invalid>"
|
||||
}
|
||||
return "<invalid>"
|
||||
}
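The key.go changes above rename vType to kType and make parseIkey return a structured corruption error instead of an ok flag, but the on-disk layout stays the same: the user key followed by eight little-endian bytes packing (seq<<8 | type). The standalone sketch below reproduces that encoding; the helper names makeIkey and splitIkey are hypothetical and only mirror newIkey/parseIkey from the diff.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const (
	ktDel = 0 // deletion marker
	ktVal = 1 // normal value
)

// makeIkey appends the packed (seq<<8 | type) trailer to the user key.
func makeIkey(ukey []byte, seq, kt uint64) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|kt)
	return ik
}

// splitIkey reverses makeIkey, rejecting short keys and unknown types.
func splitIkey(ik []byte) (ukey []byte, seq, kt uint64, err error) {
	if len(ik) < 8 {
		return nil, 0, 0, errors.New("ikey corrupted: invalid length")
	}
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	seq, kt = num>>8, num&0xff
	if kt > ktVal {
		return nil, 0, 0, errors.New("ikey corrupted: invalid type")
	}
	return ik[:len(ik)-8], seq, kt, nil
}

func main() {
	ik := makeIkey([]byte("hello"), 42, ktVal)
	ukey, seq, kt, err := splitIkey(ik)
	fmt.Println(string(ukey), seq, kt, err) // hello 42 1 <nil>
}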
|
||||
|
||||
94  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go  (generated, vendored)
@@ -15,8 +15,8 @@ import (
|
||||
|
||||
var defaultIComparer = &iComparer{comparer.DefaultComparer}
|
||||
|
||||
func ikey(key string, seq uint64, t vType) iKey {
|
||||
return newIKey([]byte(key), uint64(seq), t)
|
||||
func ikey(key string, seq uint64, kt kType) iKey {
|
||||
return newIkey([]byte(key), uint64(seq), kt)
|
||||
}
|
||||
|
||||
func shortSep(a, b []byte) []byte {
|
||||
@@ -37,27 +37,37 @@ func shortSuccessor(b []byte) []byte {
|
||||
return dst
|
||||
}
|
||||
|
||||
func testSingleKey(t *testing.T, key string, seq uint64, vt vType) {
|
||||
ik := ikey(key, seq, vt)
|
||||
func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
|
||||
ik := ikey(key, seq, kt)
|
||||
|
||||
if !bytes.Equal(ik.ukey(), []byte(key)) {
|
||||
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
|
||||
}
|
||||
|
||||
if rseq, rt, ok := ik.parseNum(); ok {
|
||||
rseq, rt := ik.parseNum()
|
||||
if rseq != seq {
|
||||
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
|
||||
}
|
||||
if rt != kt {
|
||||
t.Errorf("type does not equal, got %v, want %v", rt, kt)
|
||||
}
|
||||
|
||||
if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
|
||||
if !bytes.Equal(rukey, []byte(key)) {
|
||||
t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
|
||||
}
|
||||
if rseq != seq {
|
||||
t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
|
||||
}
|
||||
|
||||
if rt != vt {
|
||||
t.Errorf("type does not equal, got %v, want %v", rt, vt)
|
||||
if rt != kt {
|
||||
t.Errorf("type does not equal, got %v, want %v", rt, kt)
|
||||
}
|
||||
} else {
|
||||
t.Error("cannot parse seq and type")
|
||||
t.Errorf("key error: %v", kerr)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIKey_EncodeDecode(t *testing.T) {
|
||||
func TestIkey_EncodeDecode(t *testing.T) {
|
||||
keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
|
||||
seqs := []uint64{
|
||||
1, 2, 3,
|
||||
@@ -67,8 +77,8 @@ func TestIKey_EncodeDecode(t *testing.T) {
|
||||
}
|
||||
for _, key := range keys {
|
||||
for _, seq := range seqs {
|
||||
testSingleKey(t, key, seq, tVal)
|
||||
testSingleKey(t, "hello", 1, tDel)
|
||||
testSingleKey(t, key, seq, ktVal)
|
||||
testSingleKey(t, "hello", 1, ktDel)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -79,45 +89,45 @@ func assertBytes(t *testing.T, want, got []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIKeyShortSeparator(t *testing.T) {
|
||||
func TestIkeyShortSeparator(t *testing.T) {
|
||||
// When user keys are same
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("foo", 99, tVal)))
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("foo", 101, tVal)))
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("foo", 100, tVal)))
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("foo", 100, tDel)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("foo", 99, ktVal)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("foo", 101, ktVal)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("foo", 100, ktVal)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("foo", 100, ktDel)))
|
||||
|
||||
// When user keys are misordered
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("bar", 99, tVal)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("bar", 99, ktVal)))
|
||||
|
||||
// When user keys are different, but correctly ordered
|
||||
assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("hello", 200, tVal)))
|
||||
assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("hello", 200, ktVal)))
|
||||
|
||||
// When start user key is prefix of limit user key
|
||||
assertBytes(t, ikey("foo", 100, tVal),
|
||||
shortSep(ikey("foo", 100, tVal),
|
||||
ikey("foobar", 200, tVal)))
|
||||
assertBytes(t, ikey("foo", 100, ktVal),
|
||||
shortSep(ikey("foo", 100, ktVal),
|
||||
ikey("foobar", 200, ktVal)))
|
||||
|
||||
// When limit user key is prefix of start user key
|
||||
assertBytes(t, ikey("foobar", 100, tVal),
|
||||
shortSep(ikey("foobar", 100, tVal),
|
||||
ikey("foo", 200, tVal)))
|
||||
assertBytes(t, ikey("foobar", 100, ktVal),
|
||||
shortSep(ikey("foobar", 100, ktVal),
|
||||
ikey("foo", 200, ktVal)))
|
||||
}
|
||||
|
||||
func TestIKeyShortestSuccessor(t *testing.T) {
|
||||
assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
|
||||
shortSuccessor(ikey("foo", 100, tVal)))
|
||||
assertBytes(t, ikey("\xff\xff", 100, tVal),
|
||||
shortSuccessor(ikey("\xff\xff", 100, tVal)))
|
||||
func TestIkeyShortestSuccessor(t *testing.T) {
|
||||
assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
|
||||
shortSuccessor(ikey("foo", 100, ktVal)))
|
||||
assertBytes(t, ikey("\xff\xff", 100, ktVal),
|
||||
shortSuccessor(ikey("\xff\xff", 100, ktVal)))
|
||||
}
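The separator and successor tests rely on the ktSeek comment above: within one user key, internal keys are ordered by the packed (seq<<8 | type) number in decreasing order, so a seek key must carry the highest type value to avoid sorting past entries at the same sequence number. A tiny, hedged illustration of that packing comparison:

package main

import "fmt"

// packed mirrors the (seq<<8 | type) trailer of an internal key.
func packed(seq, kt uint64) uint64 { return seq<<8 | kt }

func main() {
	seek := packed(100, 1)  // seek key built with the highest type (ktSeek == ktVal)
	entry := packed(100, 0) // a deletion stored at the same sequence number
	// Within one user key, the larger packed number sorts first, so the
	// seek key does not skip entries at its own sequence number.
	fmt.Println(seek >= entry) // true
}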
|
||||
|
||||
32  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go  (generated, vendored)
@@ -12,12 +12,14 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotFound = util.ErrNotFound
|
||||
ErrNotFound = errors.ErrNotFound
|
||||
ErrIterReleased = errors.New("leveldb/memdb: iterator released")
|
||||
)
|
||||
|
||||
const tMaxHeight = 12
|
||||
@@ -29,6 +31,7 @@ type dbIter struct {
|
||||
node int
|
||||
forward bool
|
||||
key, value []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (i *dbIter) fill(checkStart, checkLimit bool) bool {
|
||||
@@ -59,6 +62,11 @@ func (i *dbIter) Valid() bool {
|
||||
}
|
||||
|
||||
func (i *dbIter) First() bool {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
i.forward = true
|
||||
i.p.mu.RLock()
|
||||
defer i.p.mu.RUnlock()
|
||||
@@ -71,9 +79,11 @@ func (i *dbIter) First() bool {
|
||||
}
|
||||
|
||||
func (i *dbIter) Last() bool {
|
||||
if i.p == nil {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
i.forward = false
|
||||
i.p.mu.RLock()
|
||||
defer i.p.mu.RUnlock()
|
||||
@@ -86,9 +96,11 @@ func (i *dbIter) Last() bool {
|
||||
}
|
||||
|
||||
func (i *dbIter) Seek(key []byte) bool {
|
||||
if i.p == nil {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
i.forward = true
|
||||
i.p.mu.RLock()
|
||||
defer i.p.mu.RUnlock()
|
||||
@@ -100,9 +112,11 @@ func (i *dbIter) Seek(key []byte) bool {
|
||||
}
|
||||
|
||||
func (i *dbIter) Next() bool {
|
||||
if i.p == nil {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if i.node == 0 {
|
||||
if !i.forward {
|
||||
return i.First()
|
||||
@@ -117,9 +131,11 @@ func (i *dbIter) Next() bool {
|
||||
}
|
||||
|
||||
func (i *dbIter) Prev() bool {
|
||||
if i.p == nil {
|
||||
if i.Released() {
|
||||
i.err = ErrIterReleased
|
||||
return false
|
||||
}
|
||||
|
||||
if i.node == 0 {
|
||||
if i.forward {
|
||||
return i.Last()
|
||||
@@ -141,10 +157,10 @@ func (i *dbIter) Value() []byte {
|
||||
return i.value
|
||||
}
|
||||
|
||||
func (i *dbIter) Error() error { return nil }
|
||||
func (i *dbIter) Error() error { return i.err }
|
||||
|
||||
func (i *dbIter) Release() {
|
||||
if i.p != nil {
|
||||
if !i.Released() {
|
||||
i.p = nil
|
||||
i.node = 0
|
||||
i.key = nil
|
||||
@@ -437,6 +453,8 @@ func (p *DB) Reset() {
|
||||
// New creates a new initialized in-memory key/value DB. The capacity
|
||||
// is the initial key/value buffer capacity. The capacity is advisory,
|
||||
// not enforced.
|
||||
//
|
||||
// The returned DB instance is goroutine-safe.
|
||||
func New(cmp comparer.BasicComparer, capacity int) *DB {
|
||||
p := &DB{
|
||||
cmp: cmp,
|
||||
|
||||
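The memdb changes give the iterator an err field so that using it after Release surfaces ErrIterReleased through Error() instead of silently returning false. A small usage sketch against this vendored revision follows; the Put and NewIterator signatures are assumed to match the package at this point in time.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// Capacity is advisory; 4096 bytes is just an initial buffer hint.
	db := memdb.New(comparer.DefaultComparer, 4096)
	_ = db.Put([]byte("a"), []byte("1"))
	_ = db.Put([]byte("b"), []byte("2"))

	it := db.NewIterator(nil)
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	it.Release()

	// After Release, navigation fails and Error reports why.
	if !it.Next() {
		fmt.Println("after release:", it.Error()) // leveldb/memdb: iterator released
	}
}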
2  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go  (generated, vendored)
@@ -129,7 +129,7 @@ var _ = testutil.Defer(func() {
|
||||
}
|
||||
|
||||
return db
|
||||
})
|
||||
}, nil, nil)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
319  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go  (generated, vendored)
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -20,20 +21,38 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultBlockCacheSize = 8 * MiB
|
||||
DefaultBlockRestartInterval = 16
|
||||
DefaultBlockSize = 4 * KiB
|
||||
DefaultCompressionType = SnappyCompression
|
||||
DefaultMaxOpenFiles = 1000
|
||||
DefaultWriteBuffer = 4 * MiB
|
||||
DefaultBlockCacheSize = 8 * MiB
|
||||
DefaultBlockRestartInterval = 16
|
||||
DefaultBlockSize = 4 * KiB
|
||||
DefaultCompactionExpandLimitFactor = 25
|
||||
DefaultCompactionGPOverlapsFactor = 10
|
||||
DefaultCompactionL0Trigger = 4
|
||||
DefaultCompactionSourceLimitFactor = 1
|
||||
DefaultCompactionTableSize = 2 * MiB
|
||||
DefaultCompactionTableSizeMultiplier = 1.0
|
||||
DefaultCompactionTotalSize = 10 * MiB
|
||||
DefaultCompactionTotalSizeMultiplier = 10.0
|
||||
DefaultCompressionType = SnappyCompression
|
||||
DefaultCachedOpenFiles = 500
|
||||
DefaultMaxMemCompationLevel = 2
|
||||
DefaultNumLevel = 7
|
||||
DefaultWriteBuffer = 4 * MiB
|
||||
DefaultWriteL0PauseTrigger = 12
|
||||
DefaultWriteL0SlowdownTrigger = 8
|
||||
)
|
||||
|
||||
type noCache struct{}
|
||||
|
||||
func (noCache) SetCapacity(capacity int) {}
|
||||
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
|
||||
func (noCache) Purge(fin cache.PurgeFin) {}
|
||||
func (noCache) Zap(closed bool) {}
|
||||
func (noCache) SetCapacity(capacity int) {}
|
||||
func (noCache) Capacity() int { return 0 }
|
||||
func (noCache) Used() int { return 0 }
|
||||
func (noCache) Size() int { return 0 }
|
||||
func (noCache) NumObjects() int { return 0 }
|
||||
func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
|
||||
func (noCache) PurgeNamespace(id uint64, fin cache.PurgeFin) {}
|
||||
func (noCache) ZapNamespace(id uint64) {}
|
||||
func (noCache) Purge(fin cache.PurgeFin) {}
|
||||
func (noCache) Zap() {}
|
||||
|
||||
var NoCache cache.Cache = noCache{}
|
||||
|
||||
@@ -59,34 +78,47 @@ const (
|
||||
nCompression
|
||||
)
|
||||
|
||||
// Strict is the DB strict level.
|
||||
// Strict is the DB 'strict level'.
|
||||
type Strict uint
|
||||
|
||||
const (
|
||||
// If present then a corrupted or invalid chunk or block in manifest
|
||||
// journal will cause an error istead of being dropped.
|
||||
// journal will cause an error instead of being dropped.
|
||||
// This will prevent a database with a corrupted manifest from being opened.
|
||||
StrictManifest Strict = 1 << iota
|
||||
|
||||
// If present then a corrupted or invalid chunk or block in journal
|
||||
// will cause an error istead of being dropped.
|
||||
StrictJournal
|
||||
|
||||
// If present then journal chunk checksum will be verified.
|
||||
StrictJournalChecksum
|
||||
|
||||
// If present then an invalid key/value pair will cause an error
|
||||
// instead of being skipped.
|
||||
StrictIterator
|
||||
// If present then a corrupted or invalid chunk or block in journal
|
||||
// will cause an error instead of being dropped.
|
||||
// This will prevent a database with a corrupted journal from being opened.
|
||||
StrictJournal
|
||||
|
||||
// If present then 'sorted table' block checksum will be verified.
|
||||
// This has effect on both 'read operation' and compaction.
|
||||
StrictBlockChecksum
|
||||
|
||||
// If present then a corrupted 'sorted table' will fail compaction.
|
||||
// The database will enter read-only mode.
|
||||
StrictCompaction
|
||||
|
||||
// If present then a corrupted 'sorted table' will halt 'read operation'.
|
||||
StrictReader
|
||||
|
||||
// If present then leveldb.Recover will drop corrupted 'sorted table'.
|
||||
StrictRecovery
|
||||
|
||||
// This is only applicable to ReadOptions; if present then this ReadOptions
|
||||
// 'strict level' will override global ones.
|
||||
StrictOverride
|
||||
|
||||
// StrictAll enables all strict flags.
|
||||
StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum
|
||||
StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader
|
||||
|
||||
// DefaultStrict is the default strict flags. Specify any strict flags
|
||||
// will override default strict flags as whole (i.e. not OR'ed).
|
||||
DefaultStrict = StrictJournalChecksum | StrictBlockChecksum
|
||||
DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
|
||||
|
||||
// NoStrict disables all strict flags. Override default strict flags.
|
||||
NoStrict = ^StrictAll
|
||||
@@ -119,6 +151,80 @@ type Options struct {
|
||||
// The default value is 4KiB.
|
||||
BlockSize int
|
||||
|
||||
// CachedOpenFiles defines the number of open files to keep around when not
|
||||
// in use; the count includes files that are still in use.
|
||||
// Set this to negative value to disable caching.
|
||||
//
|
||||
// The default value is 500.
|
||||
CachedOpenFiles int
|
||||
|
||||
// CompactionExpandLimitFactor limits compaction size after expanded.
|
||||
// This will be multiplied by table size limit at compaction target level.
|
||||
//
|
||||
// The default value is 25.
|
||||
CompactionExpandLimitFactor int
|
||||
|
||||
// CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
|
||||
// single 'sorted table' generates.
|
||||
// This will be multiplied by table size limit at grandparent level.
|
||||
//
|
||||
// The default value is 10.
|
||||
CompactionGPOverlapsFactor int
|
||||
|
||||
// CompactionL0Trigger defines number of 'sorted table' at level-0 that will
|
||||
// trigger compaction.
|
||||
//
|
||||
// The default value is 4.
|
||||
CompactionL0Trigger int
|
||||
|
||||
// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
|
||||
// level-0.
|
||||
// This will be multiplied by table size limit at compaction target level.
|
||||
//
|
||||
// The default value is 1.
|
||||
CompactionSourceLimitFactor int
|
||||
|
||||
// CompactionTableSize limits size of 'sorted table' that compaction generates.
|
||||
// The limits for each level will be calculated as:
|
||||
// CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
|
||||
// The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel.
|
||||
//
|
||||
// The default value is 2MiB.
|
||||
CompactionTableSize int
|
||||
|
||||
// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
|
||||
//
|
||||
// The default value is 1.
|
||||
CompactionTableSizeMultiplier float64
|
||||
|
||||
// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
|
||||
// CompactionTableSize.
|
||||
// Use zero to skip a level.
|
||||
//
|
||||
// The default value is nil.
|
||||
CompactionTableSizeMultiplierPerLevel []float64
|
||||
|
||||
// CompactionTotalSize limits total size of 'sorted table' for each level.
|
||||
// The limits for each level will be calculated as:
|
||||
// CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
|
||||
// The multiplier for each level can also fine-tuned using
|
||||
// CompactionTotalSizeMultiplierPerLevel.
|
||||
//
|
||||
// The default value is 10MiB.
|
||||
CompactionTotalSize int
|
||||
|
||||
// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
|
||||
//
|
||||
// The default value is 10.
|
||||
CompactionTotalSizeMultiplier float64
|
||||
|
||||
// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
|
||||
// CompactionTotalSize.
|
||||
// Use zero to skip a level.
|
||||
//
|
||||
// The default value is nil.
|
||||
CompactionTotalSizeMultiplierPerLevel []float64
|
||||
|
||||
// Comparer defines a total ordering over the space of []byte keys: a 'less
|
||||
// than' relationship. The same comparison algorithm must be used for reads
|
||||
// and writes over the lifetime of the DB.
|
||||
@@ -131,6 +237,11 @@ type Options struct {
|
||||
// The default value (DefaultCompression) uses snappy compression.
|
||||
Compression Compression
|
||||
|
||||
// DisableCompactionBackoff allows disable compaction retry backoff.
|
||||
//
|
||||
// The default value is false.
|
||||
DisableCompactionBackoff bool
|
||||
|
||||
// ErrorIfExist defines whether an error should be returned if the DB already
|
||||
// exist.
|
||||
//
|
||||
@@ -159,12 +270,18 @@ type Options struct {
|
||||
// The default value is nil.
|
||||
Filter filter.Filter
|
||||
|
||||
// MaxOpenFiles defines maximum number of open files to kept around
|
||||
// (cached). This is not an hard limit, actual open files may exceed
|
||||
// the defined value.
|
||||
// MaxMemCompationLevel defines the maximum level a newly compacted 'memdb'
|
||||
// will be pushed into if it doesn't create overlap. This should be less than
|
||||
// NumLevel. Use -1 for level-0.
|
||||
//
|
||||
// The default value is 1000.
|
||||
MaxOpenFiles int
|
||||
// The default is 2.
|
||||
MaxMemCompationLevel int
|
||||
|
||||
// NumLevel defines the number of database levels. The number of levels shouldn't
|
||||
// change between opens, or the database will panic.
|
||||
//
|
||||
// The default is 7.
|
||||
NumLevel int
|
||||
|
||||
// Strict defines the DB strict level.
|
||||
Strict Strict
|
||||
@@ -177,6 +294,18 @@ type Options struct {
|
||||
//
|
||||
// The default value is 4MiB.
|
||||
WriteBuffer int
|
||||
|
||||
// WriteL0PauseTrigger defines the number of 'sorted table' at level-0 that will
|
||||
// pause write.
|
||||
//
|
||||
// The default value is 12.
|
||||
WriteL0PauseTrigger int
|
||||
|
||||
// WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
|
||||
// will trigger write slowdown.
|
||||
//
|
||||
// The default value is 8.
|
||||
WriteL0SlowdownTrigger int
|
||||
}
|
||||
|
||||
func (o *Options) GetAltFilters() []filter.Filter {
|
||||
@@ -207,6 +336,88 @@ func (o *Options) GetBlockSize() int {
|
||||
return o.BlockSize
|
||||
}
|
||||
|
||||
func (o *Options) GetCachedOpenFiles() int {
|
||||
if o == nil || o.CachedOpenFiles == 0 {
|
||||
return DefaultCachedOpenFiles
|
||||
} else if o.CachedOpenFiles < 0 {
|
||||
return 0
|
||||
}
|
||||
return o.CachedOpenFiles
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionExpandLimit(level int) int {
|
||||
factor := DefaultCompactionExpandLimitFactor
|
||||
if o != nil && o.CompactionExpandLimitFactor > 0 {
|
||||
factor = o.CompactionExpandLimitFactor
|
||||
}
|
||||
return o.GetCompactionTableSize(level+1) * factor
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionGPOverlaps(level int) int {
|
||||
factor := DefaultCompactionGPOverlapsFactor
|
||||
if o != nil && o.CompactionGPOverlapsFactor > 0 {
|
||||
factor = o.CompactionGPOverlapsFactor
|
||||
}
|
||||
return o.GetCompactionTableSize(level+2) * factor
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionL0Trigger() int {
|
||||
if o == nil || o.CompactionL0Trigger == 0 {
|
||||
return DefaultCompactionL0Trigger
|
||||
}
|
||||
return o.CompactionL0Trigger
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionSourceLimit(level int) int {
|
||||
factor := DefaultCompactionSourceLimitFactor
|
||||
if o != nil && o.CompactionSourceLimitFactor > 0 {
|
||||
factor = o.CompactionSourceLimitFactor
|
||||
}
|
||||
return o.GetCompactionTableSize(level+1) * factor
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionTableSize(level int) int {
|
||||
var (
|
||||
base = DefaultCompactionTableSize
|
||||
mult float64
|
||||
)
|
||||
if o != nil {
|
||||
if o.CompactionTableSize > 0 {
|
||||
base = o.CompactionTableSize
|
||||
}
|
||||
if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
|
||||
mult = o.CompactionTableSizeMultiplierPerLevel[level]
|
||||
} else if o.CompactionTableSizeMultiplier > 0 {
|
||||
mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
|
||||
}
|
||||
}
|
||||
if mult == 0 {
|
||||
mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
|
||||
}
|
||||
return int(float64(base) * mult)
|
||||
}
|
||||
|
||||
func (o *Options) GetCompactionTotalSize(level int) int64 {
|
||||
var (
|
||||
base = DefaultCompactionTotalSize
|
||||
mult float64
|
||||
)
|
||||
if o != nil {
|
||||
if o.CompactionTotalSize > 0 {
|
||||
base = o.CompactionTotalSize
|
||||
}
|
||||
if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
|
||||
mult = o.CompactionTotalSizeMultiplierPerLevel[level]
|
||||
} else if o.CompactionTotalSizeMultiplier > 0 {
|
||||
mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
|
||||
}
|
||||
}
|
||||
if mult == 0 {
|
||||
mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
|
||||
}
|
||||
return int64(float64(base) * mult)
|
||||
}
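GetCompactionTableSize and GetCompactionTotalSize above derive a per-level limit as base * multiplier^level, with an optional per-level multiplier override. The standalone sketch below reproduces just that arithmetic with the defaults from this diff (2 MiB tables with multiplier 1.0, 10 MiB totals with multiplier 10.0); sizeAtLevel is an illustrative helper, not a library function.

package main

import (
	"fmt"
	"math"
)

// sizeAtLevel mirrors the documented formula: base * multiplier^level,
// unless an explicit per-level multiplier is provided for that level.
func sizeAtLevel(base int, mult float64, perLevel []float64, level int) int {
	m := math.Pow(mult, float64(level))
	if level < len(perLevel) && perLevel[level] > 0 {
		m = perLevel[level] // explicit per-level multiplier wins
	}
	return int(float64(base) * m)
}

func main() {
	const MiB = 1 << 20
	for level := 0; level < 4; level++ {
		fmt.Printf("L%d: table=%d total=%d\n",
			level,
			sizeAtLevel(2*MiB, 1.0, nil, level),
			sizeAtLevel(10*MiB, 10.0, nil, level))
	}
}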
|
||||
|
||||
func (o *Options) GetComparer() comparer.Comparer {
|
||||
if o == nil || o.Comparer == nil {
|
||||
return comparer.DefaultComparer
|
||||
@@ -221,6 +432,13 @@ func (o *Options) GetCompression() Compression {
|
||||
return o.Compression
|
||||
}
|
||||
|
||||
func (o *Options) GetDisableCompactionBackoff() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
}
|
||||
return o.DisableCompactionBackoff
|
||||
}
|
||||
|
||||
func (o *Options) GetErrorIfExist() bool {
|
||||
if o == nil {
|
||||
return false
|
||||
@@ -242,11 +460,26 @@ func (o *Options) GetFilter() filter.Filter {
|
||||
return o.Filter
|
||||
}
|
||||
|
||||
func (o *Options) GetMaxOpenFiles() int {
|
||||
if o == nil || o.MaxOpenFiles <= 0 {
|
||||
return DefaultMaxOpenFiles
|
||||
func (o *Options) GetMaxMemCompationLevel() int {
|
||||
level := DefaultMaxMemCompationLevel
|
||||
if o != nil {
|
||||
if o.MaxMemCompationLevel > 0 {
|
||||
level = o.MaxMemCompationLevel
|
||||
} else if o.MaxMemCompationLevel == -1 {
|
||||
level = 0
|
||||
}
|
||||
}
|
||||
return o.MaxOpenFiles
|
||||
if level >= o.GetNumLevel() {
|
||||
return o.GetNumLevel() - 1
|
||||
}
|
||||
return level
|
||||
}
|
||||
|
||||
func (o *Options) GetNumLevel() int {
|
||||
if o == nil || o.NumLevel <= 0 {
|
||||
return DefaultNumLevel
|
||||
}
|
||||
return o.NumLevel
|
||||
}
|
||||
|
||||
func (o *Options) GetStrict(strict Strict) bool {
|
||||
@@ -263,6 +496,20 @@ func (o *Options) GetWriteBuffer() int {
|
||||
return o.WriteBuffer
|
||||
}
|
||||
|
||||
func (o *Options) GetWriteL0PauseTrigger() int {
|
||||
if o == nil || o.WriteL0PauseTrigger == 0 {
|
||||
return DefaultWriteL0PauseTrigger
|
||||
}
|
||||
return o.WriteL0PauseTrigger
|
||||
}
|
||||
|
||||
func (o *Options) GetWriteL0SlowdownTrigger() int {
|
||||
if o == nil || o.WriteL0SlowdownTrigger == 0 {
|
||||
return DefaultWriteL0SlowdownTrigger
|
||||
}
|
||||
return o.WriteL0SlowdownTrigger
|
||||
}
|
||||
|
||||
// ReadOptions holds the optional parameters for 'read operation'. The
|
||||
// 'read operation' includes Get, Find and NewIterator.
|
||||
type ReadOptions struct {
|
||||
@@ -273,8 +520,8 @@ type ReadOptions struct {
|
||||
// The default value is false.
|
||||
DontFillCache bool
|
||||
|
||||
// Strict overrides global DB strict level. Only StrictIterator and
|
||||
// StrictBlockChecksum that does have effects here.
|
||||
// Strict will be OR'ed with global DB 'strict level' unless StrictOverride
|
||||
// is present. Currently only StrictReader that has effect here.
|
||||
Strict Strict
|
||||
}
|
||||
|
||||
@@ -316,3 +563,11 @@ func (wo *WriteOptions) GetSync() bool {
|
||||
}
|
||||
return wo.Sync
|
||||
}
|
||||
|
||||
func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
|
||||
if ro.GetStrict(StrictOverride) {
|
||||
return ro.GetStrict(strict)
|
||||
} else {
|
||||
return o.GetStrict(strict) || ro.GetStrict(strict)
|
||||
}
|
||||
}
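The package-level GetStrict helper above combines the global strict flags with the per-read ones: the read options replace the globals only when StrictOverride is set, otherwise the two sets are OR'ed. The sketch below mirrors that logic with illustrative stand-in constants rather than imports of the opt package.

package main

import "fmt"

type Strict uint

const (
	StrictManifest Strict = 1 << iota
	StrictJournalChecksum
	StrictJournal
	StrictBlockChecksum
	StrictCompaction
	StrictReader
	StrictRecovery
	StrictOverride
)

// has mirrors Options.GetStrict: test for a single flag.
func has(s, flag Strict) bool { return s&flag != 0 }

// effective mirrors the package-level GetStrict helper: per-read flags
// replace the global ones only when StrictOverride is present, otherwise
// the two sets are OR'ed.
func effective(global, perRead, flag Strict) bool {
	if has(perRead, StrictOverride) {
		return has(perRead, flag)
	}
	return has(global, flag) || has(perRead, flag)
}

func main() {
	global := StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
	perRead := StrictOverride // disable all strict checks for this read
	fmt.Println(effective(global, perRead, StrictReader)) // false
	fmt.Println(effective(global, 0, StrictReader))       // true
}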
|
||||
|
||||
74  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go  (generated, vendored)
@@ -12,30 +12,86 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func (s *session) setOptions(o *opt.Options) {
|
||||
s.o = &opt.Options{}
|
||||
func dupOptions(o *opt.Options) *opt.Options {
|
||||
newo := &opt.Options{}
|
||||
if o != nil {
|
||||
*s.o = *o
|
||||
*newo = *o
|
||||
}
|
||||
return newo
|
||||
}
|
||||
|
||||
func (s *session) setOptions(o *opt.Options) {
|
||||
no := dupOptions(o)
|
||||
// Alternative filters.
|
||||
if filters := o.GetAltFilters(); len(filters) > 0 {
|
||||
s.o.AltFilters = make([]filter.Filter, len(filters))
|
||||
no.AltFilters = make([]filter.Filter, len(filters))
|
||||
for i, filter := range filters {
|
||||
s.o.AltFilters[i] = &iFilter{filter}
|
||||
no.AltFilters[i] = &iFilter{filter}
|
||||
}
|
||||
}
|
||||
// Block cache.
|
||||
switch o.GetBlockCache() {
|
||||
case nil:
|
||||
s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
|
||||
no.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
|
||||
case opt.NoCache:
|
||||
s.o.BlockCache = nil
|
||||
no.BlockCache = nil
|
||||
}
|
||||
// Comparer.
|
||||
s.icmp = &iComparer{o.GetComparer()}
|
||||
s.o.Comparer = s.icmp
|
||||
no.Comparer = s.icmp
|
||||
// Filter.
|
||||
if filter := o.GetFilter(); filter != nil {
|
||||
s.o.Filter = &iFilter{filter}
|
||||
no.Filter = &iFilter{filter}
|
||||
}
|
||||
|
||||
s.o = &cachedOptions{Options: no}
|
||||
s.o.cache()
|
||||
}
|
||||
|
||||
type cachedOptions struct {
|
||||
*opt.Options
|
||||
|
||||
compactionExpandLimit []int
|
||||
compactionGPOverlaps []int
|
||||
compactionSourceLimit []int
|
||||
compactionTableSize []int
|
||||
compactionTotalSize []int64
|
||||
}
|
||||
|
||||
func (co *cachedOptions) cache() {
|
||||
numLevel := co.Options.GetNumLevel()
|
||||
|
||||
co.compactionExpandLimit = make([]int, numLevel)
|
||||
co.compactionGPOverlaps = make([]int, numLevel)
|
||||
co.compactionSourceLimit = make([]int, numLevel)
|
||||
co.compactionTableSize = make([]int, numLevel)
|
||||
co.compactionTotalSize = make([]int64, numLevel)
|
||||
|
||||
for level := 0; level < numLevel; level++ {
|
||||
co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
|
||||
co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
|
||||
co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
|
||||
co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
|
||||
co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
|
||||
}
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
|
||||
return co.compactionExpandLimit[level]
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
|
||||
return co.compactionGPOverlaps[level]
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
|
||||
return co.compactionSourceLimit[level]
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionTableSize(level int) int {
|
||||
return co.compactionTableSize[level]
|
||||
}
|
||||
|
||||
func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
|
||||
return co.compactionTotalSize[level]
|
||||
}
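cachedOptions above embeds *opt.Options and shadows the per-level getters with slices filled once in cache(), so compaction hot paths do a slice lookup instead of repeating the math.Pow derivation. The following sketch shows the same embed-and-override pattern with illustrative types, not the library's.

package main

import "fmt"

type options struct{ base int }

// SizeAt stands in for an expensive per-level derivation.
func (o *options) SizeAt(level int) int { return o.base << uint(level) }

type cachedOptions struct {
	*options
	sizes []int
}

func newCached(o *options, numLevel int) *cachedOptions {
	co := &cachedOptions{options: o, sizes: make([]int, numLevel)}
	for l := range co.sizes {
		co.sizes[l] = o.SizeAt(l) // compute once, up front
	}
	return co
}

// SizeAt shadows the embedded method with a cheap table lookup.
func (co *cachedOptions) SizeAt(level int) int { return co.sizes[level] }

func main() {
	co := newCached(&options{base: 2 << 20}, 7)
	fmt.Println(co.SizeAt(0), co.SizeAt(3))
}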
|
||||
|
||||
303  Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go  (generated, vendored)
@@ -7,12 +7,13 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@@ -20,18 +21,31 @@ import (
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
)
|
||||
|
||||
type ErrManifestCorrupted struct {
|
||||
Field string
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e *ErrManifestCorrupted) Error() string {
|
||||
return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
|
||||
}
|
||||
|
||||
func newErrManifestCorrupted(f storage.File, field, reason string) error {
|
||||
return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
|
||||
}
|
||||
|
||||
// session represents a persistent database session.
|
||||
type session struct {
|
||||
// Need 64-bit alignment.
|
||||
stFileNum uint64 // current unused file number
|
||||
stNextFileNum uint64 // current unused file number
|
||||
stJournalNum uint64 // current journal file number; need external synchronization
|
||||
stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
|
||||
stSeq uint64 // last mem compacted seq; need external synchronization
|
||||
stSeqNum uint64 // last mem compacted seq; need external synchronization
|
||||
stTempFileNum uint64
|
||||
|
||||
stor storage.Storage
|
||||
storLock util.Releaser
|
||||
o *opt.Options
|
||||
o *cachedOptions
|
||||
icmp *iComparer
|
||||
tops *tOps
|
||||
|
||||
@@ -39,11 +53,12 @@ type session struct {
|
||||
manifestWriter storage.Writer
|
||||
manifestFile storage.File
|
||||
|
||||
stCPtrs [kNumLevels]iKey // compact pointers; need external synchronization
|
||||
stVersion *version // current version
|
||||
vmu sync.Mutex
|
||||
stCompPtrs []iKey // compaction pointers; need external synchronization
|
||||
stVersion *version // current version
|
||||
vmu sync.Mutex
|
||||
}
|
||||
|
||||
// Creates new initialized session instance.
|
||||
func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
|
||||
if stor == nil {
|
||||
return nil, os.ErrInvalid
|
||||
@@ -53,13 +68,14 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
|
||||
return
|
||||
}
|
||||
s = &session{
|
||||
stor: stor,
|
||||
storLock: storLock,
|
||||
stor: stor,
|
||||
storLock: storLock,
|
||||
stCompPtrs: make([]iKey, o.GetNumLevel()),
|
||||
}
|
||||
s.setOptions(o)
|
||||
s.tops = newTableOps(s, s.o.GetMaxOpenFiles())
|
||||
s.setVersion(&version{s: s})
|
||||
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed")
|
||||
s.tops = newTableOps(s, s.o.GetCachedOpenFiles())
|
||||
s.setVersion(newVersion(s))
|
||||
s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -81,6 +97,7 @@ func (s *session) close() {
|
||||
s.stVersion = nil
|
||||
}
|
||||
|
||||
// Release session lock.
|
||||
func (s *session) release() {
|
||||
s.storLock.Release()
|
||||
}
|
||||
@@ -98,26 +115,26 @@ func (s *session) recover() (err error) {
|
||||
// Don't return os.ErrNotExist if the underlying storage contains
|
||||
// other files that belong to LevelDB. So the DB won't get trashed.
|
||||
if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
|
||||
err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")}
|
||||
err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
file, err := s.stor.GetManifest()
|
||||
m, err := s.stor.GetManifest()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
reader, err := file.Open()
|
||||
reader, err := m.Open()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
strict := s.o.GetStrict(opt.StrictManifest)
|
||||
jr := journal.NewReader(reader, dropper{s, file}, strict, true)
|
||||
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
|
||||
|
||||
staging := s.version_NB().newStaging()
|
||||
rec := &sessionRecord{}
|
||||
staging := s.stVersion.newStaging()
|
||||
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
for {
|
||||
var r io.Reader
|
||||
r, err = jr.Next()
|
||||
@@ -126,51 +143,57 @@ func (s *session) recover() (err error) {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
return
|
||||
return errors.SetFile(err, m)
|
||||
}
|
||||
|
||||
err = rec.decode(r)
|
||||
if err == nil {
|
||||
// save compact pointers
|
||||
for _, rp := range rec.compactionPointers {
|
||||
s.stCPtrs[rp.level] = iKey(rp.key)
|
||||
for _, r := range rec.compPtrs {
|
||||
s.stCompPtrs[r.level] = iKey(r.ikey)
|
||||
}
|
||||
// commit record to version staging
|
||||
staging.commit(rec)
|
||||
} else if strict {
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: err}
|
||||
} else {
|
||||
s.logf("manifest error: %v (skipped)", err)
|
||||
err = errors.SetFile(err, m)
|
||||
if strict || !errors.IsCorrupted(err) {
|
||||
return
|
||||
} else {
|
||||
s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
|
||||
}
|
||||
}
|
||||
rec.resetCompactionPointers()
|
||||
rec.resetCompPtrs()
|
||||
rec.resetAddedTables()
|
||||
rec.resetDeletedTables()
|
||||
}
|
||||
|
||||
switch {
|
||||
case !rec.has(recComparer):
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")}
|
||||
return newErrManifestCorrupted(m, "comparer", "missing")
|
||||
case rec.comparer != s.icmp.uName():
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")}
|
||||
case !rec.has(recNextNum):
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")}
|
||||
return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
|
||||
case !rec.has(recNextFileNum):
|
||||
return newErrManifestCorrupted(m, "next-file-num", "missing")
|
||||
case !rec.has(recJournalNum):
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")}
|
||||
case !rec.has(recSeq):
|
||||
return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")}
|
||||
return newErrManifestCorrupted(m, "journal-file-num", "missing")
|
||||
case !rec.has(recSeqNum):
|
||||
return newErrManifestCorrupted(m, "seq-num", "missing")
|
||||
}
|
||||
|
||||
s.manifestFile = file
|
||||
s.manifestFile = m
|
||||
s.setVersion(staging.finish())
|
||||
s.setFileNum(rec.nextNum)
s.setNextFileNum(rec.nextFileNum)
s.recordCommited(rec)
return nil
}

// Commit session; need external synchronization.
func (s *session) commit(r *sessionRecord) (err error) {
v := s.version()
defer v.release()

// spawn new version based on current version
nv := s.version_NB().spawn(r)
nv := v.spawn(r)

if s.manifest == nil {
// manifest journal writer not yet created, create one
@@ -189,22 +212,22 @@ func (s *session) commit(r *sessionRecord) (err error) {

// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version_NB()
v := s.version()

var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cp := s.stCPtrs[level]
tt := v.tables[level]
for _, t := range tt {
if cp == nil || s.icmp.Compare(t.max, cp) > 0 {
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tt[0])
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
@@ -212,29 +235,21 @@ func (s *session) pickCompaction() *compaction {
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}

c := &compaction{s: s, version: v, level: level}
if level == 0 {
min, max := t0.getRange(s.icmp)
t0 = nil
v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, s.icmp.ucmp)
}

c.tables[0] = t0
c.expand()
return c
return newCompaction(s, v, level, t0)
}

// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
v := s.version_NB()
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()

var t0 tFiles
v.tables[level].getOverlaps(min, max, &t0, level != 0, s.icmp.ucmp)
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}

@@ -243,7 +258,7 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(kMaxTableSize)
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
@@ -255,90 +270,124 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
}
}

c := &compaction{s: s, version: v, level: level}
c.tables[0] = t0
return newCompaction(s, v, level, t0)
}

func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}

// compaction represent a compaction state
// compaction represent a compaction state.
type compaction struct {
s *session
version *version
s *session
v *version

level int
tables [2]tFiles
level int
tables [2]tFiles
maxGPOverlaps uint64

gp tFiles
gpidx int
seenKey bool
overlappedBytes uint64
min, max iKey
gp tFiles
gpi int
seenKey bool
gpOverlappedBytes uint64
imin, imax iKey
tPtrs []int
released bool

tPtrs [kNumLevels]int
snapGPI int
snapSeenKey bool
snapGPOverlappedBytes uint64
snapTPtrs []int
}

func (c *compaction) save() {
c.snapGPI = c.gpi
c.snapSeenKey = c.seenKey
c.snapGPOverlappedBytes = c.gpOverlappedBytes
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}

func (c *compaction) restore() {
c.gpi = c.snapGPI
c.seenKey = c.snapSeenKey
c.gpOverlappedBytes = c.snapGPOverlappedBytes
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}

func (c *compaction) release() {
if !c.released {
c.released = true
c.v.release()
}
}

// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
s := c.s
v := c.version

level := c.level
vt0, vt1 := v.tables[level], v.tables[level+1]
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]

t0, t1 := c.tables[0], c.tables[1]
min, max := t0.getRange(s.icmp)
vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, s.icmp.ucmp)

// Get entire range covered by compaction
amin, amax := append(t0, t1...).getRange(s.icmp)
imin, imax := t0.getRange(c.s.icmp)
// We expand t0 here just incase ukey hop across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
if len(t0) != len(c.tables[0]) {
imin, imax = t0.getRange(c.s.icmp)
}
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
// Get entire range covered by compaction.
amin, amax := append(t0, t1...).getRange(c.s.icmp)

// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
var exp0 tFiles
vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, s.icmp.ucmp)
if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes {
var exp1 tFiles
xmin, xmax := exp0.getRange(s.icmp)
vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, s.icmp.ucmp)
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
xmin, xmax := exp0.getRange(c.s.icmp)
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
min, max = xmin, xmax
imin, imax = xmin, xmax
t0, t1 = exp0, exp1
amin, amax = append(t0, t1...).getRange(s.icmp)
amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}

// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
if level+2 < kNumLevels {
v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, s.icmp.ucmp)
if c.level+2 < c.s.o.GetNumLevel() {
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}

c.tables[0], c.tables[1] = t0, t1
c.min, c.max = min, max
c.imin, c.imax = imin, imax
}

// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}

func (c *compaction) isBaseLevelForKey(key []byte) bool {
s := c.s
v := c.version

for level, tt := range v.tables[c.level+2:] {
for c.tPtrs[level] < len(tt) {
t := tt[c.tPtrs[level]]
if s.icmp.uCompare(key, t.max.ukey()) <= 0 {
// We've advanced far enough
if s.icmp.uCompare(key, t.min.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level
func (c *compaction) baseLevelForKey(ukey []byte) bool {
for level, tables := range c.v.tables[c.level+2:] {
for c.tPtrs[level] < len(tables) {
t := tables[c.tPtrs[level]]
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
// We've advanced far enough.
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level.
return false
}
break
@@ -349,55 +398,61 @@ func (c *compaction) isBaseLevelForKey(key []byte) bool {
return true
}

func (c *compaction) shouldStopBefore(key iKey) bool {
for ; c.gpidx < len(c.gp); c.gpidx++ {
gp := c.gp[c.gpidx]
if c.s.icmp.Compare(key, gp.max) <= 0 {
func (c *compaction) shouldStopBefore(ikey iKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
c.overlappedBytes += gp.size
c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true

if c.overlappedBytes > kMaxGrandParentOverlapBytes {
// Too much overlap for current output; start new output
c.overlappedBytes = 0
if c.gpOverlappedBytes > c.maxGPOverlaps {
// Too much overlap for current output; start new output.
c.gpOverlappedBytes = 0
return true
}
return false
}

// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
s := c.s

level := c.level
icap := 2
// Creates iterator slice.
icap := len(c.tables)
if c.level == 0 {
// Special case for level-0
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)

// Options.
ro := &opt.ReadOptions{
DontFillCache: true,
Strict: opt.StrictOverride,
}
strict := c.s.o.GetStrict(opt.StrictCompaction)
if strict {
ro.Strict |= opt.StrictReader
}
strict := s.o.GetStrict(opt.StrictIterator)

for i, tt := range c.tables {
if len(tt) == 0 {
for i, tables := range c.tables {
if len(tables) == 0 {
continue
}

if level+i == 0 {
for _, t := range tt {
its = append(its, s.tops.newIterator(t, nil, ro))
// Level-0 is not sorted and may overlaps each other.
if c.level+i == 0 {
for _, t := range tables {
its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, nil, ro), strict, true)
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}

return iterator.NewMergedIterator(its, s.icmp, true)
return iterator.NewMergedIterator(its, c.s.icmp, strict)
}
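Aside (illustrative only, not part of the vendored diff): the renamed gpOverlappedBytes/maxGPOverlaps bookkeeping above implements LevelDB's rule that a compaction output file is cut whenever it would overlap too many bytes of the grandparent level. The following stand-alone Go sketch shows that rule in isolation; the gpFile type, the field names, and the sizes are stand-ins invented for the example, not goleveldb's actual types.

package main

import (
	"bytes"
	"fmt"
)

// gpFile is a stand-in for a grandparent-level table, holding only what the
// stop rule needs: its largest key and its size in bytes.
type gpFile struct {
	maxKey []byte
	size   uint64
}

// outputCutter mirrors the shouldStopBefore bookkeeping from the diff:
// it walks the grandparent files once, accumulating overlapped bytes, and
// signals a cut when the running total exceeds the limit.
type outputCutter struct {
	gp              []gpFile
	gpi             int
	seenKey         bool
	overlappedBytes uint64
	maxOverlaps     uint64
}

func (c *outputCutter) shouldStopBefore(key []byte) bool {
	for ; c.gpi < len(c.gp); c.gpi++ {
		if bytes.Compare(key, c.gp[c.gpi].maxKey) <= 0 {
			break
		}
		if c.seenKey {
			c.overlappedBytes += c.gp[c.gpi].size
		}
	}
	c.seenKey = true
	if c.overlappedBytes > c.maxOverlaps {
		c.overlappedBytes = 0 // start a new output file
		return true
	}
	return false
}

func main() {
	c := &outputCutter{
		gp:          []gpFile{{[]byte("d"), 10 << 20}, {[]byte("m"), 10 << 20}, {[]byte("z"), 10 << 20}},
		maxOverlaps: 15 << 20, // arbitrary demo limit
	}
	for _, k := range []string{"a", "e", "n", "x"} {
		fmt.Println(k, c.shouldStopBefore([]byte(k))) // prints true only for "n"
	}
}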
225 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go generated vendored
@@ -9,11 +9,11 @@ package leveldb
import (
"bufio"
"encoding/binary"
"errors"
"io"
)
"strings"

var errCorruptManifest = errors.New("leveldb: corrupt manifest")
"github.com/syndtr/goleveldb/leveldb/errors"
)

type byteReader interface {
io.Reader
@@ -22,32 +22,28 @@ type byteReader interface {

// These numbers are written to disk and should not be changed.
const (
recComparer = 1
recJournalNum = 2
recNextNum = 3
recSeq = 4
recCompactionPointer = 5
recDeletedTable = 6
recNewTable = 7
recComparer = 1
recJournalNum = 2
recNextFileNum = 3
recSeqNum = 4
recCompPtr = 5
recDelTable = 6
recAddTable = 7
// 8 was used for large value refs
recPrevJournalNum = 9
)

type cpRecord struct {
level int
key iKey
ikey iKey
}

type ntRecord struct {
type atRecord struct {
level int
num uint64
size uint64
min iKey
max iKey
}

func (r ntRecord) makeFile(s *session) *tFile {
return newTFile(s.getTableFile(r.num), r.size, r.min, r.max)
imin iKey
imax iKey
}

type dtRecord struct {
@@ -56,17 +52,20 @@ type dtRecord struct {
}

type sessionRecord struct {
hasRec int
comparer string
journalNum uint64
prevJournalNum uint64
nextNum uint64
seq uint64
compactionPointers []cpRecord
addedTables []ntRecord
deletedTables []dtRecord
scratch [binary.MaxVarintLen64]byte
err error
numLevel int

hasRec int
comparer string
journalNum uint64
prevJournalNum uint64
nextFileNum uint64
seqNum uint64
compPtrs []cpRecord
addedTables []atRecord
deletedTables []dtRecord

scratch [binary.MaxVarintLen64]byte
err error
}

func (p *sessionRecord) has(rec int) bool {
@@ -88,47 +87,47 @@ func (p *sessionRecord) setPrevJournalNum(num uint64) {
p.prevJournalNum = num
}

func (p *sessionRecord) setNextNum(num uint64) {
p.hasRec |= 1 << recNextNum
p.nextNum = num
func (p *sessionRecord) setNextFileNum(num uint64) {
p.hasRec |= 1 << recNextFileNum
p.nextFileNum = num
}

func (p *sessionRecord) setSeq(seq uint64) {
p.hasRec |= 1 << recSeq
p.seq = seq
func (p *sessionRecord) setSeqNum(num uint64) {
p.hasRec |= 1 << recSeqNum
p.seqNum = num
}

func (p *sessionRecord) addCompactionPointer(level int, key iKey) {
p.hasRec |= 1 << recCompactionPointer
p.compactionPointers = append(p.compactionPointers, cpRecord{level, key})
func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
p.hasRec |= 1 << recCompPtr
p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
}

func (p *sessionRecord) resetCompactionPointers() {
p.hasRec &= ^(1 << recCompactionPointer)
p.compactionPointers = p.compactionPointers[:0]
func (p *sessionRecord) resetCompPtrs() {
p.hasRec &= ^(1 << recCompPtr)
p.compPtrs = p.compPtrs[:0]
}

func (p *sessionRecord) addTable(level int, num, size uint64, min, max iKey) {
p.hasRec |= 1 << recNewTable
p.addedTables = append(p.addedTables, ntRecord{level, num, size, min, max})
func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
p.hasRec |= 1 << recAddTable
p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
}

func (p *sessionRecord) addTableFile(level int, t *tFile) {
p.addTable(level, t.file.Num(), t.size, t.min, t.max)
p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
}

func (p *sessionRecord) resetAddedTables() {
p.hasRec &= ^(1 << recNewTable)
p.hasRec &= ^(1 << recAddTable)
p.addedTables = p.addedTables[:0]
}

func (p *sessionRecord) deleteTable(level int, num uint64) {
p.hasRec |= 1 << recDeletedTable
func (p *sessionRecord) delTable(level int, num uint64) {
p.hasRec |= 1 << recDelTable
p.deletedTables = append(p.deletedTables, dtRecord{level, num})
}

func (p *sessionRecord) resetDeletedTables() {
p.hasRec &= ^(1 << recDeletedTable)
p.hasRec &= ^(1 << recDelTable)
p.deletedTables = p.deletedTables[:0]
}

@@ -161,43 +160,45 @@ func (p *sessionRecord) encode(w io.Writer) error {
p.putUvarint(w, recJournalNum)
p.putUvarint(w, p.journalNum)
}
if p.has(recNextNum) {
p.putUvarint(w, recNextNum)
p.putUvarint(w, p.nextNum)
if p.has(recNextFileNum) {
p.putUvarint(w, recNextFileNum)
p.putUvarint(w, p.nextFileNum)
}
if p.has(recSeq) {
p.putUvarint(w, recSeq)
p.putUvarint(w, p.seq)
if p.has(recSeqNum) {
p.putUvarint(w, recSeqNum)
p.putUvarint(w, p.seqNum)
}
for _, cp := range p.compactionPointers {
p.putUvarint(w, recCompactionPointer)
p.putUvarint(w, uint64(cp.level))
p.putBytes(w, cp.key)
for _, r := range p.compPtrs {
p.putUvarint(w, recCompPtr)
p.putUvarint(w, uint64(r.level))
p.putBytes(w, r.ikey)
}
for _, t := range p.deletedTables {
p.putUvarint(w, recDeletedTable)
p.putUvarint(w, uint64(t.level))
p.putUvarint(w, t.num)
for _, r := range p.deletedTables {
p.putUvarint(w, recDelTable)
p.putUvarint(w, uint64(r.level))
p.putUvarint(w, r.num)
}
for _, t := range p.addedTables {
p.putUvarint(w, recNewTable)
p.putUvarint(w, uint64(t.level))
p.putUvarint(w, t.num)
p.putUvarint(w, t.size)
p.putBytes(w, t.min)
p.putBytes(w, t.max)
for _, r := range p.addedTables {
p.putUvarint(w, recAddTable)
p.putUvarint(w, uint64(r.level))
p.putUvarint(w, r.num)
p.putUvarint(w, r.size)
p.putBytes(w, r.imin)
p.putBytes(w, r.imax)
}
return p.err
}

func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
if p.err != nil {
return 0
}
x, err := binary.ReadUvarint(r)
if err != nil {
if err == io.EOF {
p.err = errCorruptManifest
if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
} else if strings.HasPrefix(err.Error(), "binary:") {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
} else {
p.err = err
}
@@ -206,35 +207,39 @@ func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
return x
}

func (p *sessionRecord) readBytes(r byteReader) []byte {
func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
return p.readUvarintMayEOF(field, r, false)
}

func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
if p.err != nil {
return nil
}
n := p.readUvarint(r)
n := p.readUvarint(field, r)
if p.err != nil {
return nil
}
x := make([]byte, n)
_, p.err = io.ReadFull(r, x)
if p.err != nil {
if p.err == io.EOF {
p.err = errCorruptManifest
if p.err == io.ErrUnexpectedEOF {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
}
return nil
}
return x
}

func (p *sessionRecord) readLevel(r io.ByteReader) int {
func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
if p.err != nil {
return 0
}
x := p.readUvarint(r)
x := p.readUvarint(field, r)
if p.err != nil {
return 0
}
if x >= kNumLevels {
p.err = errCorruptManifest
if x >= uint64(p.numLevel) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
return 0
}
return int(x)
@@ -247,59 +252,59 @@ func (p *sessionRecord) decode(r io.Reader) error {
}
p.err = nil
for p.err == nil {
rec, err := binary.ReadUvarint(br)
if err != nil {
if err == io.EOF {
err = nil
rec := p.readUvarintMayEOF("field-header", br, true)
if p.err != nil {
if p.err == io.EOF {
return nil
}
return err
return p.err
}
switch rec {
case recComparer:
x := p.readBytes(br)
x := p.readBytes("comparer", br)
if p.err == nil {
p.setComparer(string(x))
}
case recJournalNum:
x := p.readUvarint(br)
x := p.readUvarint("journal-num", br)
if p.err == nil {
p.setJournalNum(x)
}
case recPrevJournalNum:
x := p.readUvarint(br)
x := p.readUvarint("prev-journal-num", br)
if p.err == nil {
p.setPrevJournalNum(x)
}
case recNextNum:
x := p.readUvarint(br)
case recNextFileNum:
x := p.readUvarint("next-file-num", br)
if p.err == nil {
p.setNextNum(x)
p.setNextFileNum(x)
}
case recSeq:
x := p.readUvarint(br)
case recSeqNum:
x := p.readUvarint("seq-num", br)
if p.err == nil {
p.setSeq(x)
p.setSeqNum(x)
}
case recCompactionPointer:
level := p.readLevel(br)
key := p.readBytes(br)
case recCompPtr:
level := p.readLevel("comp-ptr.level", br)
ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
p.addCompactionPointer(level, iKey(key))
p.addCompPtr(level, iKey(ikey))
}
case recNewTable:
level := p.readLevel(br)
num := p.readUvarint(br)
size := p.readUvarint(br)
min := p.readBytes(br)
max := p.readBytes(br)
case recAddTable:
level := p.readLevel("add-table.level", br)
num := p.readUvarint("add-table.num", br)
size := p.readUvarint("add-table.size", br)
imin := p.readBytes("add-table.imin", br)
imax := p.readBytes("add-table.imax", br)
if p.err == nil {
p.addTable(level, num, size, min, max)
p.addTable(level, num, size, imin, imax)
}
case recDeletedTable:
level := p.readLevel(br)
num := p.readUvarint(br)
case recDelTable:
level := p.readLevel("del-table.level", br)
num := p.readUvarint("del-table.num", br)
if p.err == nil {
p.deleteTable(level, num)
p.delTable(level, num)
}
}
}
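Aside (illustrative only, not part of the vendored diff): the hasRec field above is a plain bit set keyed by the record tag constants, so setXxx sets a bit, resetXxx clears it, and has tests it. A minimal stand-alone sketch of that idiom follows; the constant values are copied from the diff, while the record type itself is simplified for the example.

package main

import "fmt"

// Field tags, mirroring the on-disk record tags in session_record.go.
const (
	recComparer    = 1
	recJournalNum  = 2
	recNextFileNum = 3
	recSeqNum      = 4
)

type record struct {
	hasRec int
	seqNum uint64
}

// setSeqNum records the value and marks its presence bit, like setSeqNum in the diff.
func (r *record) setSeqNum(n uint64) {
	r.hasRec |= 1 << recSeqNum
	r.seqNum = n
}

// resetSeqNum clears the presence bit, like the resetXxx helpers in the diff.
func (r *record) resetSeqNum() {
	r.hasRec &= ^(1 << recSeqNum)
}

// has reports whether a given field was ever set.
func (r *record) has(rec int) bool {
	return r.hasRec&(1<<uint(rec)) != 0
}

func main() {
	r := &record{}
	fmt.Println(r.has(recSeqNum)) // false
	r.setSeqNum(42)
	fmt.Println(r.has(recSeqNum)) // true
	r.resetSeqNum()
	fmt.Println(r.has(recSeqNum)) // false
}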
18 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go generated vendored
@@ -9,6 +9,8 @@ package leveldb
import (
"bytes"
"testing"

"github.com/syndtr/goleveldb/leveldb/opt"
)

func decodeEncode(v *sessionRecord) (res bool, err error) {
@@ -17,7 +19,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
if err != nil {
return
}
v2 := new(sessionRecord)
v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
err = v.decode(b)
if err != nil {
return
@@ -32,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {

func TestSessionRecord_EncodeDecode(t *testing.T) {
big := uint64(1) << 50
v := new(sessionRecord)
v := &sessionRecord{numLevel: opt.DefaultNumLevel}
i := uint64(0)
test := func() {
res, err := decodeEncode(v)
@@ -47,16 +49,16 @@ func TestSessionRecord_EncodeDecode(t *testing.T) {
for ; i < 4; i++ {
test()
v.addTable(3, big+300+i, big+400+i,
newIKey([]byte("foo"), big+500+1, tVal),
newIKey([]byte("zoo"), big+600+1, tDel))
v.deleteTable(4, big+700+i)
v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal))
newIkey([]byte("foo"), big+500+1, ktVal),
newIkey([]byte("zoo"), big+600+1, ktDel))
v.delTable(4, big+700+i)
v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
}

v.setComparer("foo")
v.setJournalNum(big + 100)
v.setPrevJournalNum(big + 99)
v.setNextNum(big + 200)
v.setSeq(big + 1000)
v.setNextFileNum(big + 200)
v.setSeqNum(big + 1000)
test()
}
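Side note (an assumption-labeled sketch, not part of the vendored code): the putUvarint/readUvarint helpers exercised by this round-trip test wrap the unsigned varint routines in encoding/binary. A small, self-contained round trip of that encoding; the tag value 3 is only borrowed from the diff's recNextFileNum for orientation.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Write a tag followed by a value, the same shape a manifest field uses.
	scratch := make([]byte, binary.MaxVarintLen64)
	for _, v := range []uint64{3 /* e.g. a field tag */, 1<<50 + 200} {
		n := binary.PutUvarint(scratch, v)
		buf.Write(scratch[:n])
	}

	// Read them back; binary.ReadUvarint needs an io.ByteReader.
	r := bytes.NewReader(buf.Bytes())
	tag, _ := binary.ReadUvarint(r)
	val, _ := binary.ReadUvarint(r)
	fmt.Println(tag, val) // 3 1125899906842824
}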
86 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go generated vendored
@@ -14,7 +14,7 @@ import (
"github.com/syndtr/goleveldb/leveldb/storage"
)

// logging
// Logging.

type dropper struct {
s *session
@@ -22,22 +22,17 @@ type dropper struct {
}

func (d dropper) Drop(err error) {
if e, ok := err.(journal.DroppedError); ok {
if e, ok := err.(*journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
} else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
}
}

func (s *session) log(v ...interface{}) {
s.stor.Log(fmt.Sprint(v...))
}
func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) }
func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }

func (s *session) logf(format string, v ...interface{}) {
s.stor.Log(fmt.Sprintf(format, v...))
}

// file utils
// File utils.

func (s *session) getJournalFile(num uint64) storage.File {
return s.stor.GetFile(num, storage.TypeJournal)
@@ -56,9 +51,14 @@ func (s *session) newTemp() storage.File {
return s.stor.GetFile(num, storage.TypeTemp)
}

// session state
func (s *session) tableFileFromRecord(r atRecord) *tFile {
return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
}

// Get current version.
// Session state.

// Get current version. This will incr version ref, must call
// version.release (exactly once) after use.
func (s *session) version() *version {
s.vmu.Lock()
defer s.vmu.Unlock()
@@ -66,85 +66,80 @@ func (s *session) version() *version {
return s.stVersion
}

// Get current version; no barrier.
func (s *session) version_NB() *version {
return s.stVersion
}

// Set current version to v.
func (s *session) setVersion(v *version) {
s.vmu.Lock()
v.ref = 1
v.ref = 1 // Holds by session.
if old := s.stVersion; old != nil {
v.ref++
v.ref++ // Holds by old version.
old.next = v
old.release_NB()
old.releaseNB()
}
s.stVersion = v
s.vmu.Unlock()
}

// Get current unused file number.
func (s *session) fileNum() uint64 {
return atomic.LoadUint64(&s.stFileNum)
func (s *session) nextFileNum() uint64 {
return atomic.LoadUint64(&s.stNextFileNum)
}

// Get current unused file number to num.
func (s *session) setFileNum(num uint64) {
atomic.StoreUint64(&s.stFileNum, num)
// Set current unused file number to num.
func (s *session) setNextFileNum(num uint64) {
atomic.StoreUint64(&s.stNextFileNum, num)
}

// Mark file number as used.
func (s *session) markFileNum(num uint64) {
num += 1
nextFileNum := num + 1
for {
old, x := s.stFileNum, num
old, x := s.stNextFileNum, nextFileNum
if old > x {
x = old
}
if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
break
}
}
}

// Allocate a file number.
func (s *session) allocFileNum() (num uint64) {
return atomic.AddUint64(&s.stFileNum, 1) - 1
func (s *session) allocFileNum() uint64 {
return atomic.AddUint64(&s.stNextFileNum, 1) - 1
}

// Reuse given file number.
func (s *session) reuseFileNum(num uint64) {
for {
old, x := s.stFileNum, num
old, x := s.stNextFileNum, num
if old != x+1 {
x = old
}
if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
break
}
}
}

// manifest related utils
// Manifest related utils.

// Fill given session record obj with current states; need external
// synchronization.
func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
r.setNextNum(s.fileNum())
r.setNextFileNum(s.nextFileNum())

if snapshot {
if !r.has(recJournalNum) {
r.setJournalNum(s.stJournalNum)
}

if !r.has(recSeq) {
r.setSeq(s.stSeq)
if !r.has(recSeqNum) {
r.setSeqNum(s.stSeqNum)
}

for level, ik := range s.stCPtrs {
for level, ik := range s.stCompPtrs {
if ik != nil {
r.addCompactionPointer(level, ik)
r.addCompPtr(level, ik)
}
}

@@ -152,7 +147,7 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
}
}

// Mark if record has been commited, this will update session state;
// Mark if record has been committed, this will update session state;
// need external synchronization.
func (s *session) recordCommited(r *sessionRecord) {
if r.has(recJournalNum) {
@@ -163,12 +158,12 @@ func (s *session) recordCommited(r *sessionRecord) {
s.stPrevJournalNum = r.prevJournalNum
}

if r.has(recSeq) {
s.stSeq = r.seq
if r.has(recSeqNum) {
s.stSeqNum = r.seqNum
}

for _, p := range r.compactionPointers {
s.stCPtrs[p.level] = iKey(p.key)
for _, p := range r.compPtrs {
s.stCompPtrs[p.level] = iKey(p.ikey)
}
}

@@ -183,10 +178,11 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
jw := journal.NewWriter(writer)

if v == nil {
v = s.version_NB()
v = s.version()
defer v.release()
}
if rec == nil {
rec = new(sessionRecord)
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
}
s.fillRecord(rec, true)
v.fillRecord(rec)
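Aside (illustrative only): markFileNum above raises stNextFileNum to at least num+1 without ever lowering it, using a compare-and-swap retry loop. A stand-alone sketch of the same pattern on a plain uint64 follows; the markUsed helper is invented for the example and uses an explicit atomic load, which the original field read does not.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markUsed bumps *next to num+1 unless it is already higher, with the same
// CompareAndSwap retry shape as session.markFileNum in the diff.
func markUsed(next *uint64, num uint64) {
	want := num + 1
	for {
		old := atomic.LoadUint64(next)
		if old >= want {
			return // someone already allocated past this number
		}
		if atomic.CompareAndSwapUint64(next, old, want) {
			return
		}
	}
}

func main() {
	var next uint64
	var wg sync.WaitGroup
	for i := uint64(0); i < 100; i++ {
		wg.Add(1)
		go func(n uint64) {
			defer wg.Done()
			markUsed(&next, n)
		}(i)
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&next)) // 100, regardless of goroutine order
}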
12 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go generated vendored
@@ -344,19 +344,17 @@ type fileWrap struct {
}

func (fw fileWrap) Sync() error {
if err := fw.File.Sync(); err != nil {
return err
}
if fw.f.Type() == TypeManifest {
// Also sync parent directory if file type is manifest.
// See: https://code.google.com/p/leveldb/issues/detail?id=190.
f, err := os.Open(fw.f.fs.path)
if err != nil {
return err
}
defer f.Close()
if err := f.Sync(); err != nil {
if err := syncDir(fw.f.fs.path); err != nil {
return err
}
}
return fw.File.Sync()
return nil
}

func (fw fileWrap) Close() error {
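A hedged sketch of what the syncDir call above amounts to on POSIX systems: after creating or renaming a manifest file, the containing directory is opened and fsynced so the new directory entry itself is durable. The helper name syncDir comes from the diff, but this stand-alone version and the demo around it are assumptions for illustration, not the vendored implementation.

package main

import (
	"log"
	"os"
	"path/filepath"
)

// syncDir fsyncs a directory so that recently created or renamed entries in
// it survive a crash. This mirrors the fix referenced in the diff
// (https://code.google.com/p/leveldb/issues/detail?id=190).
func syncDir(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer f.Close()
	return f.Sync()
}

func main() {
	dir, err := os.MkdirTemp("", "manifest-sync-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Create a manifest-like file, then sync its parent directory so the
	// new entry is durable even if the machine crashes right after.
	path := filepath.Join(dir, "MANIFEST-000001")
	if err := os.WriteFile(path, []byte("session record\n"), 0644); err != nil {
		log.Fatal(err)
	}
	if err := syncDir(dir); err != nil {
		log.Fatal(err)
	}
	log.Println("manifest entry and directory synced")
}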
Some files were not shown because too many files have changed in this diff.