Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-24 06:28:10 -05:00

Compare commits (810 commits)
[Commit listing: 810 commit SHA1 hashes, from c5243cd4d5 (newest) down to dddf563105 (oldest); the author, date, and message columns of the listing were not captured in this export.]
.gitignore (vendored): 9 changed lines
@@ -1,7 +1,12 @@
syncthing
syncthing.exe
stcli
stcli.exe
*.tar.gz
*.zip
*.asc
*.sublime*
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
CONTRIBUTING.md
@@ -1,7 +1,43 @@
## Reporting Bugs

Please file bugs in the [Github Issue
Tracker](https://github.com/syncthing/syncthing/issues). Include at
least the following:

- What happened

- What did you expect to happen instead of what *did* happen, if it's
  not crazy obvious

- What operating system, operating system version and version of
  Syncthing you are running

- The same for other connected nodes, where relevant

- Screenshot if the issue concerns something visible in the GUI

- Console log entries, where possible and relevant

If you're not sure whether something is relevant, erring on the side of
too much information will never get you yelled at. :)

## Contributing Translations

All translations are done via
[Transifex](https://www.transifex.com/projects/p/syncthing/). If you
wish to contribute to a translation, just head over there and sign up.
Before every release, the language resources are updated from the
latest info on Transifex.

## Contributing Code

Please do contribute! If you want to contribute but are unsure where to
start, the [Contributions Needed
page](https://github.com/calmh/syncthing/wiki/Contributions-Needed)
lists areas in need of attention.
topic](http://discourse.syncthing.net/t/49) lists areas in need of
attention. In general, any open issues are fair game! Be prepared for a
[certain amount of
review](https://discourse.syncthing.net/t/733); it's all in the name of
quality. :)

## Licensing

@@ -11,11 +47,13 @@ Commons Attribution 4.0 International License. You retain the copyright
to code you have written.

When accepting your first contribution, the maintainer of the project
will ensure that you are added to the CONTRIBUTORS file.
will ensure that you are added to the CONTRIBUTORS file. You are welcome
to add yourself as a separate commit in your first pull request.

## Building

[See the wiki](https://github.com/calmh/syncthing/wiki/Building)
[See the documentation](http://discourse.syncthing.net/t/44) on how to
get started with a build environment.

## Branches

@@ -42,13 +80,14 @@ Yes please!

## Style

`go fmt`
- `go fmt`

- Unix line breaks

## Documentation

[Hack it here](https://github.com/calmh/syncthing/wiki)
[Over here!](http://discourse.syncthing.net/category/documentation)

## License

MIT
CONTRIBUTORS: 14 changed lines
@@ -1,4 +1,16 @@
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
James Patterson <jamespatterson@operamail.com>
Gilli Sigurdsson <gilli@vx.is>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Marcin Dziadus <dziadus.marcin@gmail.com>
Michael Tilli <pyfisch@gmail.com>
Philippe Schommers <philippe@schommers.be>
Ryan Sullivan <kayoticsully@gmail.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Godeps/Godeps.json (generated): 63 changed lines
@@ -1,32 +1,67 @@
{
	"ImportPath": "github.com/calmh/syncthing",
	"GoVersion": "go1.2.1",
	"ImportPath": "github.com/syncthing/syncthing",
	"GoVersion": "go1.3.1",
	"Packages": [
		"./cmd/syncthing"
		"./cmd/..."
	],
	"Deps": [
		{
			"ImportPath": "bitbucket.org/kardianos/osext",
			"Comment": "null-13",
			"Rev": "5d3ddcf53a508cc2f7404eaebf546ef2cb5cdb6e"
		},
		{
			"ImportPath": "code.google.com/p/go.crypto/bcrypt",
			"Comment": "null-216",
			"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
		},
		{
			"ImportPath": "code.google.com/p/go.crypto/blowfish",
			"Comment": "null-216",
			"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
		},
		{
			"ImportPath": "code.google.com/p/go.text/transform",
			"Comment": "null-81",
			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
			"Comment": "null-90",
			"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
		},
		{
			"ImportPath": "code.google.com/p/go.text/unicode/norm",
			"Comment": "null-81",
			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
			"Comment": "null-90",
			"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
		},
		{
			"ImportPath": "github.com/calmh/ini",
			"Rev": "386c4240a9684d91d9ec4d93651909b49c7269e1"
			"ImportPath": "code.google.com/p/snappy-go/snappy",
			"Comment": "null-15",
			"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
		},
		{
			"ImportPath": "github.com/codegangsta/inject",
			"Rev": "9aea7a2fa5b79ef7fc00f63a575e72df33b4e886"
			"ImportPath": "github.com/bkaradzic/go-lz4",
			"Rev": "77e2ba877bde9da31213bec75dbbe197fa507c21"
		},
		{
			"ImportPath": "github.com/codegangsta/martini",
			"Comment": "v0.1-142-g8659df7",
			"Rev": "8659df7a51aebe6c6120268cd5a8b4c34fa8441a"
			"ImportPath": "github.com/calmh/xdr",
			"Rev": "a597b63b87d6140f79084c8aab214b4d533833a1"
		},
		{
			"ImportPath": "github.com/juju/ratelimit",
			"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
			"Rev": "457e6f75905f7c1316afd8c43ad323f4c32b31c2"
		},
		{
			"ImportPath": "github.com/vitrun/qart/coding",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		},
		{
			"ImportPath": "github.com/vitrun/qart/gf256",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		},
		{
			"ImportPath": "github.com/vitrun/qart/qr",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		}
	]
}
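For readers unfamiliar with the Godeps format, the sketch below parses a Godeps/Godeps.json like the one above and prints each pinned dependency. It is an illustration only, not part of the diff; the struct fields simply mirror the keys visible in the file (ImportPath, GoVersion, Packages, Deps, Comment, Rev).

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// godeps mirrors the keys shown in Godeps/Godeps.json above.
type godeps struct {
	ImportPath string
	GoVersion  string
	Packages   []string
	Deps       []struct {
		ImportPath string
		Comment    string
		Rev        string
	}
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var g godeps
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (built with %s)\n", g.ImportPath, g.GoVersion)
	for _, d := range g.Deps {
		// Rev pins the exact upstream commit for each dependency.
		fmt.Printf("  %-45s %s\n", d.ImportPath, d.Rev)
	}
}
```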
Godeps/_workspace/src/bitbucket.org/kardianos/osext/LICENSE (generated, vendored, new file): 20 added lines
@@ -0,0 +1,20 @@
Copyright (c) 2012 Daniel Theophanes

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.

3. This notice may not be removed or altered from any source
distribution.
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext.go (generated, vendored, new file): 32 added lines
@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Extensions to the standard "os" package.
package osext

import "path/filepath"

// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
	p, err := executable()
	return filepath.Clean(p), err
}

// Returns same path as Executable, returns just the folder
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
	p, err := Executable()
	if err != nil {
		return "", err
	}
	folder, _ := filepath.Split(p)
	return folder, nil
}

// Depricated. Same as Executable().
func GetExePath() (exePath string, err error) {
	return Executable()
}
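A minimal, hypothetical usage sketch for this package (the import path matches the vendored location above; the main package and its output are illustrative, not part of the diff):

```go
package main

import (
	"fmt"
	"log"

	"bitbucket.org/kardianos/osext"
)

func main() {
	// Resolve the path of the running binary, independent of argv[0].
	exe, err := osext.Executable()
	if err != nil {
		log.Fatal(err)
	}
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("binary:", exe)
	fmt.Println("folder:", dir)
}
```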
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_plan9.go (generated, vendored, new file): 20 added lines
@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"syscall"
	"os"
	"strconv"
)

func executable() (string, error) {
	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
	if err != nil {
		return "", err
	}
	defer f.Close()
	return syscall.Fd2path(int(f.Fd()))
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_procfs.go (generated, vendored, new file): 25 added lines
@@ -0,0 +1,25 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux netbsd openbsd

package osext

import (
	"errors"
	"os"
	"runtime"
)

func executable() (string, error) {
	switch runtime.GOOS {
	case "linux":
		return os.Readlink("/proc/self/exe")
	case "netbsd":
		return os.Readlink("/proc/curproc/exe")
	case "openbsd":
		return os.Readlink("/proc/curproc/file")
	}
	return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_sysctl.go (generated, vendored, new file): 79 added lines
@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin freebsd

package osext

import (
	"os"
	"path/filepath"
	"runtime"
	"syscall"
	"unsafe"
)

var initCwd, initCwdErr = os.Getwd()

func executable() (string, error) {
	var mib [4]int32
	switch runtime.GOOS {
	case "freebsd":
		mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
	case "darwin":
		mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
	}

	n := uintptr(0)
	// Get length.
	_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	buf := make([]byte, n)
	_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	for i, v := range buf {
		if v == 0 {
			buf = buf[:i]
			break
		}
	}
	var err error
	execPath := string(buf)
	// execPath will not be empty due to above checks.
	// Try to get the absolute path if the execPath is not rooted.
	if execPath[0] != '/' {
		execPath, err = getAbs(execPath)
		if err != nil {
			return execPath, err
		}
	}
	// For darwin KERN_PROCARGS may return the path to a symlink rather than the
	// actual executable.
	if runtime.GOOS == "darwin" {
		if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
			return execPath, err
		}
	}
	return execPath, nil
}

func getAbs(execPath string) (string, error) {
	if initCwdErr != nil {
		return execPath, initCwdErr
	}
	// The execPath may begin with a "../" or a "./" so clean it first.
	// Join the two paths, trailing and starting slashes undetermined, so use
	// the generic Join function.
	return filepath.Join(initCwd, filepath.Clean(execPath)), nil
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_test.go (generated, vendored, new file): 79 added lines
@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin linux freebsd netbsd windows

package osext

import (
	"fmt"
	"os"
	oexec "os/exec"
	"path/filepath"
	"runtime"
	"testing"
)

const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"

func TestExecPath(t *testing.T) {
	ep, err := Executable()
	if err != nil {
		t.Fatalf("ExecPath failed: %v", err)
	}
	// we want fn to be of the form "dir/prog"
	dir := filepath.Dir(filepath.Dir(ep))
	fn, err := filepath.Rel(dir, ep)
	if err != nil {
		t.Fatalf("filepath.Rel: %v", err)
	}
	cmd := &oexec.Cmd{}
	// make child start with a relative program path
	cmd.Dir = dir
	cmd.Path = fn
	// forge argv[0] for child, so that we can verify we could correctly
	// get real path of the executable without influenced by argv[0].
	cmd.Args = []string{"-", "-test.run=XXXX"}
	cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("exec(self) failed: %v", err)
	}
	outs := string(out)
	if !filepath.IsAbs(outs) {
		t.Fatalf("Child returned %q, want an absolute path", out)
	}
	if !sameFile(outs, ep) {
		t.Fatalf("Child returned %q, not the same file as %q", out, ep)
	}
}

func sameFile(fn1, fn2 string) bool {
	fi1, err := os.Stat(fn1)
	if err != nil {
		return false
	}
	fi2, err := os.Stat(fn2)
	if err != nil {
		return false
	}
	return os.SameFile(fi1, fi2)
}

func init() {
	if e := os.Getenv(execPath_EnvVar); e != "" {
		// first chdir to another path
		dir := "/"
		if runtime.GOOS == "windows" {
			dir = filepath.VolumeName(".")
		}
		os.Chdir(dir)
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
		os.Exit(0)
	}
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_windows.go (generated, vendored, new file): 34 added lines
@@ -0,0 +1,34 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"syscall"
	"unicode/utf16"
	"unsafe"
)

var (
	kernel                = syscall.MustLoadDLL("kernel32.dll")
	getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
)

// GetModuleFileName() with hModule = NULL
func executable() (exePath string, err error) {
	return getModuleFileName()
}

func getModuleFileName() (string, error) {
	var n uint32
	b := make([]uint16, syscall.MAX_PATH)
	size := uint32(len(b))

	r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
	n = uint32(r0)
	if n == 0 {
		return "", e1
	}
	return string(utf16.Decode(b[0:n])), nil
}
Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/base64.go (generated, vendored, new file): 35 added lines
@@ -0,0 +1,35 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bcrypt

import "encoding/base64"

const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var bcEncoding = base64.NewEncoding(alphabet)

func base64Encode(src []byte) []byte {
	n := bcEncoding.EncodedLen(len(src))
	dst := make([]byte, n)
	bcEncoding.Encode(dst, src)
	for dst[n-1] == '=' {
		n--
	}
	return dst[:n]
}

func base64Decode(src []byte) ([]byte, error) {
	numOfEquals := 4 - (len(src) % 4)
	for i := 0; i < numOfEquals; i++ {
		src = append(src, '=')
	}

	dst := make([]byte, bcEncoding.DecodedLen(len(src)))
	n, err := bcEncoding.Decode(dst, src)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}
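The alphabet above is bcrypt's non-standard base64 ordering, and base64Encode drops the trailing '=' padding. Here is a standalone sketch of the same encoding using only the standard library; the helper name bcryptB64 is made up, and the sample salt bytes come from the vendored TestUnpaddedBase64Encoding mentioned further down:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// bcryptB64 encodes src with bcrypt's "./A-Za-z0-9" alphabet and no padding,
// matching what base64Encode in the vendored file produces.
func bcryptB64(src []byte) string {
	enc := base64.NewEncoding("./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
	return strings.TrimRight(enc.EncodeToString(src), "=")
}

func main() {
	salt := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
	fmt.Println(bcryptB64(salt)) // XajjQvNhvvRt5GSeFk1xFe, per the vendored tests
}
```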
Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go (generated, vendored, new file): 294 added lines
@@ -0,0 +1,294 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt

// The code is a port of Provos and Mazières's C implementation.
import (
	"code.google.com/p/go.crypto/blowfish"
	"crypto/rand"
	"crypto/subtle"
	"errors"
	"fmt"
	"io"
	"strconv"
)

const (
	MinCost     int = 4  // the minimum allowable cost as passed in to GenerateFromPassword
	MaxCost     int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
	DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
)

// The error returned from CompareHashAndPassword when a password and hash do
// not match.
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")

// The error returned from CompareHashAndPassword when a hash is too short to
// be a bcrypt hash.
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")

// The error returned from CompareHashAndPassword when a hash was created with
// a bcrypt algorithm newer than this implementation.
type HashVersionTooNewError byte

// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
type InvalidHashPrefixError byte

type InvalidCostError int

const (
	majorVersion       = '2'
	minorVersion       = 'a'
	maxSaltSize        = 16
	maxCryptedHashSize = 23
	encodedSaltSize    = 22
	encodedHashSize    = 31
	minHashSize        = 59
)

// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
var magicCipherData = []byte{
	0x4f, 0x72, 0x70, 0x68,
	0x65, 0x61, 0x6e, 0x42,
	0x65, 0x68, 0x6f, 0x6c,
	0x64, 0x65, 0x72, 0x53,
	0x63, 0x72, 0x79, 0x44,
	0x6f, 0x75, 0x62, 0x74,
}

type hashed struct {
	hash  []byte
	salt  []byte
	cost  int // allowed range is MinCost to MaxCost
	major byte
	minor byte
}

// GenerateFromPassword returns the bcrypt hash of the password at the given
// cost. If the cost given is less than MinCost, the cost will be set to
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
// to compare the returned hashed password with its cleartext version.
func GenerateFromPassword(password []byte, cost int) ([]byte, error)

// CompareHashAndPassword compares a bcrypt hashed password with its possible
// plaintext equivalent. Returns nil on success, or an error on failure.
func CompareHashAndPassword(hashedPassword, password []byte) error

// Cost returns the hashing cost used to create the given hashed
// password. When, in the future, the hashing cost of a password system needs
// to be increased in order to adjust for greater computational power, this
// function allows one to establish which passwords need to be updated.
func Cost(hashedPassword []byte) (int, error)

[Condensed listing. The vendored file also contains the Error methods on the three error types and the unexported helpers newFromPassword, newFromHash, bcrypt, expensiveBlowfishSetup, (*hashed).Hash, (*hashed).decodeVersion, (*hashed).decodeCost, (*hashed).String and checkCost.]
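A minimal, hypothetical usage sketch for the exported API above (the import path follows the vendored location; the main package and sample password are illustrative): it hashes a password, verifies it, and reads back the cost.

```go
package main

import (
	"fmt"
	"log"

	"code.google.com/p/go.crypto/bcrypt"
)

func main() {
	password := []byte("correct horse battery staple")

	// Hash at the package default cost (DefaultCost = 10).
	hash, err := bcrypt.GenerateFromPassword(password, bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}

	// CompareHashAndPassword returns nil on a match.
	if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
		log.Fatal("password mismatch: ", err)
	}

	cost, err := bcrypt.Cost(hash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored hash %s at cost %d\n", hash, cost)
}
```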
Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go (generated, vendored, new file): 226 added lines
@@ -0,0 +1,226 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bcrypt

import (
	"bytes"
	"fmt"
	"testing"
)

[Condensed listing. The vendored file defines the test and benchmark functions TestBcryptingIsEasy, TestBcryptingIsCorrect, TestVeryShortPasswords, TestTooLongPasswordsWork, TestInvalidHashErrors (driven by the invalidTests table of InvalidHashTest cases), TestUnpaddedBase64Encoding, TestCost, TestCostValidationInHash, TestCostReturnsWithLeadingZeroes, TestMinorNotRequired, BenchmarkEqual and BenchmarkGeneration.]
159
Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/block.go
generated
vendored
Normal file
159
Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/block.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
// getNextWord returns the next big-endian uint32 value from the byte slice
|
||||
// at the given position in a circular manner, updating the position.
|
||||
func getNextWord(b []byte, pos *int) uint32 {
|
||||
var w uint32
|
||||
j := *pos
|
||||
for i := 0; i < 4; i++ {
|
||||
w = w<<8 | uint32(b[j])
|
||||
j++
|
||||
if j >= len(b) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
*pos = j
|
||||
return w
|
||||
}
|
||||
|
||||
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
|
||||
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
|
||||
// pi and substitution tables for calls to Encrypt. This is used, primarily,
|
||||
// by the bcrypt package to reuse the Blowfish key schedule during its
|
||||
// set up. It's unlikely that you need to use this directly.
|
||||
func ExpandKey(key []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
// Using inlined getNextWord for performance.
|
||||
var d uint32
|
||||
for k := 0; k < 4; k++ {
|
||||
d = d<<8 | uint32(key[j])
|
||||
j++
|
||||
if j >= len(key) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
c.p[i] ^= d
|
||||
}
|
||||
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
// This is similar to ExpandKey, but folds the salt during the key
|
||||
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
|
||||
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
|
||||
// and specializing it here is useful.
|
||||
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
c.p[i] ^= getNextWord(key, &j)
|
||||
}
|
||||
|
||||
j = 0
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[0]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
|
||||
xr ^= c.p[17]
|
||||
return xr, xl
|
||||
}
|
||||
|
||||
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[17]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
|
||||
xr ^= c.p[0]
|
||||
return xr, xl
|
||||
}
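The sixteen unrolled statements in encryptBlock and decryptBlock are the standard Blowfish Feistel rounds. As an illustration only (not part of the vendored file; roundF and encryptBlockLoop are assumed names), the same computation can be written as a loop over the P-array. It reads the unexported tables, so it would have to sit inside the blowfish package.

	// roundF is the Blowfish round function used by the unrolled code above:
	// ((s0[a] + s1[b]) ^ s2[c]) + s3[d] over the four bytes of x.
	func roundF(c *Cipher, x uint32) uint32 {
		return ((c.s0[byte(x>>24)] + c.s1[byte(x>>16)]) ^ c.s2[byte(x>>8)]) + c.s3[byte(x)]
	}

	// encryptBlockLoop mirrors encryptBlock: 16 Feistel rounds, a final swap,
	// and whitening with p[17]. decryptBlock is the same structure with the
	// P-array indices walked in reverse.
	func encryptBlockLoop(l, r uint32, c *Cipher) (uint32, uint32) {
		xl, xr := l, r
		xl ^= c.p[0]
		for i := 1; i <= 16; i += 2 {
			xr ^= roundF(c, xl) ^ c.p[i]
			xl ^= roundF(c, xr) ^ c.p[i+1]
		}
		xr ^= c.p[17]
		return xr, xl
	}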
|
||||
274 Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/blowfish_test.go (generated, vendored, new file)
@@ -0,0 +1,274 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
import "testing"
|
||||
|
||||
type CryptTest struct {
|
||||
key []byte
|
||||
in []byte
|
||||
out []byte
|
||||
}
|
||||
|
||||
// Test vector values are from http://www.schneier.com/code/vectors.txt.
|
||||
var encryptTests = []CryptTest{
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
|
||||
{
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
|
||||
{
|
||||
[]byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
[]byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
|
||||
{
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
|
||||
|
||||
{
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
|
||||
{
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
|
||||
{
|
||||
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
|
||||
{
|
||||
[]byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
|
||||
[]byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
|
||||
[]byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
|
||||
{
|
||||
[]byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
|
||||
[]byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
|
||||
[]byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
|
||||
{
|
||||
[]byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
|
||||
[]byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
|
||||
[]byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
|
||||
{
|
||||
[]byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
|
||||
[]byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
|
||||
[]byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
|
||||
{
|
||||
[]byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
|
||||
[]byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
|
||||
[]byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
|
||||
{
|
||||
[]byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
|
||||
[]byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
|
||||
[]byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
|
||||
{
|
||||
[]byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
|
||||
[]byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
|
||||
[]byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
|
||||
{
|
||||
[]byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
|
||||
[]byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
|
||||
[]byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
|
||||
{
|
||||
[]byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
|
||||
[]byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
|
||||
[]byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
|
||||
{
|
||||
[]byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
|
||||
[]byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
|
||||
[]byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
|
||||
{
|
||||
[]byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
|
||||
[]byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
|
||||
[]byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
|
||||
{
|
||||
[]byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
|
||||
[]byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
|
||||
[]byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
|
||||
{
|
||||
[]byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
|
||||
[]byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
|
||||
[]byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
|
||||
{
|
||||
[]byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
|
||||
[]byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
|
||||
[]byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
|
||||
{
|
||||
[]byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
|
||||
[]byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
|
||||
[]byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
|
||||
{
|
||||
[]byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
|
||||
[]byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
|
||||
[]byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
|
||||
{
|
||||
[]byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
|
||||
[]byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
|
||||
[]byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
|
||||
{
|
||||
[]byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
|
||||
[]byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
|
||||
[]byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
|
||||
{
|
||||
[]byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
|
||||
[]byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
|
||||
[]byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
|
||||
{
|
||||
[]byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
|
||||
{
|
||||
[]byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
|
||||
{
|
||||
[]byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
|
||||
{
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
|
||||
{
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
|
||||
{
|
||||
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
|
||||
}
|
||||
|
||||
func TestCipherEncrypt(t *testing.T) {
|
||||
for i, tt := range encryptTests {
|
||||
c, err := NewCipher(tt.key)
|
||||
if err != nil {
|
||||
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
|
||||
continue
|
||||
}
|
||||
ct := make([]byte, len(tt.out))
|
||||
c.Encrypt(ct, tt.in)
|
||||
for j, v := range ct {
|
||||
if v != tt.out[j] {
|
||||
t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCipherDecrypt(t *testing.T) {
|
||||
for i, tt := range encryptTests {
|
||||
c, err := NewCipher(tt.key)
|
||||
if err != nil {
|
||||
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
|
||||
continue
|
||||
}
|
||||
pt := make([]byte, len(tt.in))
|
||||
c.Decrypt(pt, tt.out)
|
||||
for j, v := range pt {
|
||||
if v != tt.in[j] {
|
||||
t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaltedCipherKeyLength(t *testing.T) {
|
||||
if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
|
||||
t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
|
||||
}
|
||||
|
||||
// A 57-byte key. One over the typical blowfish restriction.
|
||||
key := []byte("012345678901234567890123456789012345678901234567890123456")
|
||||
if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
|
||||
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test vectors generated with Blowfish from OpenSSH.
|
||||
var saltedVectors = [][8]byte{
|
||||
{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
|
||||
{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
|
||||
{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
|
||||
{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
|
||||
{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
|
||||
{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
|
||||
{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
|
||||
{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
|
||||
{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
|
||||
{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
|
||||
{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
|
||||
{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
|
||||
{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
|
||||
{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
|
||||
{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
|
||||
{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
|
||||
{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
|
||||
{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
|
||||
{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
|
||||
{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
|
||||
{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
|
||||
{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
|
||||
{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
|
||||
{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
|
||||
{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
|
||||
{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
|
||||
{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
|
||||
{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
|
||||
{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
|
||||
{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
|
||||
{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
|
||||
{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
|
||||
}
|
||||
|
||||
func TestSaltedCipher(t *testing.T) {
|
||||
var key, salt [32]byte
|
||||
for i := range key {
|
||||
key[i] = byte(i)
|
||||
salt[i] = byte(i + 32)
|
||||
}
|
||||
for i, v := range saltedVectors {
|
||||
c, err := NewSaltedCipher(key[:], salt[:i])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var buf [8]byte
|
||||
c.Encrypt(buf[:], buf[:])
|
||||
if v != buf {
|
||||
t.Errorf("%d: expected %x, got %x", i, v, buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExpandKeyWithSalt(b *testing.B) {
|
||||
key := make([]byte, 32)
|
||||
salt := make([]byte, 16)
|
||||
c, _ := NewCipher(key)
|
||||
for i := 0; i < b.N; i++ {
|
||||
expandKeyWithSalt(key, salt, c)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkExpandKey(b *testing.B) {
|
||||
key := make([]byte, 32)
|
||||
c, _ := NewCipher(key)
|
||||
for i := 0; i < b.N; i++ {
|
||||
ExpandKey(key, c)
|
||||
}
|
||||
}
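The vectors and benchmarks above can be run in place with the standard toolchain; one possible invocation (assuming the vendored directory builds on its own, which it should, since the package imports only "strconv" and the tests only "testing") is:

	go test -bench ExpandKey ./Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/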
|
||||
91 Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/cipher.go (generated, vendored, new file)
@@ -0,0 +1,91 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish

// The code is a port of Bruce Schneier's C implementation.
// See http://www.schneier.com/blowfish.html.

import "strconv"

// The Blowfish block size in bytes.
const BlockSize = 8

// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
	p              [18]uint32
	s0, s1, s2, s3 [256]uint32
}

type KeySizeError int

func (k KeySizeError) Error() string {
	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}

// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
	var result Cipher
	if k := len(key); k < 1 || k > 56 {
		return nil, KeySizeError(k)
	}
	initCipher(&result)
	ExpandKey(key, &result)
	return &result, nil
}

// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
	if len(salt) == 0 {
		return NewCipher(key)
	}
	var result Cipher
	if k := len(key); k < 1 {
		return nil, KeySizeError(k)
	}
	initCipher(&result)
	expandKeyWithSalt(key, salt, &result)
	return &result, nil
}

// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }

// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
	l, r = encryptBlock(l, r, c)
	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}

// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
	l, r = decryptBlock(l, r, c)
	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}

func initCipher(c *Cipher) {
	copy(c.p[0:], p[0:])
	copy(c.s0[0:], s0[0:])
	copy(c.s1[0:], s1[0:])
	copy(c.s2[0:], s2[0:])
	copy(c.s3[0:], s3[0:])
}
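A usage sketch for the API above (not part of the vendored code): encrypting and then decrypting a single 8-byte block. The import path is assumed to be the canonical one implied by the vendored directory.

	package main

	import (
		"fmt"

		"code.google.com/p/go.crypto/blowfish"
	)

	func main() {
		key := []byte("a demo key") // any length from 1 to 56 bytes
		c, err := blowfish.NewCipher(key)
		if err != nil {
			panic(err)
		}

		src := []byte("8 bytes!") // exactly one block
		ct := make([]byte, blowfish.BlockSize)
		c.Encrypt(ct, src)

		pt := make([]byte, blowfish.BlockSize)
		c.Decrypt(pt, ct)
		fmt.Printf("%q -> %x -> %q\n", src, ct, pt) // round-trips back to "8 bytes!"
	}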
199 Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/const.go (generated, vendored, new file)
@@ -0,0 +1,199 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The startup permutation array and substitution boxes.
|
||||
// They are the hexadecimal digits of PI; see:
|
||||
// http://www.schneier.com/code/constants.txt.
|
||||
|
||||
package blowfish
|
||||
|
||||
var s0 = [256]uint32{
|
||||
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
|
||||
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
|
||||
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
|
||||
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
|
||||
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
|
||||
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
|
||||
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
|
||||
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
|
||||
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
|
||||
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
|
||||
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
|
||||
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
|
||||
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
|
||||
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
|
||||
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
|
||||
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
|
||||
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
|
||||
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
|
||||
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
|
||||
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
|
||||
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
|
||||
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
|
||||
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
|
||||
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
|
||||
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
|
||||
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
|
||||
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
|
||||
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
|
||||
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
|
||||
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
|
||||
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
|
||||
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
|
||||
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
|
||||
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
|
||||
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
|
||||
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
|
||||
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
|
||||
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
|
||||
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
|
||||
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
|
||||
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
|
||||
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
|
||||
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
|
||||
}
|
||||
|
||||
var s1 = [256]uint32{
|
||||
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
|
||||
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
|
||||
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
|
||||
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
|
||||
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
|
||||
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
|
||||
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
|
||||
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
|
||||
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
|
||||
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
|
||||
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
|
||||
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
|
||||
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
|
||||
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
|
||||
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
|
||||
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
|
||||
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
|
||||
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
|
||||
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
|
||||
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
|
||||
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
|
||||
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
|
||||
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
|
||||
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
|
||||
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
|
||||
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
|
||||
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
|
||||
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
|
||||
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
|
||||
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
|
||||
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
|
||||
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
|
||||
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
|
||||
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
|
||||
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
|
||||
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
|
||||
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
|
||||
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
|
||||
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
|
||||
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
|
||||
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
|
||||
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
|
||||
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
|
||||
}
|
||||
|
||||
var s2 = [256]uint32{
|
||||
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
|
||||
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
|
||||
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
|
||||
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
|
||||
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
|
||||
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
|
||||
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
|
||||
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
|
||||
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
|
||||
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
|
||||
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
|
||||
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
|
||||
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
|
||||
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
|
||||
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
|
||||
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
|
||||
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
|
||||
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
|
||||
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
|
||||
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
|
||||
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
|
||||
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
|
||||
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
|
||||
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
|
||||
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
|
||||
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
|
||||
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
|
||||
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
|
||||
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
|
||||
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
|
||||
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
|
||||
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
|
||||
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
|
||||
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
|
||||
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
|
||||
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
|
||||
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
|
||||
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
|
||||
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
|
||||
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
|
||||
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
|
||||
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
|
||||
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
|
||||
}
|
||||
|
||||
var s3 = [256]uint32{
|
||||
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
|
||||
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
|
||||
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
|
||||
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
|
||||
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
|
||||
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
|
||||
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
|
||||
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
|
||||
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
|
||||
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
|
||||
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
|
||||
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
|
||||
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
|
||||
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
|
||||
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
|
||||
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
|
||||
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
|
||||
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
|
||||
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
|
||||
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
|
||||
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
|
||||
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
|
||||
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
|
||||
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
|
||||
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
|
||||
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
|
||||
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
|
||||
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
|
||||
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
|
||||
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
|
||||
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
|
||||
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
|
||||
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
|
||||
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
|
||||
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
|
||||
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
|
||||
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
|
||||
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
|
||||
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
|
||||
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
|
||||
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
|
||||
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
|
||||
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
|
||||
}
|
||||
|
||||
var p = [18]uint32{
|
||||
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
|
||||
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
|
||||
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
|
||||
}
|
||||
146 Godeps/_workspace/src/code.google.com/p/go.text/transform/transform.go (generated, vendored)
@@ -9,6 +9,7 @@
|
||||
package transform
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
@@ -127,7 +128,7 @@ func (r *Reader) Read(p []byte) (int, error) {
|
||||
// cannot read more bytes into src.
|
||||
r.transformComplete = r.err != nil
|
||||
continue
|
||||
case err == ErrShortDst && r.dst1 != 0:
|
||||
case err == ErrShortDst && (r.dst1 != 0 || n != 0):
|
||||
// Make room in dst by copying out, and try again.
|
||||
continue
|
||||
case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
|
||||
@@ -210,7 +211,7 @@ func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
n += nSrc
|
||||
}
|
||||
switch {
|
||||
case err == ErrShortDst && nDst > 0:
|
||||
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
|
||||
case err == ErrShortSrc && len(src) < len(w.src):
|
||||
m := copy(w.src, src)
|
||||
// If w.n > 0, bytes from data were already copied to w.src and n
|
||||
@@ -467,30 +468,125 @@ func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err err
|
||||
return
|
||||
}
|
||||
|
||||
// Bytes returns a new byte slice with the result of converting b using t.
|
||||
// If any unrecoverable error occurs it returns nil.
|
||||
func Bytes(t Transformer, b []byte) []byte {
|
||||
out := make([]byte, len(b))
|
||||
n := 0
|
||||
for {
|
||||
nDst, nSrc, err := t.Transform(out[n:], b, true)
|
||||
n += nDst
|
||||
if err == nil {
|
||||
return out[:n]
|
||||
} else if err != ErrShortDst {
|
||||
return nil
|
||||
}
|
||||
b = b[nSrc:]
|
||||
// grow returns a new []byte that is longer than b, and copies the first n bytes
|
||||
// of b to the start of the new slice.
|
||||
func grow(b []byte, n int) []byte {
|
||||
m := len(b)
|
||||
if m <= 256 {
|
||||
m *= 2
|
||||
} else {
|
||||
m += m >> 1
|
||||
}
|
||||
buf := make([]byte, m)
|
||||
copy(buf, b[:n])
|
||||
return buf
|
||||
}
|
||||
|
||||
// Grow the destination buffer.
|
||||
sz := len(out)
|
||||
if sz <= 256 {
|
||||
sz *= 2
|
||||
} else {
|
||||
sz += sz >> 1
|
||||
const initialBufSize = 128
|
||||
|
||||
// String returns a string with the result of converting s[:n] using t, where
|
||||
// n <= len(s). If err == nil, n will be len(s).
|
||||
func String(t Transformer, s string) (result string, n int, err error) {
|
||||
if s == "" {
|
||||
return "", 0, nil
|
||||
}
|
||||
|
||||
// Allocate only once. Note that both dst and src escape when passed to
|
||||
// Transform.
|
||||
buf := [2 * initialBufSize]byte{}
|
||||
dst := buf[:initialBufSize:initialBufSize]
|
||||
src := buf[initialBufSize : 2*initialBufSize]
|
||||
|
||||
// Avoid allocation if the transformed string is identical to the original.
|
||||
// After this loop, pDst will point to the furthest point in s for which it
|
||||
// could be detected that t gives equal results, src[:nSrc] will
|
||||
// indicate the last processed chunk of s for which the output is not equal
|
||||
// and dst[:nDst] will be the transform of this chunk.
|
||||
var nDst, nSrc int
|
||||
pDst := 0 // Used as index in both src and dst in this loop.
|
||||
for {
|
||||
n := copy(src, s[pDst:])
|
||||
nDst, nSrc, err = t.Transform(dst, src[:n], pDst+n == len(s))
|
||||
|
||||
// Note 1: we will not enter the loop with pDst == len(s) and we will
|
||||
// not end the loop with it either. So if nSrc is 0, this means there is
|
||||
// some kind of error from which we cannot recover given the current
|
||||
// buffer sizes. We will give up in this case.
|
||||
// Note 2: it is not entirely correct to simply do a bytes.Equal as
|
||||
// a Transformer may buffer internally. It will work in most cases,
|
||||
// though, and no harm is done if it doesn't work.
|
||||
// TODO: let transformers implement an optional Spanner interface, akin
|
||||
// to norm's QuickSpan. This would even allow us to avoid any allocation.
|
||||
if nSrc == 0 || !bytes.Equal(dst[:nDst], src[:nSrc]) {
|
||||
break
|
||||
}
|
||||
|
||||
if pDst += nDst; pDst == len(s) {
|
||||
return s, pDst, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Move the bytes seen so far to dst.
|
||||
pSrc := pDst + nSrc
|
||||
if pDst+nDst <= initialBufSize {
|
||||
copy(dst[pDst:], dst[:nDst])
|
||||
} else {
|
||||
b := make([]byte, len(s)+nDst-nSrc)
|
||||
copy(b[pDst:], dst[:nDst])
|
||||
dst = b
|
||||
}
|
||||
copy(dst, s[:pDst])
|
||||
pDst += nDst
|
||||
|
||||
if err != nil && err != ErrShortDst && err != ErrShortSrc {
|
||||
return string(dst[:pDst]), pSrc, err
|
||||
}
|
||||
|
||||
// Complete the string with the remainder.
|
||||
for {
|
||||
n := copy(src, s[pSrc:])
|
||||
nDst, nSrc, err = t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
|
||||
switch err {
|
||||
case nil:
|
||||
if pSrc == len(s) {
|
||||
return string(dst[:pDst]), pSrc, nil
|
||||
}
|
||||
case ErrShortDst:
|
||||
// Do not grow as long as we can make progress. This may avoid
|
||||
// excessive allocations.
|
||||
if nDst == 0 {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
case ErrShortSrc:
|
||||
if nSrc == 0 {
|
||||
src = grow(src, 0)
|
||||
}
|
||||
default:
|
||||
return string(dst[:pDst]), pSrc, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bytes returns a new byte slice with the result of converting b[:n] using t,
|
||||
// where n <= len(b). If err == nil, n will be len(b).
|
||||
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
|
||||
dst := make([]byte, len(b))
|
||||
pDst, pSrc := 0, 0
|
||||
for {
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], b[pSrc:], true)
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
if err != ErrShortDst {
|
||||
return dst[:pDst], pSrc, err
|
||||
}
|
||||
|
||||
// Grow the destination buffer, but do not grow as long as we can make
|
||||
// progress. This may avoid excessive allocations.
|
||||
if nDst == 0 {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
out2 := make([]byte, sz)
|
||||
copy(out2, out[:n])
|
||||
out = out2
|
||||
}
|
||||
}
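A usage sketch for String and Bytes (not from the vendored sources): a toy ASCII-uppercasing Transformer. It assumes the vendored import path and that, at this revision, the Transformer interface requires only the Transform method.

	package main

	import (
		"fmt"

		"code.google.com/p/go.text/transform"
	)

	// upperASCII uppercases ASCII letters and copies every other byte through.
	type upperASCII struct{}

	func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
		n := len(src)
		if n > len(dst) {
			n, err = len(dst), transform.ErrShortDst
		}
		for i := 0; i < n; i++ {
			c := src[i]
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			dst[i] = c
		}
		return n, n, err
	}

	func main() {
		s, n, err := transform.String(upperASCII{}, "hello, world")
		fmt.Println(s, n, err) // HELLO, WORLD 12 <nil>

		b, _, _ := transform.Bytes(upperASCII{}, []byte("hello"))
		fmt.Printf("%s\n", b) // HELLO
	}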
|
||||
|
||||
200 Godeps/_workspace/src/code.google.com/p/go.text/transform/transform_test.go (generated, vendored)
@@ -12,6 +12,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
@@ -132,6 +133,43 @@ func (e rleEncode) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err e
|
||||
return nDst, nSrc, nil
|
||||
}
|
||||
|
||||
// trickler consumes all input bytes, but writes a single byte at a time to dst.
|
||||
type trickler []byte
|
||||
|
||||
func (t *trickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
*t = append(*t, src...)
|
||||
if len(*t) == 0 {
|
||||
return 0, 0, nil
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return 0, len(src), ErrShortDst
|
||||
}
|
||||
dst[0] = (*t)[0]
|
||||
*t = (*t)[1:]
|
||||
if len(*t) > 0 {
|
||||
err = ErrShortDst
|
||||
}
|
||||
return 1, len(src), err
|
||||
}
|
||||
|
||||
// delayedTrickler is like trickler, but delays writing output to dst. This is
|
||||
// highly unlikely to be relevant in practice, but it seems like a good idea
|
||||
// to have some tolerance as long as progress can be detected.
|
||||
type delayedTrickler []byte
|
||||
|
||||
func (t *delayedTrickler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
if len(*t) > 0 && len(dst) > 0 {
|
||||
dst[0] = (*t)[0]
|
||||
*t = (*t)[1:]
|
||||
nDst = 1
|
||||
}
|
||||
*t = append(*t, src...)
|
||||
if len(*t) > 0 {
|
||||
err = ErrShortDst
|
||||
}
|
||||
return nDst, len(src), err
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
desc string
|
||||
t Transformer
|
||||
@@ -170,6 +208,15 @@ func (c chain) String() string {
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
{
|
||||
desc: "empty",
|
||||
t: lowerCaseASCII{},
|
||||
src: "",
|
||||
dstSize: 100,
|
||||
srcSize: 100,
|
||||
wantStr: "",
|
||||
},
|
||||
|
||||
{
|
||||
desc: "basic",
|
||||
t: lowerCaseASCII{},
|
||||
@@ -378,6 +425,24 @@ var testCases = []testCase{
|
||||
ioSize: 10,
|
||||
wantStr: "4a6b2b4c4d1d",
|
||||
},
|
||||
|
||||
{
|
||||
desc: "trickler",
|
||||
t: &trickler{},
|
||||
src: "abcdefghijklm",
|
||||
dstSize: 3,
|
||||
srcSize: 15,
|
||||
wantStr: "abcdefghijklm",
|
||||
},
|
||||
|
||||
{
|
||||
desc: "delayedTrickler",
|
||||
t: &delayedTrickler{},
|
||||
src: "abcdefghijklm",
|
||||
dstSize: 3,
|
||||
srcSize: 15,
|
||||
wantStr: "abcdefghijklm",
|
||||
},
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
@@ -685,7 +750,7 @@ func doTransform(tc testCase) (res string, iter int, err error) {
|
||||
switch {
|
||||
case err == nil && len(in) != 0:
|
||||
case err == ErrShortSrc && nSrc > 0:
|
||||
case err == ErrShortDst && nDst > 0:
|
||||
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
|
||||
default:
|
||||
return string(out), iter, err
|
||||
}
|
||||
@@ -875,27 +940,136 @@ func TestRemoveFunc(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes(t *testing.T) {
|
||||
func testString(t *testing.T, f func(Transformer, string) (string, int, error)) {
|
||||
for _, tt := range append(testCases, chainTests()...) {
|
||||
if tt.desc == "allowStutter = true" {
|
||||
// We don't have control over the buffer size, so we eliminate tests
|
||||
// that depend on a specific buffer size being set.
|
||||
continue
|
||||
}
|
||||
got := Bytes(tt.t, []byte(tt.src))
|
||||
if tt.wantErr != nil {
|
||||
if tt.wantErr != ErrShortDst && tt.wantErr != ErrShortSrc {
|
||||
// Bytes should return nil for non-recoverable errors.
|
||||
if g, w := (got == nil), (tt.wantErr != nil); g != w {
|
||||
t.Errorf("%s:error: got %v; want %v", tt.desc, g, w)
|
||||
}
|
||||
}
|
||||
// The output strings in the tests that expect an error will
|
||||
// almost certainly not be the same as the result of Bytes.
|
||||
reset(tt.t)
|
||||
if tt.wantErr == ErrShortDst || tt.wantErr == ErrShortSrc {
|
||||
// The result string will be different.
|
||||
continue
|
||||
}
|
||||
if string(got) != tt.wantStr {
|
||||
got, n, err := f(tt.t, tt.src)
|
||||
if tt.wantErr != err {
|
||||
t.Errorf("%s:error: got %v; want %v", tt.desc, err, tt.wantErr)
|
||||
}
|
||||
if got, want := err == nil, n == len(tt.src); got != want {
|
||||
t.Errorf("%s:n: got %v; want %v", tt.desc, got, want)
|
||||
}
|
||||
if got != tt.wantStr {
|
||||
t.Errorf("%s:string: got %q; want %q", tt.desc, got, tt.wantStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytes(t *testing.T) {
|
||||
testString(t, func(z Transformer, s string) (string, int, error) {
|
||||
b, n, err := Bytes(z, []byte(s))
|
||||
return string(b), n, err
|
||||
})
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
testString(t, String)
|
||||
|
||||
// Overrun the internal destination buffer.
|
||||
for i, s := range []string{
|
||||
strings.Repeat("a", initialBufSize-1),
|
||||
strings.Repeat("a", initialBufSize+0),
|
||||
strings.Repeat("a", initialBufSize+1),
|
||||
strings.Repeat("A", initialBufSize-1),
|
||||
strings.Repeat("A", initialBufSize+0),
|
||||
strings.Repeat("A", initialBufSize+1),
|
||||
strings.Repeat("A", 2*initialBufSize-1),
|
||||
strings.Repeat("A", 2*initialBufSize+0),
|
||||
strings.Repeat("A", 2*initialBufSize+1),
|
||||
strings.Repeat("a", initialBufSize-2) + "A",
|
||||
strings.Repeat("a", initialBufSize-1) + "A",
|
||||
strings.Repeat("a", initialBufSize+0) + "A",
|
||||
strings.Repeat("a", initialBufSize+1) + "A",
|
||||
} {
|
||||
got, _, _ := String(lowerCaseASCII{}, s)
|
||||
if want := strings.ToLower(s); got != want {
|
||||
t.Errorf("%d:dst buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
|
||||
}
|
||||
}
|
||||
|
||||
// Overrun the internal source buffer.
|
||||
for i, s := range []string{
|
||||
strings.Repeat("a", initialBufSize-1),
|
||||
strings.Repeat("a", initialBufSize+0),
|
||||
strings.Repeat("a", initialBufSize+1),
|
||||
strings.Repeat("a", 2*initialBufSize+1),
|
||||
strings.Repeat("a", 2*initialBufSize+0),
|
||||
strings.Repeat("a", 2*initialBufSize+1),
|
||||
} {
|
||||
got, _, _ := String(rleEncode{}, s)
|
||||
if want := fmt.Sprintf("%da", len(s)); got != want {
|
||||
t.Errorf("%d:src buffer test: got %s (%d); want %s (%d)", i, got, len(got), want, len(want))
|
||||
}
|
||||
}
|
||||
|
||||
// Test allocations for non-changing strings.
|
||||
// Note we still need to allocate a single buffer.
|
||||
for i, s := range []string{
|
||||
"",
|
||||
"123",
|
||||
"123456789",
|
||||
strings.Repeat("a", initialBufSize),
|
||||
strings.Repeat("a", 10*initialBufSize),
|
||||
} {
|
||||
if n := testing.AllocsPerRun(5, func() { String(&lowerCaseASCII{}, s) }); n > 1 {
|
||||
t.Errorf("%d: #allocs was %f; want 1", i, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBytesAllocation tests that buffer growth stays limited with the trickler
|
||||
// transformer, which behaves oddly but within spec. In case buffer growth is
|
||||
// not correctly handled, the test will either panic with a failed allocation or
|
||||
// thrash. To ensure the tests terminate under the last condition, we time out
|
||||
// after some sufficiently long period of time.
|
||||
func TestBytesAllocation(t *testing.T) {
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
in := bytes.Repeat([]byte{'a'}, 1000)
|
||||
tr := trickler(make([]byte, 1))
|
||||
Bytes(&tr, in)
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Error("time out, likely due to excessive allocation")
|
||||
}
|
||||
}
|
||||
|
||||
// TestStringAllocation tests that buffer growth stays limited with the trickler
|
||||
// transformer, which behaves oddly but within spec. In case buffer growth is
|
||||
// not correctly handled, the test will either panic with a failed allocation or
|
||||
// thrash. To ensure the tests terminate under the last condition, we time out
|
||||
// after some sufficiently long period of time.
|
||||
func TestStringAllocation(t *testing.T) {
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
in := strings.Repeat("a", 1000)
|
||||
tr := trickler(make([]byte, 1))
|
||||
String(&tr, in)
|
||||
done <- true
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Error("time out, likely due to excessive allocation")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkStringLower(b *testing.B) {
|
||||
in := strings.Repeat("a", 4096)
|
||||
for i := 0; i < b.N; i++ {
|
||||
String(&lowerCaseASCII{}, in)
|
||||
}
|
||||
}
|
||||
|
||||
212 Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/maketables.go (generated, vendored)
@@ -11,7 +11,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -24,6 +23,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"code.google.com/p/go.text/internal/ucd"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -63,31 +64,7 @@ var localFiles = flag.Bool("local",
|
||||
|
||||
var logger = log.New(os.Stderr, "", log.Lshortfile)
|
||||
|
||||
// UnicodeData.txt has form:
|
||||
// 0037;DIGIT SEVEN;Nd;0;EN;;7;7;7;N;;;;;
|
||||
// 007A;LATIN SMALL LETTER Z;Ll;0;L;;;;;N;;;005A;;005A
|
||||
// See http://unicode.org/reports/tr44/ for full explanation
|
||||
// The fields:
|
||||
const (
|
||||
FCodePoint = iota
|
||||
FName
|
||||
FGeneralCategory
|
||||
FCanonicalCombiningClass
|
||||
FBidiClass
|
||||
FDecompMapping
|
||||
FDecimalValue
|
||||
FDigitValue
|
||||
FNumericValue
|
||||
FBidiMirrored
|
||||
FUnicode1Name
|
||||
FISOComment
|
||||
FSimpleUppercaseMapping
|
||||
FSimpleLowercaseMapping
|
||||
FSimpleTitlecaseMapping
|
||||
NumField
|
||||
|
||||
MaxChar = 0x10FFFF // anything above this shouldn't exist
|
||||
)
|
||||
const MaxChar = 0x10FFFF // anything above this shouldn't exist
|
||||
|
||||
// Quick Check properties of runes allow us to quickly
|
||||
// determine whether a rune may occur in a normal form.
|
||||
@@ -232,7 +209,7 @@ func openReader(file string) (input io.ReadCloser) {
|
||||
return
|
||||
}
|
||||
|
||||
func parseDecomposition(s string, skipfirst bool) (a []rune, e error) {
|
||||
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
|
||||
decomp := strings.Split(s, " ")
|
||||
if len(decomp) > 0 && skipfirst {
|
||||
decomp = decomp[1:]
|
||||
@@ -247,56 +224,31 @@ func parseDecomposition(s string, skipfirst bool) (a []rune, e error) {
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func parseCharacter(line string) {
|
||||
field := strings.Split(line, ";")
|
||||
if len(field) != NumField {
|
||||
logger.Fatalf("%5s: %d fields (expected %d)\n", line, len(field), NumField)
|
||||
}
|
||||
x, err := strconv.ParseUint(field[FCodePoint], 16, 64)
|
||||
point := int(x)
|
||||
if err != nil {
|
||||
logger.Fatalf("%.5s...: %s", line, err)
|
||||
}
|
||||
if point == 0 {
|
||||
return // not interesting and we use 0 as unset
|
||||
}
|
||||
if point > MaxChar {
|
||||
logger.Fatalf("%5s: Rune %X > MaxChar (%X)", line, point, MaxChar)
|
||||
return
|
||||
}
|
||||
state := SNormal
|
||||
switch {
|
||||
case strings.Index(field[FName], ", First>") > 0:
|
||||
state = SFirst
|
||||
case strings.Index(field[FName], ", Last>") > 0:
|
||||
state = SLast
|
||||
}
|
||||
firstChar := lastChar + 1
|
||||
lastChar = rune(point)
|
||||
if state != SLast {
|
||||
firstChar = lastChar
|
||||
}
|
||||
x, err = strconv.ParseUint(field[FCanonicalCombiningClass], 10, 64)
|
||||
if err != nil {
|
||||
logger.Fatalf("%U: bad ccc field: %s", int(x), err)
|
||||
}
|
||||
ccc := uint8(x)
|
||||
decmap := field[FDecompMapping]
|
||||
exp, e := parseDecomposition(decmap, false)
|
||||
isCompat := false
|
||||
if e != nil {
|
||||
if len(decmap) > 0 {
|
||||
exp, e = parseDecomposition(decmap, true)
|
||||
if e != nil {
|
||||
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, int(x), decmap, e)
|
||||
func loadUnicodeData() {
|
||||
f := openReader("UnicodeData.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
r := p.Rune(ucd.CodePoint)
|
||||
char := &chars[r]
|
||||
|
||||
char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass))
|
||||
decmap := p.String(ucd.DecompMapping)
|
||||
|
||||
exp, err := parseDecomposition(decmap, false)
|
||||
isCompat := false
|
||||
if err != nil {
|
||||
if len(decmap) > 0 {
|
||||
exp, err = parseDecomposition(decmap, true)
|
||||
if err != nil {
|
||||
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
|
||||
}
|
||||
isCompat = true
|
||||
}
|
||||
isCompat = true
|
||||
}
|
||||
}
|
||||
for i := firstChar; i <= lastChar; i++ {
|
||||
char := &chars[i]
|
||||
char.name = field[FName]
|
||||
char.codePoint = i
|
||||
|
||||
char.name = p.String(ucd.Name)
|
||||
char.codePoint = r
|
||||
char.forms[FCompatibility].decomp = exp
|
||||
if !isCompat {
|
||||
char.forms[FCanonical].decomp = exp
|
||||
@@ -306,24 +258,9 @@ func parseCharacter(line string) {
|
||||
if len(decmap) > 0 {
|
||||
char.forms[FCompatibility].decomp = exp
|
||||
}
|
||||
char.ccc = ccc
|
||||
char.state = SMissing
|
||||
if i == lastChar {
|
||||
char.state = state
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func loadUnicodeData() {
|
||||
f := openReader("UnicodeData.txt")
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
parseCharacter(scanner.Text())
|
||||
}
|
||||
if scanner.Err() != nil {
|
||||
logger.Fatal(scanner.Err())
|
||||
if err := p.Err(); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -354,47 +291,22 @@ func compactCCC() {
|
||||
}
|
||||
}
|
||||
|
||||
var singlePointRe = regexp.MustCompile(`^([0-9A-F]+) *$`)
|
||||
|
||||
// CompositionExclusions.txt has form:
|
||||
// 0958 # ...
|
||||
// See http://unicode.org/reports/tr44/ for full explanation
|
||||
func parseExclusion(line string) int {
|
||||
comment := strings.Index(line, "#")
|
||||
if comment >= 0 {
|
||||
line = line[0:comment]
|
||||
}
|
||||
if len(line) == 0 {
|
||||
return 0
|
||||
}
|
||||
matches := singlePointRe.FindStringSubmatch(line)
|
||||
if len(matches) != 2 {
|
||||
logger.Fatalf("%s: %d matches (expected 1)\n", line, len(matches))
|
||||
}
|
||||
point, err := strconv.ParseUint(matches[1], 16, 64)
|
||||
if err != nil {
|
||||
logger.Fatalf("%.5s...: %s", line, err)
|
||||
}
|
||||
return int(point)
|
||||
}
|
||||
|
||||
func loadCompositionExclusions() {
|
||||
f := openReader("CompositionExclusions.txt")
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
point := parseExclusion(scanner.Text())
|
||||
if point == 0 {
|
||||
continue
|
||||
}
|
||||
c := &chars[point]
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
c := &chars[p.Rune(0)]
|
||||
if c.excludeInComp {
|
||||
logger.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
|
||||
}
|
||||
c.excludeInComp = true
|
||||
}
|
||||
if scanner.Err() != nil {
|
||||
log.Fatal(scanner.Err())
|
||||
if e := p.Err(); e != nil {
|
||||
logger.Fatal(e)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -988,8 +900,6 @@ func verifyComputed() {
|
||||
}
|
||||
}
|
||||
|
||||
var qcRe = regexp.MustCompile(`([0-9A-F\.]+) *; (NF.*_QC); ([YNM]) #.*`)
|
||||
|
||||
// Use values in DerivedNormalizationProps.txt to compare against the
|
||||
// values we computed.
|
||||
// DerivedNormalizationProps.txt has form:
|
||||
@@ -999,27 +909,13 @@ var qcRe = regexp.MustCompile(`([0-9A-F\.]+) *; (NF.*_QC); ([YNM]) #.*`)
|
||||
func testDerived() {
|
||||
f := openReader("DerivedNormalizationProps.txt")
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
qc := qcRe.FindStringSubmatch(line)
|
||||
if qc == nil {
|
||||
continue
|
||||
}
|
||||
rng := strings.Split(qc[1], "..")
|
||||
i, err := strconv.ParseUint(rng[0], 16, 64)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
j := i
|
||||
if len(rng) > 1 {
|
||||
j, err = strconv.ParseUint(rng[1], 16, 64)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
r := p.Rune(0)
|
||||
c := &chars[r]
|
||||
|
||||
var ftype, mode int
|
||||
qt := strings.TrimSpace(qc[2])
|
||||
qt := p.String(1)
|
||||
switch qt {
|
||||
case "NFC_QC":
|
||||
ftype, mode = FCanonical, MComposed
|
||||
@@ -1030,10 +926,10 @@ func testDerived() {
|
||||
case "NFKD_QC":
|
||||
ftype, mode = FCompatibility, MDecomposed
|
||||
default:
|
||||
log.Fatalf(`Unexpected quick check type "%s"`, qt)
|
||||
continue
|
||||
}
|
||||
var qr QCResult
|
||||
switch qc[3] {
|
||||
switch p.String(2) {
|
||||
case "Y":
|
||||
qr = QCYes
|
||||
case "N":
|
||||
@@ -1041,27 +937,15 @@ func testDerived() {
|
||||
case "M":
|
||||
qr = QCMaybe
|
||||
default:
|
||||
log.Fatalf(`Unexpected quick check value "%s"`, qc[3])
|
||||
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
|
||||
}
|
||||
var lastFailed bool
|
||||
// Verify current
|
||||
for ; i <= j; i++ {
|
||||
c := &chars[int(i)]
|
||||
c.forms[ftype].verified[mode] = true
|
||||
curqr := c.forms[ftype].quickCheck[mode]
|
||||
if curqr != qr {
|
||||
if !lastFailed {
|
||||
logger.Printf("%s: %.4X..%.4X -- %s\n",
|
||||
qt, int(i), int(j), line[0:50])
|
||||
}
|
||||
logger.Printf("%U: FAILED %s (was %v need %v)\n",
|
||||
int(i), qt, curqr, qr)
|
||||
lastFailed = true
|
||||
}
|
||||
if got := c.forms[ftype].quickCheck[mode]; got != qr {
|
||||
logger.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
|
||||
}
|
||||
c.forms[ftype].verified[mode] = true
|
||||
}
|
||||
if scanner.Err() != nil {
|
||||
logger.Fatal(scanner.Err())
|
||||
if err := p.Err(); err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
// Any unspecified value must be QCYes. Verify this.
|
||||
for i, c := range chars {
|
||||
|
||||
124 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go (generated, vendored, new file)
@@ -0,0 +1,124 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
var ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n == 0 {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
if uint64(int(v)) != v {
|
||||
return 0, 0, errors.New("snappy: decoded block is too large")
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
// It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) < dLen {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
|
||||
var d, offset, length int
|
||||
for s < len(src) {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s += 1
|
||||
case x == 60:
|
||||
s += 2
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-2]) | uint(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
|
||||
}
|
||||
length = int(x + 1)
|
||||
if length <= 0 {
|
||||
return nil, errors.New("snappy: unsupported literal length")
|
||||
}
|
||||
if length > len(dst)-d || length > len(src)-s {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length = 4 + int(src[s-2])>>2&0x7
|
||||
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
|
||||
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(src[s-2]) | int(src[s-1])<<8
|
||||
|
||||
case tagCopy4:
|
||||
return nil, errors.New("snappy: unsupported COPY_4 tag")
|
||||
}
|
||||
|
||||
end := d + length
|
||||
if offset > d || end > len(dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
for ; d < end; d++ {
|
||||
dst[d] = dst[d-offset]
|
||||
}
|
||||
}
|
||||
if d != dLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
174 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go (generated, vendored, new file)
@@ -0,0 +1,174 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// We limit how far copy back-references can go, the same as the C++ code.
|
||||
const maxOffset = 1 << 15
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
func emitLiteral(dst, lit []byte) int {
|
||||
i, n := 0, uint(len(lit)-1)
|
||||
switch {
|
||||
case n < 60:
|
||||
dst[0] = uint8(n)<<2 | tagLiteral
|
||||
i = 1
|
||||
case n < 1<<8:
|
||||
dst[0] = 60<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
i = 2
|
||||
case n < 1<<16:
|
||||
dst[0] = 61<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
i = 3
|
||||
case n < 1<<24:
|
||||
dst[0] = 62<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[3] = uint8(n >> 16)
|
||||
i = 4
|
||||
case int64(n) < 1<<32:
|
||||
dst[0] = 63<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[3] = uint8(n >> 16)
|
||||
dst[4] = uint8(n >> 24)
|
||||
i = 5
|
||||
default:
|
||||
panic("snappy: source buffer is too long")
|
||||
}
|
||||
if copy(dst[i:], lit) != len(lit) {
|
||||
panic("snappy: destination buffer is too short")
|
||||
}
|
||||
return i + len(lit)
|
||||
}
|
||||
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
func emitCopy(dst []byte, offset, length int) int {
|
||||
i := 0
|
||||
for length > 0 {
|
||||
x := length - 4
|
||||
if 0 <= x && x < 1<<3 && offset < 1<<11 {
|
||||
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
|
||||
dst[i+1] = uint8(offset)
|
||||
i += 2
|
||||
break
|
||||
}
|
||||
|
||||
x = length
|
||||
if x > 1<<6 {
|
||||
x = 1 << 6
|
||||
}
|
||||
dst[i+0] = uint8(x-1)<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= x
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
// It is valid to pass a nil dst.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
if n := MaxEncodedLen(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
// Return early if src is short.
|
||||
if len(src) <= 4 {
|
||||
if len(src) != 0 {
|
||||
d += emitLiteral(dst[d:], src)
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
|
||||
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
||||
const maxTableSize = 1 << 14
|
||||
shift, tableSize := uint(32-8), 1<<8
|
||||
for tableSize < maxTableSize && tableSize < len(src) {
|
||||
shift--
|
||||
tableSize *= 2
|
||||
}
|
||||
var table [maxTableSize]int
|
||||
|
||||
// Iterate over the source bytes.
|
||||
var (
|
||||
s int // The iterator position.
|
||||
t int // The last position with the same hash as s.
|
||||
lit int // The start position of any pending literal bytes.
|
||||
)
|
||||
for s+3 < len(src) {
|
||||
// Update the hash table.
|
||||
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
|
||||
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
|
||||
p := &table[(h*0x1e35a7bd)>>shift]
|
||||
// We need to store values in [-1, inf) in table. To save
|
||||
// some initialization time, (re)use the table's zero value
|
||||
// and shift the values against this zero: add 1 on writes,
|
||||
// subtract 1 on reads.
|
||||
t, *p = *p-1, s+1
|
||||
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
|
||||
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
|
||||
s++
|
||||
continue
|
||||
}
|
||||
// Otherwise, we have a match. First, emit any pending literal bytes.
|
||||
if lit != s {
|
||||
d += emitLiteral(dst[d:], src[lit:s])
|
||||
}
|
||||
// Extend the match to be as long as possible.
|
||||
s0 := s
|
||||
s, t = s+4, t+4
|
||||
for s < len(src) && src[s] == src[t] {
|
||||
s++
|
||||
t++
|
||||
}
|
||||
// Emit the copied bytes.
|
||||
d += emitCopy(dst[d:], s-t, s-s0)
|
||||
lit = s
|
||||
}
|
||||
|
||||
// Emit any final pending literal bytes and return.
|
||||
if lit != len(src) {
|
||||
d += emitLiteral(dst[d:], src[lit:])
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
|
||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||
// uncompressed length.
|
||||
func MaxEncodedLen(srcLen int) int {
|
||||
// Compressed data can be defined as:
|
||||
// compressed := item* literal*
|
||||
// item := literal* copy
|
||||
//
|
||||
// The trailing literal sequence has a space blowup of at most 62/60
|
||||
// since a literal of length 60 needs one tag byte + one extra byte
|
||||
// for length information.
|
||||
//
|
||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||
// 4 bytes of data. Because of a special check in the encoding code,
|
||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||
// to at most the 62/60 blowup for representing literals.
|
||||
//
|
||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||
//
|
||||
// This last factor dominates the blowup, so the final estimate is:
|
||||
return 32 + srcLen + srcLen/6
|
||||
}
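// Illustrative arithmetic only (not part of the original file): for
// srcLen = 6000, MaxEncodedLen returns 32 + 6000 + 6000/6 = 7032, i.e. the
// 7/6 worst case described above plus the 32-byte slack for the length
// header and the trailing literal.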
|
||||
38 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package snappy implements the snappy block-based compression format.
|
||||
// It aims for very high speeds and reasonable compression.
|
||||
//
|
||||
// The C++ snappy implementation is at http://code.google.com/p/snappy/
|
||||
package snappy
|
||||
|
||||
/*
|
||||
Each encoded block begins with the varint-encoded length of the decoded data,
|
||||
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
|
||||
first byte of each chunk is broken into its 2 least and 6 most significant bits
|
||||
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
|
||||
Zero means a literal tag. All other values mean a copy tag.
|
||||
|
||||
For literal tags:
|
||||
- If m < 60, the next 1 + m bytes are literal bytes.
|
||||
- Otherwise, let n be the little-endian unsigned integer denoted by the next
|
||||
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
|
||||
|
||||
For copy tags, length bytes are copied from offset bytes ago, in the style of
|
||||
Lempel-Ziv compression algorithms. In particular:
|
||||
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
|
||||
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
|
||||
of the offset. The next byte is bits 0-7 of the offset.
|
||||
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
|
||||
The length is 1 + m. The offset is the little-endian unsigned integer
|
||||
denoted by the next 2 bytes.
|
||||
- For l == 3, this tag is a legacy format that is no longer supported.
|
||||
*/
|
||||
const (
|
||||
tagLiteral = 0x00
|
||||
tagCopy1 = 0x01
|
||||
tagCopy2 = 0x02
|
||||
tagCopy4 = 0x03
|
||||
)
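// Illustrative sketch (not part of the original file) of the tag-byte split
// described in the format comment above:
//
//	tag, m := b&0x03, uint(b>>2) // low 2 bits select the chunk tag, high 6 bits carry m
//
// For example, b = 0xF0 gives tag == tagLiteral and m == 60, so the literal
// length is carried in the next m-59 = 1 byte.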
|
||||
261 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go (generated, vendored, new file)
@@ -0,0 +1,261 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
|
||||
|
||||
func roundtrip(b, ebuf, dbuf []byte) error {
|
||||
e, err := Encode(ebuf, b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding error: %v", err)
|
||||
}
|
||||
d, err := Decode(dbuf, e)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(b, d) {
|
||||
return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
if err := roundtrip(nil, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallCopy(t *testing.T) {
|
||||
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
||||
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
||||
for i := 0; i < 32; i++ {
|
||||
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
|
||||
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
|
||||
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallRand(t *testing.T) {
|
||||
rand.Seed(27354294)
|
||||
for n := 1; n < 20000; n += 23 {
|
||||
b := make([]byte, n)
|
||||
for i, _ := range b {
|
||||
b[i] = uint8(rand.Uint32())
|
||||
}
|
||||
if err := roundtrip(b, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallRegular(t *testing.T) {
|
||||
for n := 1; n < 20000; n += 23 {
|
||||
b := make([]byte, n)
|
||||
for i, _ := range b {
|
||||
b[i] = uint8(i%10 + 'a')
|
||||
}
|
||||
if err := roundtrip(b, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchDecode(b *testing.B, src []byte) {
|
||||
encoded, err := Encode(nil, src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
// Bandwidth is in amount of uncompressed data.
|
||||
b.SetBytes(int64(len(src)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(src, encoded)
|
||||
}
|
||||
}
|
||||
|
||||
func benchEncode(b *testing.B, src []byte) {
|
||||
// Bandwidth is in amount of uncompressed data.
|
||||
b.SetBytes(int64(len(src)))
|
||||
dst := make([]byte, MaxEncodedLen(len(src)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func readFile(b *testing.B, filename string) []byte {
|
||||
src, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
b.Fatalf("failed reading %s: %s", filename, err)
|
||||
}
|
||||
if len(src) == 0 {
|
||||
b.Fatalf("%s has zero length", filename)
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
// expand returns a slice of length n containing repeated copies of src.
|
||||
func expand(src []byte, n int) []byte {
|
||||
dst := make([]byte, n)
|
||||
for x := dst; len(x) > 0; {
|
||||
i := copy(x, src)
|
||||
x = x[i:]
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func benchWords(b *testing.B, n int, decode bool) {
|
||||
// Note: the file is OS-language dependent so the resulting values are not
|
||||
// directly comparable for non-US-English OS installations.
|
||||
data := expand(readFile(b, "/usr/share/dict/words"), n)
|
||||
if decode {
|
||||
benchDecode(b, data)
|
||||
} else {
|
||||
benchEncode(b, data)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
|
||||
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
|
||||
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
|
||||
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
|
||||
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
|
||||
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
|
||||
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
|
||||
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
|
||||
|
||||
// testFiles' values are copied directly from
|
||||
// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
|
||||
// The label field is unused in snappy-go.
|
||||
var testFiles = []struct {
|
||||
label string
|
||||
filename string
|
||||
}{
|
||||
{"html", "html"},
|
||||
{"urls", "urls.10K"},
|
||||
{"jpg", "house.jpg"},
|
||||
{"pdf", "mapreduce-osdi-1.pdf"},
|
||||
{"html4", "html_x_4"},
|
||||
{"cp", "cp.html"},
|
||||
{"c", "fields.c"},
|
||||
{"lsp", "grammar.lsp"},
|
||||
{"xls", "kennedy.xls"},
|
||||
{"txt1", "alice29.txt"},
|
||||
{"txt2", "asyoulik.txt"},
|
||||
{"txt3", "lcet10.txt"},
|
||||
{"txt4", "plrabn12.txt"},
|
||||
{"bin", "ptt5"},
|
||||
{"sum", "sum"},
|
||||
{"man", "xargs.1"},
|
||||
{"pb", "geo.protodata"},
|
||||
{"gaviota", "kppkn.gtb"},
|
||||
}
|
||||
|
||||
// The test data files are present at this canonical URL.
|
||||
const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
|
||||
|
||||
func downloadTestdata(basename string) (errRet error) {
|
||||
filename := filepath.Join("testdata", basename)
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create %s: %s", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
if errRet != nil {
|
||||
os.Remove(filename)
|
||||
}
|
||||
}()
|
||||
resp, err := http.Get(baseURL + basename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write %s: %s", filename, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func benchFile(b *testing.B, n int, decode bool) {
|
||||
filename := filepath.Join("testdata", testFiles[n].filename)
|
||||
if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
|
||||
if !*download {
|
||||
b.Fatal("test data not found; skipping benchmark without the -download flag")
|
||||
}
|
||||
// Download the official snappy C++ implementation reference test data
|
||||
// files for benchmarking.
|
||||
if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
|
||||
b.Fatalf("failed to create testdata: %s", err)
|
||||
}
|
||||
for _, tf := range testFiles {
|
||||
if err := downloadTestdata(tf.filename); err != nil {
|
||||
b.Fatalf("failed to download testdata: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
data := readFile(b, filename)
|
||||
if decode {
|
||||
benchDecode(b, data)
|
||||
} else {
|
||||
benchEncode(b, data)
|
||||
}
|
||||
}
|
||||
|
||||
// Naming convention is kept similar to what snappy's C++ implementation uses.
|
||||
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
|
||||
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
|
||||
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
|
||||
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
|
||||
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
|
||||
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
|
||||
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
|
||||
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
|
||||
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
|
||||
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
|
||||
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
|
||||
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
|
||||
func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
|
||||
func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
|
||||
func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
|
||||
func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
|
||||
func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
|
||||
func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
|
||||
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
|
||||
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
|
||||
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
|
||||
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
|
||||
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
|
||||
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
|
||||
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
|
||||
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
|
||||
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
|
||||
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
|
||||
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
|
||||
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
|
||||
func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
|
||||
func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
|
||||
func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
|
||||
func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
|
||||
func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
|
||||
func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }
|
||||
1 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore (generated, vendored, new file)
@@ -0,0 +1 @@
|
||||
/lz4-example/lz4-example
|
||||
7 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- tip
|
||||
24 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/LICENSE (generated, vendored, new file)
@@ -0,0 +1,24 @@
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
71 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md (generated, vendored, new file)
@@ -0,0 +1,71 @@
|
||||
go-lz4
|
||||
======
|
||||
|
||||
go-lz4 is a port of the LZ4 lossless compression algorithm to Go. The original C code
|
||||
is located at:
|
||||
|
||||
https://code.google.com/p/lz4/
|
||||
|
||||
Status
|
||||
------
|
||||
[](http://travis-ci.org/bkaradzic/go-lz4)
|
||||
[](https://godoc.org/github.com/bkaradzic/go-lz4)
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
go get github.com/bkaradzic/go-lz4
|
||||
|
||||
import "github.com/bkaradzic/go-lz4"
|
||||
|
||||
The package name is `lz4`
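
A minimal round-trip sketch, assuming the `Encode(dst, src []byte) ([]byte, error)` and
`Decode(dst, src []byte) ([]byte, error)` signatures from this package:

```go
compressed, err := lz4.Encode(nil, []byte("some payload"))
if err != nil {
	// handle the error
}
decompressed, err := lz4.Decode(nil, compressed)
if err != nil {
	// handle the error
}
// decompressed now holds the original input bytes.
```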
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
* go-lz4 saves a uint32 with the original uncompressed length at the beginning
|
||||
of the encoded buffer. This may get in the way of interoperability with
|
||||
other implementations.
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
Damian Gryski ([@dgryski](https://github.com/dgryski))
|
||||
Dustin Sallings ([@dustin](https://github.com/dustin))
|
||||
|
||||
Contact
|
||||
-------
|
||||
|
||||
[@bkaradzic](https://twitter.com/bkaradzic)
|
||||
http://www.stuckingeometry.com
|
||||
|
||||
Project page
|
||||
https://github.com/bkaradzic/go-lz4
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
74 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzzer/main.go (generated, vendored, new file)
@@ -0,0 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/bkaradzic/go-lz4"
|
||||
|
||||
// lz4's API matches snappy's, so we can easily see how it performs
|
||||
// lz4 "code.google.com/p/snappy-go/snappy"
|
||||
)
|
||||
|
||||
var input = `
|
||||
ADVENTURE I. A SCANDAL IN BOHEMIA
|
||||
|
||||
I.
|
||||
|
||||
To Sherlock Holmes she is always THE woman. I have seldom heard
|
||||
him mention her under any other name. In his eyes she eclipses
|
||||
and predominates the whole of her sex. It was not that he felt
|
||||
any emotion akin to love for Irene Adler. All emotions, and that
|
||||
one particularly, were abhorrent to his cold, precise but
|
||||
admirably balanced mind. He was, I take it, the most perfect
|
||||
reasoning and observing machine that the world has seen, but as a
|
||||
lover he would have placed himself in a false position. He never
|
||||
spoke of the softer passions, save with a gibe and a sneer. They
|
||||
were admirable things for the observer--excellent for drawing the
|
||||
veil from men's motives and actions. But for the trained reasoner
|
||||
to admit such intrusions into his own delicate and finely
|
||||
adjusted temperament was to introduce a distracting factor which
|
||||
might throw a doubt upon all his mental results. Grit in a
|
||||
sensitive instrument, or a crack in one of his own high-power
|
||||
lenses, would not be more disturbing than a strong emotion in a
|
||||
nature such as his. And yet there was but one woman to him, and
|
||||
that woman was the late Irene Adler, of dubious and questionable
|
||||
memory.
|
||||
|
||||
I had seen little of Holmes lately. My marriage had drifted us
|
||||
away from each other. My own complete happiness, and the
|
||||
home-centred interests which rise up around the man who first
|
||||
finds himself master of his own establishment, were sufficient to
|
||||
absorb all my attention, while Holmes, who loathed every form of
|
||||
society with his whole Bohemian soul, remained in our lodgings in
|
||||
Baker Street, buried among his old books, and alternating from
|
||||
week to week between cocaine and ambition, the drowsiness of the
|
||||
drug, and the fierce energy of his own keen nature. He was still,
|
||||
as ever, deeply attracted by the study of crime, and occupied his
|
||||
immense faculties and extraordinary powers of observation in
|
||||
following out those clues, and clearing up those mysteries which
|
||||
had been abandoned as hopeless by the official police. From time
|
||||
to time I heard some vague account of his doings: of his summons
|
||||
to Odessa in the case of the Trepoff murder, of his clearing up
|
||||
of the singular tragedy of the Atkinson brothers at Trincomalee,
|
||||
and finally of the mission which he had accomplished so
|
||||
delicately and successfully for the reigning family of Holland.
|
||||
Beyond these signs of his activity, however, which I merely
|
||||
shared with all the readers of the daily press, I knew little of
|
||||
my former friend and companion.
|
||||
`
|
||||
|
||||
func main() {
|
||||
|
||||
compressed, _ := lz4.Encode(nil, []byte(input))
|
||||
|
||||
modified := make([]byte, len(compressed))
|
||||
|
||||
for {
|
||||
copy(modified, compressed)
|
||||
for i := 0; i < 100; i++ {
|
||||
modified[rand.Intn(len(compressed)-4)+4] = byte(rand.Intn(256))
|
||||
}
|
||||
lz4.Decode(nil, modified)
|
||||
}
|
||||
|
||||
}
|
||||
86 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4-example/main.go (generated, vendored, new file)
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* Copyright 2011 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
|
||||
lz4 "github.com/bkaradzic/go-lz4"
|
||||
)
|
||||
|
||||
var (
|
||||
decompress = flag.Bool("d", false, "decompress")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
var optCPUProfile = flag.String("cpuprofile", "", "profile")
|
||||
flag.Parse()
|
||||
|
||||
if *optCPUProfile != "" {
|
||||
f, err := os.Create(*optCPUProfile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
|
||||
var data []byte
|
||||
|
||||
if len(args) < 2 {
|
||||
fmt.Print("Usage: lz4 [-d] <input> <output>\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
input, err := os.OpenFile(args[0], os.O_RDONLY, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open input file %s\n", args[0])
|
||||
os.Exit(1)
|
||||
}
|
||||
defer input.Close()
|
||||
|
||||
if *decompress {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, _ = lz4.Decode(nil, data)
|
||||
} else {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, _ = lz4.Encode(nil, data)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(args[1], data, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open output file %s\n", args[1])
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
63 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
|
||||
|
||||
func roundtrip(t *testing.T, input []byte) {
|
||||
|
||||
dst, err := Encode(nil, input)
|
||||
if err != nil {
|
||||
t.Errorf("got error during compression: %s", err)
|
||||
}
|
||||
|
||||
output, err := Decode(nil, dst)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("got error during decompress: %s", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, input) {
|
||||
t.Errorf("roundtrip failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
roundtrip(t, nil)
|
||||
}
|
||||
|
||||
func TestLengths(t *testing.T) {
|
||||
|
||||
for i := 0; i < 1024; i++ {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
|
||||
for i := 1024; i < 4096; i += 23 {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWords(t *testing.T) {
|
||||
roundtrip(t, testfile)
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Encode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(nil, testfile)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Decode(b *testing.B) {
|
||||
|
||||
var compressed, _ = Encode(nil, testfile)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(nil, compressed)
|
||||
}
|
||||
}
|
||||
194 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go (generated, vendored, new file)
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) < 4 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
uncompressedLen := binary.LittleEndian.Uint32(src)
|
||||
|
||||
if uncompressedLen == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if uncompressedLen > MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if dst == nil || len(dst) < int(uncompressedLen) {
|
||||
dst = make([]byte, uncompressedLen)
|
||||
}
|
||||
|
||||
d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return d.dst, d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return d.dst, nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
if literal < 4 {
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if d.dpos+length > uncompressedLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
||||
13052 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt (generated, vendored, new file)
File diff suppressed because it is too large
188 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go (generated, vendored, new file)
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import "encoding/binary"
|
||||
import "errors"
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 17
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
// MaxInputSize is the largest buffer that can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of an lz4 block, given its uncompressed length
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16 + 4
|
||||
}
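// Illustrative arithmetic only (not part of the original file): isize = 255000
// gives 255000 + 255000/255 + 16 + 4 = 256020, i.e. the uncompressed length
// plus roughly 0.4% overhead and a small constant for the headers.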
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) >= MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}
|
||||
|
||||
binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
e.dpos = 4
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+4 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return e.dst[:e.dpos], nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src) && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
||||
39 Godeps/_workspace/src/github.com/calmh/ini/README.md (generated, vendored)
@@ -1,39 +0,0 @@
|
||||
ini [](https://drone.io/github.com/calmh/ini/latest)
|
||||
===
|
||||
|
||||
Yet another .INI file parser / writer. Created because the existing ones
|
||||
were either not general enough (allowing easy access to all parts of the
|
||||
original file) or made annoying assumptions about the format. And
|
||||
probably equal parts NIH. You might want to just write your own instead
|
||||
of using this one; you know that's where you'll end up in the end
|
||||
anyhow.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
http://godoc.org/github.com/calmh/ini
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
```go
|
||||
fd, _ := os.Open("foo.ini")
|
||||
cfg := ini.Parse(fd)
|
||||
fd.Close()
|
||||
|
||||
val := cfg.Get("general", "foo")
|
||||
cfg.Set("general", "bar", "baz")
|
||||
|
||||
fd, _ = os.Create("bar.ini")
|
||||
err := cfg.Write(fd)
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
err = fd.Close()
|
||||
|
||||
```
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
MIT
|
||||
235 Godeps/_workspace/src/github.com/calmh/ini/ini.go (generated, vendored)
@@ -1,235 +0,0 @@
|
||||
// Package ini provides trivial parsing of .INI format files.
|
||||
package ini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config is a parsed INI format file.
|
||||
type Config struct {
|
||||
sections []section
|
||||
comments []string
|
||||
}
|
||||
|
||||
type section struct {
|
||||
name string
|
||||
comments []string
|
||||
options []option
|
||||
}
|
||||
|
||||
type option struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
var (
|
||||
iniSectionRe = regexp.MustCompile(`^\[(.+)\]$`)
|
||||
iniOptionRe = regexp.MustCompile(`^([^\s=]+)\s*=\s*(.+?)$`)
|
||||
)
|
||||
|
||||
// Sections returns the list of sections in the file.
|
||||
func (c *Config) Sections() []string {
|
||||
var sections []string
|
||||
for _, sect := range c.sections {
|
||||
sections = append(sections, sect.name)
|
||||
}
|
||||
return sections
|
||||
}
|
||||
|
||||
// Options returns the list of options in a given section.
|
||||
func (c *Config) Options(section string) []string {
|
||||
var options []string
|
||||
for _, sect := range c.sections {
|
||||
if sect.name == section {
|
||||
for _, opt := range sect.options {
|
||||
options = append(options, opt.name)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// OptionMap returns the map option => value for a given section.
|
||||
func (c *Config) OptionMap(section string) map[string]string {
|
||||
options := make(map[string]string)
|
||||
for _, sect := range c.sections {
|
||||
if sect.name == section {
|
||||
for _, opt := range sect.options {
|
||||
options[opt.name] = opt.value
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// Comments returns the list of comments in a given section.
|
||||
// For the empty string, returns the file comments.
|
||||
func (c *Config) Comments(section string) []string {
|
||||
if section == "" {
|
||||
return c.comments
|
||||
}
|
||||
for _, sect := range c.sections {
|
||||
if sect.name == section {
|
||||
return sect.comments
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddComment appends the comment to the list of comments for the section.
|
||||
func (c *Config) AddComment(sect, comment string) {
|
||||
if sect == "" {
|
||||
c.comments = append(c.comments, comment)
|
||||
return
|
||||
}
|
||||
|
||||
for i, s := range c.sections {
|
||||
if s.name == sect {
|
||||
c.sections[i].comments = append(s.comments, comment)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.sections = append(c.sections, section{
|
||||
name: sect,
|
||||
comments: []string{comment},
|
||||
})
|
||||
}
|
||||
|
||||
// Parse reads the given io.Reader and returns a parsed Config object.
|
||||
func Parse(stream io.Reader) Config {
|
||||
var cfg Config
|
||||
var curSection string
|
||||
|
||||
scanner := bufio.NewScanner(bufio.NewReader(stream))
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
|
||||
comment := strings.TrimLeft(line, ";# ")
|
||||
cfg.AddComment(curSection, comment)
|
||||
} else if len(line) > 0 {
|
||||
if m := iniSectionRe.FindStringSubmatch(line); len(m) > 0 {
|
||||
curSection = m[1]
|
||||
} else if m := iniOptionRe.FindStringSubmatch(line); len(m) > 0 {
|
||||
key := m[1]
|
||||
val := m[2]
|
||||
if !strings.Contains(val, "\"") {
|
||||
// If val does not contain any quote characters, we can make it
|
||||
// a quoted string and safely let strconv.Unquote sort out any
|
||||
// escapes
|
||||
val = "\"" + val + "\""
|
||||
}
|
||||
if val[0] == '"' {
|
||||
val, _ = strconv.Unquote(val)
|
||||
}
|
||||
|
||||
cfg.Set(curSection, key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Write writes the sections and options to the io.Writer in INI format.
|
||||
func (c *Config) Write(out io.Writer) error {
|
||||
for _, cmt := range c.comments {
|
||||
fmt.Fprintln(out, "; "+cmt)
|
||||
}
|
||||
if len(c.comments) > 0 {
|
||||
fmt.Fprintln(out)
|
||||
}
|
||||
|
||||
for _, sect := range c.sections {
|
||||
fmt.Fprintf(out, "[%s]\n", sect.name)
|
||||
for _, cmt := range sect.comments {
|
||||
fmt.Fprintln(out, "; "+cmt)
|
||||
}
|
||||
for _, opt := range sect.options {
|
||||
val := opt.value
|
||||
if len(val) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Quote the string if it begins or ends with space
|
||||
needsQuoting := val[0] == ' ' || val[len(val)-1] == ' '
|
||||
|
||||
if !needsQuoting {
|
||||
// Quote the string if it contains any unprintable characters
|
||||
for _, r := range val {
|
||||
if !strconv.IsPrint(r) {
|
||||
needsQuoting = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needsQuoting {
|
||||
val = strconv.Quote(val)
|
||||
}
|
||||
|
||||
fmt.Fprintf(out, "%s=%s\n", opt.name, val)
|
||||
}
|
||||
fmt.Fprintln(out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets the value from the specified section and key name, or the empty
|
||||
// string if either the section or the key is missing.
|
||||
func (c *Config) Get(section, key string) string {
|
||||
for _, sect := range c.sections {
|
||||
if sect.name == section {
|
||||
for _, opt := range sect.options {
|
||||
if opt.name == key {
|
||||
return opt.value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Set sets a value for an option in a section. If the option exists, its
|
||||
// value will be overwritten. If the option does not exist, it will be added.
|
||||
// If the section does not exist, it will be added and the option added to it.
|
||||
func (c *Config) Set(sectionName, key, value string) {
|
||||
for i, sect := range c.sections {
|
||||
if sect.name == sectionName {
|
||||
for j, opt := range sect.options {
|
||||
if opt.name == key {
|
||||
c.sections[i].options[j].value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
c.sections[i].options = append(sect.options, option{key, value})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.sections = append(c.sections, section{
|
||||
name: sectionName,
|
||||
options: []option{{key, value}},
|
||||
})
|
||||
}
|
||||
|
||||
// Delete removes the option from the specified section.
|
||||
func (c *Config) Delete(section, key string) {
|
||||
for sn, sect := range c.sections {
|
||||
if sect.name == section {
|
||||
for i, opt := range sect.options {
|
||||
if opt.name == key {
|
||||
c.sections[sn].options = append(sect.options[:i], sect.options[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
214 Godeps/_workspace/src/github.com/calmh/ini/ini_test.go (generated, vendored)
@@ -1,214 +0,0 @@
|
||||
package ini_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/ini"
|
||||
)
|
||||
|
||||
func TestParseValues(t *testing.T) {
|
||||
strs := []string{
|
||||
`[general]`,
|
||||
`k1=v1`,
|
||||
`k2 = v2`,
|
||||
` k3 = v3 `,
|
||||
`k4=" quoted spaces "`,
|
||||
`k5 = " quoted spaces " `,
|
||||
`k6 = with\nnewline`,
|
||||
`k7 = "with\nnewline"`,
|
||||
`k8 = a "quoted" word`,
|
||||
`k9 = "a \"quoted\" word"`,
|
||||
}
|
||||
buf := bytes.NewBufferString(strings.Join(strs, "\n"))
|
||||
cfg := ini.Parse(buf)
|
||||
|
||||
correct := map[string]string{
|
||||
"k1": "v1",
|
||||
"k2": "v2",
|
||||
"k3": "v3",
|
||||
"k4": " quoted spaces ",
|
||||
"k5": " quoted spaces ",
|
||||
"k6": "with\nnewline",
|
||||
"k7": "with\nnewline",
|
||||
"k8": "a \"quoted\" word",
|
||||
"k9": "a \"quoted\" word",
|
||||
}
|
||||
|
||||
for k, v := range correct {
|
||||
if v2 := cfg.Get("general", k); v2 != v {
|
||||
t.Errorf("Incorrect general.%s, %q != %q", k, v2, v)
|
||||
}
|
||||
}
|
||||
|
||||
if v := cfg.Get("general", "nonexistant"); v != "" {
|
||||
t.Errorf("Unexpected non-empty value %q", v)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseComments(t *testing.T) {
|
||||
strs := []string{
|
||||
";file comment 1", // No leading space
|
||||
"; file comment 2 ", // Trailing space
|
||||
"; file comment 3", // Multiple leading spaces
|
||||
"[general]",
|
||||
"; b general comment 1", // Comments in unsorted order
|
||||
"somekey = somevalue",
|
||||
"; a general comment 2",
|
||||
"[other]",
|
||||
"; other comment 1", // Comments in section with no values
|
||||
"; other comment 2",
|
||||
"[other2]",
|
||||
"; other2 comment 1",
|
||||
"; other2 comment 2", // Comments on last section
|
||||
"somekey = somevalue",
|
||||
}
|
||||
buf := bytes.NewBufferString(strings.Join(strs, "\n"))
|
||||
|
||||
correct := map[string][]string{
|
||||
"": []string{"file comment 1", "file comment 2", "file comment 3"},
|
||||
"general": []string{"b general comment 1", "a general comment 2"},
|
||||
"other": []string{"other comment 1", "other comment 2"},
|
||||
"other2": []string{"other2 comment 1", "other2 comment 2"},
|
||||
}
|
||||
|
||||
cfg := ini.Parse(buf)
|
||||
|
||||
for section, comments := range correct {
|
||||
cmts := cfg.Comments(section)
|
||||
if len(cmts) != len(comments) {
|
||||
t.Errorf("Incorrect number of comments for section %q: %d != %d", section, len(cmts), len(comments))
|
||||
} else {
|
||||
for i := range comments {
|
||||
if cmts[i] != comments[i] {
|
||||
t.Errorf("Incorrect comment: %q != %q", cmts[i], comments[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
cfg := ini.Config{}
|
||||
cfg.Set("general", "k1", "v1")
|
||||
cfg.Set("general", "k2", "foo bar")
|
||||
cfg.Set("general", "k3", " foo bar ")
|
||||
cfg.Set("general", "k4", "foo\nbar")
|
||||
|
||||
var out bytes.Buffer
|
||||
cfg.Write(&out)
|
||||
|
||||
correct := `[general]
|
||||
k1=v1
|
||||
k2=foo bar
|
||||
k3=" foo bar "
|
||||
k4="foo\nbar"
|
||||
|
||||
`
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect written .INI:\n%s\ncorrect:\n%s", s, correct)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
buf := bytes.NewBufferString("[general]\nfoo=bar\nfoo2=bar2\n")
|
||||
cfg := ini.Parse(buf)
|
||||
|
||||
cfg.Set("general", "foo", "baz") // Overwrite existing
|
||||
cfg.Set("general", "baz", "quux") // Create new value
|
||||
cfg.Set("other", "baz2", "quux2") // Create new section + value
|
||||
|
||||
var out bytes.Buffer
|
||||
cfg.Write(&out)
|
||||
|
||||
correct := `[general]
|
||||
foo=baz
|
||||
foo2=bar2
|
||||
baz=quux
|
||||
|
||||
[other]
|
||||
baz2=quux2
|
||||
|
||||
`
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after set:\n%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
buf := bytes.NewBufferString("[general]\nfoo=bar\nfoo2=bar2\nfoo3=baz\n")
|
||||
cfg := ini.Parse(buf)
|
||||
cfg.Delete("general", "foo")
|
||||
out := new(bytes.Buffer)
|
||||
cfg.Write(out)
|
||||
correct := "[general]\nfoo2=bar2\nfoo3=baz\n\n"
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after delete:\n%s", s)
|
||||
}
|
||||
|
||||
buf = bytes.NewBufferString("[general]\nfoo=bar\nfoo2=bar2\nfoo3=baz\n")
|
||||
cfg = ini.Parse(buf)
|
||||
cfg.Delete("general", "foo2")
|
||||
out = new(bytes.Buffer)
|
||||
cfg.Write(out)
|
||||
correct = "[general]\nfoo=bar\nfoo3=baz\n\n"
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after delete:\n%s", s)
|
||||
}
|
||||
|
||||
buf = bytes.NewBufferString("[general]\nfoo=bar\nfoo2=bar2\nfoo3=baz\n")
|
||||
cfg = ini.Parse(buf)
|
||||
cfg.Delete("general", "foo3")
|
||||
out = new(bytes.Buffer)
|
||||
cfg.Write(out)
|
||||
correct = "[general]\nfoo=bar\nfoo2=bar2\n\n"
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after delete:\n%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetManyEquals(t *testing.T) {
|
||||
buf := bytes.NewBufferString("[general]\nfoo=bar==\nfoo2=bar2==\n")
|
||||
cfg := ini.Parse(buf)
|
||||
|
||||
cfg.Set("general", "foo", "baz==")
|
||||
|
||||
var out bytes.Buffer
|
||||
cfg.Write(&out)
|
||||
|
||||
correct := `[general]
|
||||
foo=baz==
|
||||
foo2=bar2==
|
||||
|
||||
`
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after set:\n%s", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteDuplicate(t *testing.T) {
|
||||
buf := bytes.NewBufferString("[general]\nfoo=bar==\nfoo=bar2==\n")
|
||||
cfg := ini.Parse(buf)
|
||||
|
||||
if v := cfg.Get("general", "foo"); v != "bar2==" {
|
||||
t.Errorf("incorrect get %q", v)
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
cfg.Write(&out)
|
||||
|
||||
correct := `[general]
|
||||
foo=bar2==
|
||||
|
||||
`
|
||||
|
||||
if s := out.String(); s != correct {
|
||||
t.Errorf("Incorrect INI after set:\n%s", s)
|
||||
}
|
||||
}
|
||||
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
coverage.out
|
||||
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
|
||||
install:
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- ./generate.sh
|
||||
- go test -coverprofile=coverage.out
|
||||
|
||||
after_success:
|
||||
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
|
||||
|
||||
env:
|
||||
global:
|
||||
secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
|
||||
@@ -1,11 +1,11 @@
|
||||
Copyright (C) 2013 Jakob Borg
|
||||
Copyright (C) 2014 Jakob Borg.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to
|
||||
deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
@@ -14,6 +14,6 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
||||
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
xdr
|
||||
===
|
||||
|
||||
[](https://travis-ci.org/calmh/xdr)
|
||||
[](https://coveralls.io/r/calmh/xdr?branch=master)
|
||||
[](http://godoc.org/github.com/calmh/xdr)
|
||||
[](http://opensource.org/licenses/MIT)
|
||||
|
||||
This is an XDR encoding/decoding library. It uses code generation and
|
||||
not reflection. It supports the IPDR bastardized XDR format when built
|
||||
with `-tags ipdr`.
|
||||
|
||||
113
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
Normal file
113
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
type XDRBenchStruct struct {
|
||||
I1 uint64
|
||||
I2 uint32
|
||||
I3 uint16
|
||||
I4 uint8
|
||||
Bs0 []byte // max:128
|
||||
Bs1 []byte
|
||||
S0 string // max:128
|
||||
S1 string
|
||||
}
|
||||
|
||||
var res []byte // no to be optimized away
|
||||
var s = XDRBenchStruct{
|
||||
I1: 42,
|
||||
I2: 43,
|
||||
I3: 44,
|
||||
I4: 45,
|
||||
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
|
||||
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
S0: "Hello World! String one.",
|
||||
S1: "Hello World! String two.",
|
||||
}
|
||||
var e = s.MarshalXDR()
|
||||
|
||||
func BenchmarkThisMarshal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
res = s.MarshalXDR()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.UnmarshalXDR(e)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.EncodeXDR(ioutil.Discard)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncoder(b *testing.B) {
|
||||
w := xdr.NewWriter(ioutil.Discard)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.encodeXDR(w)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type repeatReader struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (r *repeatReader) Read(bs []byte) (n int, err error) {
|
||||
if len(bs) > len(r.data) {
|
||||
err = io.EOF
|
||||
}
|
||||
n = copy(bs, r.data)
|
||||
r.data = r.data[n:]
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *repeatReader) Reset(bs []byte) {
|
||||
r.data = bs
|
||||
}
|
||||
|
||||
func BenchmarkThisDecode(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.DecodeXDR(rr)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisDecoder(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
r := xdr.NewReader(rr)
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.decodeXDR(r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
183
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
183
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
XDRBenchStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I1 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| I2 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | I3 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct XDRBenchStruct {
|
||||
unsigned hyper I1;
|
||||
unsigned int I2;
|
||||
unsigned int I3;
|
||||
uint8 I4;
|
||||
opaque Bs0<128>;
|
||||
opaque Bs1<>;
|
||||
string S0<128>;
|
||||
string S1<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(o.I1)
|
||||
xw.WriteUint32(o.I2)
|
||||
xw.WriteUint16(o.I3)
|
||||
xw.WriteUint8(o.I4)
|
||||
if len(o.Bs0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
xw.WriteBytes(o.Bs0)
|
||||
xw.WriteBytes(o.Bs1)
|
||||
if len(o.S0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
xw.WriteString(o.S0)
|
||||
xw.WriteString(o.S1)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I1 = xr.ReadUint64()
|
||||
o.I2 = xr.ReadUint32()
|
||||
o.I3 = xr.ReadUint16()
|
||||
o.I4 = xr.ReadUint8()
|
||||
o.Bs0 = xr.ReadBytesMax(128)
|
||||
o.Bs1 = xr.ReadBytes()
|
||||
o.S0 = xr.ReadStringMax(128)
|
||||
o.S1 = xr.ReadString()
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
repeatReader Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of data |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ data (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct repeatReader {
|
||||
opaque data<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o repeatReader) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.data)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *repeatReader) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
|
||||
o.data = xr.ReadBytes()
|
||||
return xr.Error()
|
||||
}
|
||||
279
xdr/cmd/coder/main.go → Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
279
xdr/cmd/coder/main.go → Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
@@ -1,3 +1,6 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -15,27 +18,32 @@ import (
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var output string
|
||||
|
||||
type field struct {
|
||||
type fieldInfo struct {
|
||||
Name string
|
||||
IsBasic bool
|
||||
IsSlice bool
|
||||
IsMap bool
|
||||
FieldType string
|
||||
KeyType string
|
||||
Encoder string
|
||||
Convert string
|
||||
Max int
|
||||
IsBasic bool // handled by one the native Read/WriteUint64 etc functions
|
||||
IsSlice bool // field is a slice of FieldType
|
||||
FieldType string // original type of field, i.e. "int"
|
||||
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
|
||||
Convert string // what to convert to when encoding, i.e. "uint64"
|
||||
Max int // max size for slices and strings
|
||||
}
|
||||
|
||||
var headerTpl = template.Must(template.New("header").Parse(`package {{.Package}}
|
||||
type structInfo struct {
|
||||
Name string
|
||||
Fields []fieldInfo
|
||||
}
|
||||
|
||||
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package {{.Package}}
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/syncthing/xdr"
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
`))
|
||||
|
||||
@@ -46,44 +54,54 @@ func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() []byte {
|
||||
var buf bytes.Buffer
|
||||
var xw = xdr.NewWriter(&buf)
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return buf.Bytes()
|
||||
return []byte(aw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{range $field := .Fields}}
|
||||
{{if not $field.IsSlice}}
|
||||
{{if ne $field.Convert ""}}
|
||||
xw.Write{{$field.Encoder}}({{$field.Convert}}(o.{{$field.Name}}))
|
||||
{{else if $field.IsBasic}}
|
||||
{{if ge $field.Max 1}}
|
||||
if len(o.{{$field.Name}}) > {{$field.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$field.Encoder}}(o.{{$field.Name}})
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
o.{{$field.Name}}.encodeXDR(xw)
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
{{if ge $field.Max 1}}
|
||||
if len(o.{{$field.Name}}) > {{$field.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$field.Name}})))
|
||||
for i := range o.{{$field.Name}} {
|
||||
{{if ne $field.Convert ""}}
|
||||
xw.Write{{$field.Encoder}}({{$field.Convert}}(o.{{$field.Name}}[i]))
|
||||
{{else if $field.IsBasic}}
|
||||
xw.Write{{$field.Encoder}}(o.{{$field.Name}}[i])
|
||||
{{else}}
|
||||
o.{{$field.Name}}[i].encodeXDR(xw)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xw.Tot(), xw.Error()
|
||||
}//+n
|
||||
@@ -94,43 +112,43 @@ func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
|
||||
var buf = bytes.NewBuffer(bs)
|
||||
var xr = xdr.NewReader(buf)
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
{{range $field := .Fields}}
|
||||
{{if not $field.IsSlice}}
|
||||
{{if ne $field.Convert ""}}
|
||||
o.{{$field.Name}} = {{$field.FieldType}}(xr.Read{{$field.Encoder}}())
|
||||
{{else if $field.IsBasic}}
|
||||
{{if ge $field.Max 1}}
|
||||
o.{{$field.Name}} = xr.Read{{$field.Encoder}}Max({{$field.Max}})
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{end}}
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
|
||||
{{end}}
|
||||
{{else}}
|
||||
o.{{$field.Name}} = xr.Read{{$field.Encoder}}()
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
(&o.{{$field.Name}}).decodeXDR(xr)
|
||||
{{end}}
|
||||
{{else}}
|
||||
_{{$field.Name}}Size := int(xr.ReadUint32())
|
||||
{{if ge $field.Max 1}}
|
||||
if _{{$field.Name}}Size > {{$field.Max}} {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
o.{{$field.Name}} = make([]{{$field.FieldType}}, _{{$field.Name}}Size)
|
||||
for i := range o.{{$field.Name}} {
|
||||
{{if ne $field.Convert ""}}
|
||||
o.{{$field.Name}}[i] = {{$field.FieldType}}(xr.Read{{$field.Encoder}}())
|
||||
{{else if $field.IsBasic}}
|
||||
o.{{$field.Name}}[i] = xr.Read{{$field.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$field.Name}}[i]).decodeXDR(xr)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xr.Error()
|
||||
}`))
|
||||
@@ -143,6 +161,8 @@ type typeSet struct {
|
||||
}
|
||||
|
||||
var xdrEncoders = map[string]typeSet{
|
||||
"int8": typeSet{"uint8", "Uint8"},
|
||||
"uint8": typeSet{"", "Uint8"},
|
||||
"int16": typeSet{"uint16", "Uint16"},
|
||||
"uint16": typeSet{"", "Uint16"},
|
||||
"int32": typeSet{"uint32", "Uint32"},
|
||||
@@ -155,8 +175,9 @@ var xdrEncoders = map[string]typeSet{
|
||||
"bool": typeSet{"", "Bool"},
|
||||
}
|
||||
|
||||
func handleStruct(name string, t *ast.StructType) {
|
||||
var fs []field
|
||||
func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
var fs []fieldInfo
|
||||
|
||||
for _, sf := range t.Fields.List {
|
||||
if len(sf.Names) == 0 {
|
||||
// We don't handle anonymous fields
|
||||
@@ -170,14 +191,17 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
if m := maxRe.FindStringSubmatch(c); m != nil {
|
||||
max, _ = strconv.Atoi(m[1])
|
||||
}
|
||||
if strings.Contains(c, "noencode") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var f field
|
||||
var f fieldInfo
|
||||
switch ft := sf.Type.(type) {
|
||||
case *ast.Ident:
|
||||
tn := ft.Name
|
||||
if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = field{
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
@@ -186,7 +210,7 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = field{
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
FieldType: tn,
|
||||
@@ -202,7 +226,7 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
|
||||
tn := ft.Elt.(*ast.Ident).Name
|
||||
if enc, ok := xdrEncoders["[]"+tn]; ok {
|
||||
f = field{
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
@@ -211,7 +235,7 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
Max: max,
|
||||
}
|
||||
} else if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = field{
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
IsSlice: true,
|
||||
@@ -221,7 +245,7 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = field{
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
IsSlice: true,
|
||||
@@ -234,17 +258,13 @@ func handleStruct(name string, t *ast.StructType) {
|
||||
fs = append(fs, f)
|
||||
}
|
||||
|
||||
switch output {
|
||||
case "code":
|
||||
generateCode(name, fs)
|
||||
case "diagram":
|
||||
generateDiagram(name, fs)
|
||||
case "xdr":
|
||||
generateXdr(name, fs)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func generateCode(name string, fs []field) {
|
||||
func generateCode(s structInfo) {
|
||||
name := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
|
||||
if err != nil {
|
||||
@@ -261,7 +281,16 @@ func generateCode(name string, fs []field) {
|
||||
fmt.Println(string(bs))
|
||||
}
|
||||
|
||||
func generateDiagram(sn string, fs []field) {
|
||||
func uncamelize(s string) string {
|
||||
return regexp.MustCompile("[a-z][A-Z]").ReplaceAllStringFunc(s, func(camel string) string {
|
||||
return camel[:1] + " " + camel[1:]
|
||||
})
|
||||
}
|
||||
|
||||
func generateDiagram(s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Println(sn + " Structure:")
|
||||
fmt.Println()
|
||||
fmt.Println(" 0 1 2 3")
|
||||
@@ -272,28 +301,32 @@ func generateDiagram(sn string, fs []field) {
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
sl := f.IsSlice
|
||||
name := uncamelize(f.Name)
|
||||
|
||||
if sl {
|
||||
fmt.Printf("| %s |\n", center("Number of "+f.Name, 61))
|
||||
fmt.Printf("| %s |\n", center("Number of "+name, 61))
|
||||
fmt.Println(line)
|
||||
}
|
||||
switch tn {
|
||||
case "bool":
|
||||
fmt.Printf("| %s |V|\n", center(name+" (V=0 or 1)", 59))
|
||||
fmt.Println(line)
|
||||
case "uint16":
|
||||
fmt.Printf("| %s | %s |\n", center(f.Name, 29), center("0x0000", 29))
|
||||
fmt.Printf("| %s | %s |\n", center("0x0000", 29), center(name, 29))
|
||||
fmt.Println(line)
|
||||
case "uint32":
|
||||
fmt.Printf("| %s |\n", center(f.Name, 61))
|
||||
fmt.Printf("| %s |\n", center(name, 61))
|
||||
fmt.Println(line)
|
||||
case "int64", "uint64":
|
||||
fmt.Printf("| %-61s |\n", "")
|
||||
fmt.Printf("+ %s +\n", center(f.Name+" (64 bits)", 61))
|
||||
fmt.Printf("+ %s +\n", center(name+" (64 bits)", 61))
|
||||
fmt.Printf("| %-61s |\n", "")
|
||||
fmt.Println(line)
|
||||
case "string", "byte": // XXX We assume slice of byte!
|
||||
fmt.Printf("| %s |\n", center("Length of "+f.Name, 61))
|
||||
fmt.Printf("| %s |\n", center("Length of "+name, 61))
|
||||
fmt.Println(line)
|
||||
fmt.Printf("/ %61s /\n", "")
|
||||
fmt.Printf("\\ %s \\\n", center(f.Name+" (variable length)", 61))
|
||||
fmt.Printf("\\ %s \\\n", center(name+" (variable length)", 61))
|
||||
fmt.Printf("/ %61s /\n", "")
|
||||
fmt.Println(line)
|
||||
default:
|
||||
@@ -312,30 +345,35 @@ func generateDiagram(sn string, fs []field) {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func generateXdr(sn string, fs []field) {
|
||||
func generateXdr(s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Printf("struct %s {\n", sn)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
fn := f.Name
|
||||
suf := ""
|
||||
l := ""
|
||||
if f.Max > 0 {
|
||||
l = strconv.Itoa(f.Max)
|
||||
}
|
||||
if f.IsSlice {
|
||||
suf = "<>"
|
||||
suf = "<" + l + ">"
|
||||
}
|
||||
|
||||
switch tn {
|
||||
case "uint16":
|
||||
fmt.Printf("\tunsigned short %s%s;\n", fn, suf)
|
||||
case "uint32":
|
||||
case "uint16", "uint32":
|
||||
fmt.Printf("\tunsigned int %s%s;\n", fn, suf)
|
||||
case "int64":
|
||||
fmt.Printf("\thyper %s%s;\n", fn, suf)
|
||||
case "uint64":
|
||||
fmt.Printf("\tunsigned hyper %s%s;\n", fn, suf)
|
||||
case "string":
|
||||
fmt.Printf("\tstring %s<>;\n", fn)
|
||||
fmt.Printf("\tstring %s<%s>;\n", fn, l)
|
||||
case "byte":
|
||||
fmt.Printf("\topaque %s<>;\n", fn)
|
||||
fmt.Printf("\topaque %s<%s>;\n", fn, l)
|
||||
default:
|
||||
fmt.Printf("\t%s %s%s;\n", tn, fn, suf)
|
||||
}
|
||||
@@ -354,14 +392,15 @@ func center(s string, w int) string {
|
||||
return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
|
||||
}
|
||||
|
||||
func inspector(fset *token.FileSet) func(ast.Node) bool {
|
||||
func inspector(structs *[]structInfo) func(ast.Node) bool {
|
||||
return func(n ast.Node) bool {
|
||||
switch n := n.(type) {
|
||||
case *ast.TypeSpec:
|
||||
switch t := n.Type.(type) {
|
||||
case *ast.StructType:
|
||||
name := n.Name.Name
|
||||
handleStruct(name, t)
|
||||
fs := handleStruct(t)
|
||||
*structs = append(*structs, structInfo{name, fs})
|
||||
}
|
||||
return false
|
||||
default:
|
||||
@@ -371,23 +410,25 @@ func inspector(fset *token.FileSet) func(ast.Node) bool {
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.StringVar(&output, "output", "code", "code,xdr,diagram")
|
||||
flag.Parse()
|
||||
fname := flag.Arg(0)
|
||||
|
||||
// Create the AST by parsing src.
|
||||
fset := token.NewFileSet() // positions are relative to fset
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
//ast.Print(fset, f)
|
||||
|
||||
if output == "code" {
|
||||
headerTpl.Execute(os.Stdout, map[string]string{"Package": f.Name.Name})
|
||||
}
|
||||
|
||||
i := inspector(fset)
|
||||
var structs []structInfo
|
||||
i := inspector(&structs)
|
||||
ast.Inspect(f, i)
|
||||
|
||||
headerTpl.Execute(os.Stdout, map[string]string{"Package": f.Name.Name})
|
||||
for _, s := range structs {
|
||||
fmt.Printf("\n/*\n\n")
|
||||
generateDiagram(s)
|
||||
generateXdr(s)
|
||||
fmt.Printf("*/\n")
|
||||
generateCode(s)
|
||||
}
|
||||
}
|
||||
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = len(os.Getenv("XDRTRACE")) > 0
|
||||
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
|
||||
)
|
||||
|
||||
const maxDebugBytes = 32
|
||||
5
Godeps/_workspace/src/github.com/calmh/xdr/doc.go
generated
vendored
Normal file
5
Godeps/_workspace/src/github.com/calmh/xdr/doc.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
|
||||
package xdr
|
||||
75
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
75
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
// Contains all supported types
|
||||
type TestStruct struct {
|
||||
I int
|
||||
I8 int8
|
||||
UI8 uint8
|
||||
I16 int16
|
||||
UI16 uint16
|
||||
I32 int32
|
||||
UI32 uint32
|
||||
I64 int64
|
||||
UI64 uint64
|
||||
BS []byte
|
||||
S string
|
||||
C Opaque
|
||||
}
|
||||
|
||||
type Opaque [32]byte
|
||||
|
||||
func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
|
||||
return w.WriteRaw(u[:])
|
||||
}
|
||||
|
||||
func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
|
||||
return r.ReadRaw(u[:])
|
||||
}
|
||||
|
||||
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
var u Opaque
|
||||
for i := range u[:] {
|
||||
u[i] = byte(rand.Int())
|
||||
}
|
||||
return reflect.ValueOf(u)
|
||||
}
|
||||
|
||||
func TestEncDec(t *testing.T) {
|
||||
fn := func(t0 TestStruct) bool {
|
||||
bs := t0.MarshalXDR()
|
||||
var t1 TestStruct
|
||||
err := t1.UnmarshalXDR(bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
|
||||
if t0.I != t1.I ||
|
||||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
|
||||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
|
||||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
|
||||
bytes.Compare(t0.BS, t1.BS) != 0 ||
|
||||
t0.S != t1.S || t0.C != t1.C {
|
||||
t.Logf("%#v", t0)
|
||||
t.Logf("%#v", t1)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
136
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
136
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
TestStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | UI16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| UI32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ UI64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of BS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ BS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Opaque |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct TestStruct {
|
||||
int I;
|
||||
int8 I8;
|
||||
uint8 UI8;
|
||||
int16 I16;
|
||||
unsigned int UI16;
|
||||
int32 I32;
|
||||
unsigned int UI32;
|
||||
hyper I64;
|
||||
unsigned hyper UI64;
|
||||
opaque BS<>;
|
||||
string S<>;
|
||||
Opaque C;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o TestStruct) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(uint64(o.I))
|
||||
xw.WriteUint8(uint8(o.I8))
|
||||
xw.WriteUint8(o.UI8)
|
||||
xw.WriteUint16(uint16(o.I16))
|
||||
xw.WriteUint16(o.UI16)
|
||||
xw.WriteUint32(uint32(o.I32))
|
||||
xw.WriteUint32(o.UI32)
|
||||
xw.WriteUint64(uint64(o.I64))
|
||||
xw.WriteUint64(o.UI64)
|
||||
xw.WriteBytes(o.BS)
|
||||
xw.WriteString(o.S)
|
||||
_, err := o.C.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *TestStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I = int(xr.ReadUint64())
|
||||
o.I8 = int8(xr.ReadUint8())
|
||||
o.UI8 = xr.ReadUint8()
|
||||
o.I16 = int16(xr.ReadUint16())
|
||||
o.UI16 = xr.ReadUint16()
|
||||
o.I32 = int32(xr.ReadUint32())
|
||||
o.UI32 = xr.ReadUint32()
|
||||
o.I64 = int64(xr.ReadUint64())
|
||||
o.UI64 = xr.ReadUint64()
|
||||
o.BS = xr.ReadBytes()
|
||||
o.S = xr.ReadString()
|
||||
(&o.C).decodeXDR(xr)
|
||||
return xr.Error()
|
||||
}
|
||||
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
#!/bin/sh
|
||||
|
||||
go run cmd/genxdr/main.go -- bench_test.go > bench_xdr_test.go
|
||||
go run cmd/genxdr/main.go -- encdec_test.go > encdec_xdr_test.go
|
||||
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
return 0
|
||||
}
|
||||
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
d := l % 4
|
||||
if d == 0 {
|
||||
return 0
|
||||
}
|
||||
return 4 - d
|
||||
}
|
||||
164
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
Normal file
164
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var ErrElementSizeExceeded = errors.New("element size exceeded")
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reader) ReadRaw(bs []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, bs)
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadString() string {
|
||||
return r.ReadStringMax(0)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadStringMax(max int) string {
|
||||
buf := r.ReadBytesMaxInto(max, nil)
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh := reflect.StringHeader{
|
||||
Data: bh.Data,
|
||||
Len: bh.Len,
|
||||
}
|
||||
return *((*string)(unsafe.Pointer(&sh)))
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytes() []byte {
|
||||
return r.ReadBytesInto(nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMax(max int) []byte {
|
||||
return r.ReadBytesMaxInto(max, nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesInto(dst []byte) []byte {
|
||||
return r.ReadBytesMaxInto(0, dst)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
l := int(r.ReadUint32())
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
if max > 0 && l > max {
|
||||
r.err = ErrElementSizeExceeded
|
||||
return nil
|
||||
}
|
||||
|
||||
if fullLen := l + pad(l); fullLen > len(dst) {
|
||||
dst = make([]byte, fullLen)
|
||||
} else {
|
||||
dst = dst[:fullLen]
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, dst)
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if debug {
|
||||
if n > maxDebugBytes {
|
||||
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("rd bytes (%d): %x", len(dst), dst)
|
||||
}
|
||||
}
|
||||
return dst[:l]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBool() bool {
|
||||
return r.ReadUint8() != 0
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint32() uint32 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:4])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint32: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint32=%d (0x%08x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint64() uint64 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:8])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint64: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
|
||||
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint64=%d (0x%016x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
type XDRError struct {
|
||||
op string
|
||||
err error
|
||||
}
|
||||
|
||||
func (e XDRError) Error() string {
|
||||
return "xdr " + e.op + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (r *Reader) Error() error {
|
||||
if r.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"read", r.err}
|
||||
}
|
||||
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
import "io"
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:1])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint8: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
|
||||
}
|
||||
return r.b[0]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:2])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint16: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint16(r.b[1]) | uint16(r.b[0])<<8
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint16=%d (0x%04x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
15
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
Normal file
15
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
return uint8(r.ReadUint32())
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
return uint16(r.ReadUint32())
|
||||
}
|
||||
44
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build refl
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
refl "github.com/davecgh/go-xdr/xdr"
|
||||
)
|
||||
|
||||
func TestCompareMarshals(t *testing.T) {
|
||||
e0 := s.MarshalXDR()
|
||||
e1, err := refl.Marshal(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Compare(e0, e1) != 0 {
|
||||
t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflMarshal(b *testing.B) {
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
res, err = refl.Marshal(s)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := refl.Unmarshal(e, &t)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
82
xdr/writer.go → Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
82
xdr/writer.go → Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
@@ -1,14 +1,13 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import "io"
|
||||
|
||||
func pad(l int) int {
|
||||
d := l % 4
|
||||
if d == 0 {
|
||||
return 0
|
||||
}
|
||||
return 4 - d
|
||||
}
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var padBytes = []byte{0, 0, 0}
|
||||
|
||||
@@ -19,14 +18,37 @@ type Writer struct {
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
type AppendWriter []byte
|
||||
|
||||
func (w *AppendWriter) Write(bs []byte) (int, error) {
|
||||
*w = append(*w, bs...)
|
||||
return len(bs), nil
|
||||
}
|
||||
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteRaw(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
return n, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
return w.WriteBytes([]byte(s))
|
||||
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
|
||||
bh := reflect.SliceHeader{
|
||||
Data: sh.Data,
|
||||
Len: sh.Len,
|
||||
Cap: sh.Len,
|
||||
}
|
||||
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
@@ -39,6 +61,14 @@ func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
if len(bs) > maxDebugBytes {
|
||||
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("wr bytes (%d): %x", len(bs), bs)
|
||||
}
|
||||
}
|
||||
|
||||
var l, n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
l += n
|
||||
@@ -52,25 +82,23 @@ func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
func (w *Writer) WriteBool(v bool) (int, error) {
|
||||
if v {
|
||||
return w.WriteUint8(1)
|
||||
} else {
|
||||
return w.WriteUint8(0)
|
||||
}
|
||||
w.b[0] = byte(v >> 8)
|
||||
w.b[1] = byte(v)
|
||||
w.b[2] = 0
|
||||
w.b[3] = 0
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:4])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint32(v uint32) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint32=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 24)
|
||||
w.b[1] = byte(v >> 16)
|
||||
w.b[2] = byte(v >> 8)
|
||||
@@ -86,6 +114,11 @@ func (w *Writer) WriteUint64(v uint64) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint64=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 56)
|
||||
w.b[1] = byte(v >> 48)
|
||||
w.b[2] = byte(v >> 40)
|
||||
@@ -106,5 +139,8 @@ func (w *Writer) Tot() int {
|
||||
}
|
||||
|
||||
func (w *Writer) Error() error {
|
||||
return w.err
|
||||
if w.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"write", w.err}
|
||||
}
|
||||
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:1])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 8)
|
||||
w.b[1] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:2])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
92
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
Normal file
92
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
	"bytes"
	"testing"
	"testing/quick"
)

func TestBytesNil(t *testing.T) {
	fn := func(bs []byte) bool {
		var b = new(bytes.Buffer)
		var w = NewWriter(b)
		var r = NewReader(b)
		w.WriteBytes(bs)
		w.WriteBytes(bs)
		r.ReadBytes()
		res := r.ReadBytes()
		return bytes.Compare(bs, res) == 0
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Error(err)
	}
}

func TestBytesGiven(t *testing.T) {
	fn := func(bs []byte) bool {
		var b = new(bytes.Buffer)
		var w = NewWriter(b)
		var r = NewReader(b)
		w.WriteBytes(bs)
		w.WriteBytes(bs)
		res := make([]byte, 12)
		res = r.ReadBytesInto(res)
		res = r.ReadBytesInto(res)
		return bytes.Compare(bs, res) == 0
	}
	if err := quick.Check(fn, nil); err != nil {
		t.Error(err)
	}
}

func TestReadBytesMaxInto(t *testing.T) {
	var max = 64
	for tot := 32; tot < 128; tot++ {
		for diff := -32; diff <= 32; diff++ {
			var b = new(bytes.Buffer)
			var r = NewReader(b)
			var w = NewWriter(b)

			var toWrite = make([]byte, tot)
			w.WriteBytes(toWrite)

			var buf = make([]byte, tot+diff)
			var bs = r.ReadBytesMaxInto(max, buf)

			if tot <= max {
				if read := len(bs); read != tot {
					t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
				}
			} else if r.err != ErrElementSizeExceeded {
				t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
			}
		}
	}
}

func TestReadStringMax(t *testing.T) {
	for tot := 42; tot < 72; tot++ {
		for max := 0; max < 128; max++ {
			var b = new(bytes.Buffer)
			var r = NewReader(b)
			var w = NewWriter(b)

			var toWrite = make([]byte, tot)
			w.WriteBytes(toWrite)

			var str = r.ReadStringMax(max)
			var read = len(str)

			if max == 0 || tot <= max {
				if read != tot {
					t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
				}
			} else if r.err != ErrElementSizeExceeded {
				t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
			}
		}
	}
}
2  Godeps/_workspace/src/github.com/codegangsta/inject/.gitignore  (generated, vendored)
@@ -1,2 +0,0 @@
inject
inject.test
20  Godeps/_workspace/src/github.com/codegangsta/inject/LICENSE  (generated, vendored)
@@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2013 Jeremy Saenz

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
4  Godeps/_workspace/src/github.com/codegangsta/inject/README.md  (generated, vendored)
@@ -1,4 +0,0 @@
inject
======

Dependency injection for go
168  Godeps/_workspace/src/github.com/codegangsta/inject/inject.go  (generated, vendored)
@@ -1,168 +0,0 @@
|
||||
// Package inject provides utilities for mapping and injecting dependencies in various ways.
|
||||
package inject
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Injector represents an interface for mapping and injecting dependencies into structs
|
||||
// and function arguments.
|
||||
type Injector interface {
|
||||
Applicator
|
||||
Invoker
|
||||
TypeMapper
|
||||
// SetParent sets the parent of the injector. If the injector cannot find a
|
||||
// dependency in its Type map it will check its parent before returning an
|
||||
// error.
|
||||
SetParent(Injector)
|
||||
}
|
||||
|
||||
// Applicator represents an interface for mapping dependencies to a struct.
|
||||
type Applicator interface {
|
||||
// Maps dependencies in the Type map to each field in the struct
|
||||
// that is tagged with 'inject'. Returns an error if the injection
|
||||
// fails.
|
||||
Apply(interface{}) error
|
||||
}
|
||||
|
||||
// Invoker represents an interface for calling functions via reflection.
|
||||
type Invoker interface {
|
||||
// Invoke attempts to call the interface{} provided as a function,
|
||||
// providing dependencies for function arguments based on Type. Returns
|
||||
// a slice of reflect.Value representing the returned values of the function.
|
||||
// Returns an error if the injection fails.
|
||||
Invoke(interface{}) ([]reflect.Value, error)
|
||||
}
|
||||
|
||||
// TypeMapper represents an interface for mapping interface{} values based on type.
|
||||
type TypeMapper interface {
|
||||
// Maps the interface{} value based on its immediate type from reflect.TypeOf.
|
||||
Map(interface{}) TypeMapper
|
||||
// Maps the interface{} value based on the pointer of an Interface provided.
|
||||
// This is really only useful for mapping a value as an interface, as interfaces
|
||||
// cannot at this time be referenced directly without a pointer.
|
||||
MapTo(interface{}, interface{}) TypeMapper
|
||||
// Provides a possibility to directly insert a mapping based on type and value.
|
||||
// This makes it possible to directly map type arguments not possible to instantiate
|
||||
// with reflect like unidirectional channels.
|
||||
Set(reflect.Type, reflect.Value) TypeMapper
|
||||
// Returns the Value that is mapped to the current type. Returns a zeroed Value if
|
||||
// the Type has not been mapped.
|
||||
Get(reflect.Type) reflect.Value
|
||||
}
|
||||
|
||||
type injector struct {
|
||||
values map[reflect.Type]reflect.Value
|
||||
parent Injector
|
||||
}
|
||||
|
||||
// InterfaceOf dereferences a pointer to an Interface type.
|
||||
// It panics if value is not an pointer to an interface.
|
||||
func InterfaceOf(value interface{}) reflect.Type {
|
||||
t := reflect.TypeOf(value)
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Interface {
|
||||
panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// New returns a new Injector.
|
||||
func New() Injector {
|
||||
return &injector{
|
||||
values: make(map[reflect.Type]reflect.Value),
|
||||
}
|
||||
}
|
||||
|
||||
// Invoke attempts to call the interface{} provided as a function,
|
||||
// providing dependencies for function arguments based on Type.
|
||||
// Returns a slice of reflect.Value representing the returned values of the function.
|
||||
// Returns an error if the injection fails.
|
||||
// It panics if f is not a function
|
||||
func (inj *injector) Invoke(f interface{}) ([]reflect.Value, error) {
|
||||
t := reflect.TypeOf(f)
|
||||
|
||||
var in = make([]reflect.Value, t.NumIn()) //Panic if t is not kind of Func
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
argType := t.In(i)
|
||||
val := inj.Get(argType)
|
||||
if !val.IsValid() {
|
||||
return nil, fmt.Errorf("Value not found for type %v", argType)
|
||||
}
|
||||
|
||||
in[i] = val
|
||||
}
|
||||
|
||||
return reflect.ValueOf(f).Call(in), nil
|
||||
}
|
||||
|
||||
// Maps dependencies in the Type map to each field in the struct
|
||||
// that is tagged with 'inject'.
|
||||
// Returns an error if the injection fails.
|
||||
func (inj *injector) Apply(val interface{}) error {
|
||||
v := reflect.ValueOf(val)
|
||||
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Struct {
|
||||
return nil // Should not panic here ?
|
||||
}
|
||||
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := v.Field(i)
|
||||
structField := t.Field(i)
|
||||
if f.CanSet() && structField.Tag == "inject" {
|
||||
ft := f.Type()
|
||||
v := inj.Get(ft)
|
||||
if !v.IsValid() {
|
||||
return fmt.Errorf("Value not found for type %v", ft)
|
||||
}
|
||||
|
||||
f.Set(v)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Maps the concrete value of val to its dynamic type using reflect.TypeOf,
|
||||
// It returns the TypeMapper registered in.
|
||||
func (i *injector) Map(val interface{}) TypeMapper {
|
||||
i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
|
||||
i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
|
||||
return i
|
||||
}
|
||||
|
||||
// Maps the given reflect.Type to the given reflect.Value and returns
|
||||
// the Typemapper the mapping has been registered in.
|
||||
func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
|
||||
i.values[typ] = val
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *injector) Get(t reflect.Type) reflect.Value {
|
||||
val := i.values[t]
|
||||
if !val.IsValid() && i.parent != nil {
|
||||
val = i.parent.Get(t)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func (i *injector) SetParent(parent Injector) {
|
||||
i.parent = parent
|
||||
}
|
||||
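
The interfaces in inject.go above boil down to a small type-keyed registry: Map stores a value under its concrete type, MapTo stores it under an interface type, and Invoke calls a function with arguments resolved from that registry via reflection. A minimal sketch of that flow, using only calls defined in the file above; the Greeter interface and englishGreeter type are hypothetical, introduced only for illustration:

~~~ go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/codegangsta/inject"
)

// Greeter is a hypothetical interface used only for this illustration.
type Greeter interface {
	Greet() string
}

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

func main() {
	inj := inject.New()

	// Map registers the logger under its concrete type (*log.Logger).
	inj.Map(log.New(os.Stdout, "[demo] ", 0))

	// MapTo registers the value under the Greeter interface type;
	// the (*Greeter)(nil) pointer is how inject names an interface.
	inj.MapTo(englishGreeter{}, (*Greeter)(nil))

	// Invoke resolves each argument from the type map via reflection.
	vals, err := inj.Invoke(func(l *log.Logger, g Greeter) string {
		l.Println("invoking with injected arguments")
		return g.Greet()
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(vals[0].String()) // "hello"
}
~~~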
142  Godeps/_workspace/src/github.com/codegangsta/inject/inject_test.go  (generated, vendored)
@@ -1,142 +0,0 @@
|
||||
package inject_test
|
||||
|
||||
import (
|
||||
"github.com/codegangsta/inject"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type SpecialString interface {
|
||||
}
|
||||
|
||||
type TestStruct struct {
|
||||
Dep1 string `inject`
|
||||
Dep2 SpecialString `inject`
|
||||
Dep3 string
|
||||
}
|
||||
|
||||
/* Test Helpers */
|
||||
func expect(t *testing.T, a interface{}, b interface{}) {
|
||||
if a != b {
|
||||
t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
|
||||
}
|
||||
}
|
||||
|
||||
func refute(t *testing.T, a interface{}, b interface{}) {
|
||||
if a == b {
|
||||
t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
|
||||
}
|
||||
}
|
||||
|
||||
func Test_InjectorInvoke(t *testing.T) {
|
||||
injector := inject.New()
|
||||
expect(t, injector == nil, false)
|
||||
|
||||
dep := "some dependency"
|
||||
injector.Map(dep)
|
||||
dep2 := "another dep"
|
||||
injector.MapTo(dep2, (*SpecialString)(nil))
|
||||
dep3 := make(chan *SpecialString)
|
||||
dep4 := make(chan *SpecialString)
|
||||
typRecv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(dep3).Elem())
|
||||
typSend := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(dep4).Elem())
|
||||
injector.Set(typRecv, reflect.ValueOf(dep3))
|
||||
injector.Set(typSend, reflect.ValueOf(dep4))
|
||||
|
||||
_, err := injector.Invoke(func(d1 string, d2 SpecialString, d3 <-chan *SpecialString, d4 chan<- *SpecialString) {
|
||||
expect(t, d1, dep)
|
||||
expect(t, d2, dep2)
|
||||
expect(t, reflect.TypeOf(d3).Elem(), reflect.TypeOf(dep3).Elem())
|
||||
expect(t, reflect.TypeOf(d4).Elem(), reflect.TypeOf(dep4).Elem())
|
||||
expect(t, reflect.TypeOf(d3).ChanDir(), reflect.RecvDir)
|
||||
expect(t, reflect.TypeOf(d4).ChanDir(), reflect.SendDir)
|
||||
})
|
||||
|
||||
expect(t, err, nil)
|
||||
}
|
||||
|
||||
func Test_InjectorInvokeReturnValues(t *testing.T) {
|
||||
injector := inject.New()
|
||||
expect(t, injector == nil, false)
|
||||
|
||||
dep := "some dependency"
|
||||
injector.Map(dep)
|
||||
dep2 := "another dep"
|
||||
injector.MapTo(dep2, (*SpecialString)(nil))
|
||||
|
||||
result, err := injector.Invoke(func(d1 string, d2 SpecialString) string {
|
||||
expect(t, d1, dep)
|
||||
expect(t, d2, dep2)
|
||||
return "Hello world"
|
||||
})
|
||||
|
||||
expect(t, result[0].String(), "Hello world")
|
||||
expect(t, err, nil)
|
||||
}
|
||||
|
||||
func Test_InjectorApply(t *testing.T) {
|
||||
injector := inject.New()
|
||||
|
||||
injector.Map("a dep").MapTo("another dep", (*SpecialString)(nil))
|
||||
|
||||
s := TestStruct{}
|
||||
err := injector.Apply(&s)
|
||||
expect(t, err, nil)
|
||||
|
||||
expect(t, s.Dep1, "a dep")
|
||||
expect(t, s.Dep2, "another dep")
|
||||
}
|
||||
|
||||
func Test_InterfaceOf(t *testing.T) {
|
||||
iType := inject.InterfaceOf((*SpecialString)(nil))
|
||||
expect(t, iType.Kind(), reflect.Interface)
|
||||
|
||||
iType = inject.InterfaceOf((**SpecialString)(nil))
|
||||
expect(t, iType.Kind(), reflect.Interface)
|
||||
|
||||
// Expecting nil
|
||||
defer func() {
|
||||
rec := recover()
|
||||
refute(t, rec, nil)
|
||||
}()
|
||||
iType = inject.InterfaceOf((*testing.T)(nil))
|
||||
}
|
||||
|
||||
func Test_InjectorSet(t *testing.T) {
|
||||
injector := inject.New()
|
||||
typ := reflect.TypeOf("string")
|
||||
typSend := reflect.ChanOf(reflect.SendDir, typ)
|
||||
typRecv := reflect.ChanOf(reflect.RecvDir, typ)
|
||||
|
||||
// instantiating unidirectional channels is not possible using reflect
|
||||
// http://golang.org/src/pkg/reflect/value.go?s=60463:60504#L2064
|
||||
chanRecv := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0)
|
||||
chanSend := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0)
|
||||
|
||||
injector.Set(typSend, chanSend)
|
||||
injector.Set(typRecv, chanRecv)
|
||||
|
||||
expect(t, injector.Get(typSend).IsValid(), true)
|
||||
expect(t, injector.Get(typRecv).IsValid(), true)
|
||||
expect(t, injector.Get(chanSend.Type()).IsValid(), false)
|
||||
}
|
||||
|
||||
|
||||
func Test_InjectorGet(t *testing.T) {
|
||||
injector := inject.New()
|
||||
|
||||
injector.Map("some dependency")
|
||||
|
||||
expect(t, injector.Get(reflect.TypeOf("string")).IsValid(), true)
|
||||
expect(t, injector.Get(reflect.TypeOf(11)).IsValid(), false)
|
||||
}
|
||||
|
||||
func Test_InjectorSetParent(t *testing.T) {
|
||||
injector := inject.New()
|
||||
injector.MapTo("another dep", (*SpecialString)(nil))
|
||||
|
||||
injector2 := inject.New()
|
||||
injector2.SetParent(injector)
|
||||
|
||||
expect(t, injector2.Get(inject.InterfaceOf((*SpecialString)(nil))).IsValid(), true)
|
||||
}
|
||||
23  Godeps/_workspace/src/github.com/codegangsta/martini/.gitignore  (generated, vendored)
@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
20  Godeps/_workspace/src/github.com/codegangsta/martini/LICENSE  (generated, vendored)
@@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2013 Jeremy Saenz

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
345  Godeps/_workspace/src/github.com/codegangsta/martini/README.md  (generated, vendored)
@@ -1,345 +0,0 @@
|
||||
# Martini [](https://app.wercker.com/project/bykey/174bef7e3c999e103cacfe2770102266) [](http://godoc.org/github.com/codegangsta/martini)
|
||||
|
||||
Martini is a powerful package for quickly writing modular web applications/services in Golang.
|
||||
|
||||
Language Translations: [Simplified Chinese (zh_CN)](translations/README_zh_cn.md)
|
||||
|
||||
|
||||
## Getting Started
|
||||
|
||||
After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`.
|
||||
|
||||
~~~ go
|
||||
package main
|
||||
|
||||
import "github.com/codegangsta/martini"
|
||||
|
||||
func main() {
|
||||
m := martini.Classic()
|
||||
m.Get("/", func() string {
|
||||
return "Hello world!"
|
||||
})
|
||||
m.Run()
|
||||
}
|
||||
~~~
|
||||
|
||||
Then install the Martini package (**go 1.1** and greater is required):
|
||||
~~~
|
||||
go get github.com/codegangsta/martini
|
||||
~~~
|
||||
|
||||
Then run your server:
|
||||
~~~
|
||||
go run server.go
|
||||
~~~
|
||||
|
||||
You will now have a Martini webserver running on `localhost:3000`.
|
||||
|
||||
## Getting Help
|
||||
|
||||
Join the [Mailing list](https://groups.google.com/forum/#!forum/martini-go)
|
||||
|
||||
Watch the [Demo Video](http://martini.codegangsta.io/#demo)
|
||||
|
||||
## Features
|
||||
* Extremely simple to use.
|
||||
* Non-intrusive design.
|
||||
* Plays nice with other Golang packages.
|
||||
* Awesome path matching and routing.
|
||||
* Modular design - Easy to add functionality, easy to rip stuff out.
|
||||
* Lots of good handlers/middlewares to use.
|
||||
* Great 'out of the box' feature set.
|
||||
* **Fully compatible with the [http.HandlerFunc](http://godoc.org/net/http#HandlerFunc) interface.**
|
||||
|
||||
## More Middleware
|
||||
For more middleware and functionality, check out the repositories in the [martini-contrib](https://github.com/martini-contrib) organization.
|
||||
|
||||
## Table of Contents
|
||||
* [Classic Martini](#classic-martini)
|
||||
* [Handlers](#handlers)
|
||||
* [Routing](#routing)
|
||||
* [Services](#services)
|
||||
* [Serving Static Files](#serving-static-files)
|
||||
* [Middleware Handlers](#middleware-handlers)
|
||||
* [Next()](#next)
|
||||
* [Martini Env](#martini-env)
|
||||
* [FAQ](#faq)
|
||||
|
||||
## Classic Martini
|
||||
To get up and running quickly, [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) provides some reasonable defaults that work well for most web applications:
|
||||
~~~ go
|
||||
m := martini.Classic()
|
||||
// ... middleware and routing goes here
|
||||
m.Run()
|
||||
~~~
|
||||
|
||||
Below is some of the functionality [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) pulls in automatically:
|
||||
* Request/Response Logging - [martini.Logger](http://godoc.org/github.com/codegangsta/martini#Logger)
|
||||
* Panic Recovery - [martini.Recovery](http://godoc.org/github.com/codegangsta/martini#Recovery)
|
||||
* Static File serving - [martini.Static](http://godoc.org/github.com/codegangsta/martini#Static)
|
||||
* Routing - [martini.Router](http://godoc.org/github.com/codegangsta/martini#Router)
|
||||
|
||||
### Handlers
|
||||
Handlers are the heart and soul of Martini. A handler is basically any kind of callable function:
|
||||
~~~ go
|
||||
m.Get("/", func() {
|
||||
println("hello world")
|
||||
})
|
||||
~~~
|
||||
|
||||
#### Return Values
|
||||
If a handler returns something, Martini will write the result to the current [http.ResponseWriter](http://godoc.org/net/http#ResponseWriter) as a string:
|
||||
~~~ go
|
||||
m.Get("/", func() string {
|
||||
return "hello world" // HTTP 200 : "hello world"
|
||||
})
|
||||
~~~
|
||||
|
||||
You can also optionally return a status code:
|
||||
~~~ go
|
||||
m.Get("/", func() (int, string) {
|
||||
return 418, "i'm a teapot" // HTTP 418 : "i'm a teapot"
|
||||
})
|
||||
~~~
|
||||
|
||||
#### Service Injection
|
||||
Handlers are invoked via reflection. Martini makes use of *Dependency Injection* to resolve dependencies in a Handlers argument list. **This makes Martini completely compatible with golang's `http.HandlerFunc` interface.**
|
||||
|
||||
If you add an argument to your Handler, Martini will search its list of services and attempt to resolve the dependency via type assertion:
|
||||
~~~ go
|
||||
m.Get("/", func(res http.ResponseWriter, req *http.Request) { // res and req are injected by Martini
|
||||
res.WriteHeader(200) // HTTP 200
|
||||
})
|
||||
~~~
|
||||
|
||||
The following services are included with [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic):
|
||||
* [*log.Logger](http://godoc.org/log#Logger) - Global logger for Martini.
|
||||
* [martini.Context](http://godoc.org/github.com/codegangsta/martini#Context) - http request context.
|
||||
* [martini.Params](http://godoc.org/github.com/codegangsta/martini#Params) - `map[string]string` of named params found by route matching.
|
||||
* [martini.Routes](http://godoc.org/github.com/codegangsta/martini#Routes) - Route helper service.
|
||||
* [http.ResponseWriter](http://godoc.org/net/http/#ResponseWriter) - http Response writer interface.
|
||||
* [*http.Request](http://godoc.org/net/http/#Request) - http Request.
|
||||
|
||||
### Routing
|
||||
In Martini, a route is an HTTP method paired with a URL-matching pattern.
|
||||
Each route can take one or more handler methods:
|
||||
~~~ go
|
||||
m.Get("/", func() {
|
||||
// show something
|
||||
})
|
||||
|
||||
m.Patch("/", func() {
|
||||
// update something
|
||||
})
|
||||
|
||||
m.Post("/", func() {
|
||||
// create something
|
||||
})
|
||||
|
||||
m.Put("/", func() {
|
||||
// replace something
|
||||
})
|
||||
|
||||
m.Delete("/", func() {
|
||||
// destroy something
|
||||
})
|
||||
|
||||
m.Options("/", func() {
|
||||
// http options
|
||||
})
|
||||
|
||||
m.NotFound(func() {
|
||||
// handle 404
|
||||
})
|
||||
~~~
|
||||
|
||||
Routes are matched in the order they are defined. The first route that
|
||||
matches the request is invoked.
|
||||
|
||||
Route patterns may include named parameters, accessible via the [martini.Params](http://godoc.org/github.com/codegangsta/martini#Params) service:
|
||||
~~~ go
|
||||
m.Get("/hello/:name", func(params martini.Params) string {
|
||||
return "Hello " + params["name"]
|
||||
})
|
||||
~~~
|
||||
|
||||
Routes can be matched with regular expressions and globs as well:
|
||||
~~~ go
|
||||
m.Get("/hello/**", func(params martini.Params) string {
|
||||
return "Hello " + params["_1"]
|
||||
})
|
||||
~~~
|
||||
|
||||
Route handlers can be stacked on top of each other, which is useful for things like authentication and authorization:
|
||||
~~~ go
|
||||
m.Get("/secret", authorize, func() {
|
||||
// this will execute as long as authorize doesn't write a response
|
||||
})
|
||||
~~~
|
||||
|
||||
Route groups can be added too using the Group method.
|
||||
~~~ go
|
||||
m.Group("/books", func(r Router) {
|
||||
r.Get("/:id", GetBooks)
|
||||
r.Post("/new", NewBook)
|
||||
r.Put("/update/:id", UpdateBook)
|
||||
r.Delete("/delete/:id", DeleteBook)
|
||||
})
|
||||
~~~
|
||||
|
||||
Just like you can pass middlewares to a handler you can pass middlewares to groups.
|
||||
~~~ go
|
||||
m.Group("/books", func(r Router) {
|
||||
r.Get("/:id", GetBooks)
|
||||
r.Post("/new", NewBook)
|
||||
r.Put("/update/:id", UpdateBook)
|
||||
r.Delete("/delete/:id", DeleteBook)
|
||||
}, MyMiddleware1, MyMiddleware2)
|
||||
~~~
|
||||
|
||||
### Services
|
||||
Services are objects that are available to be injected into a Handler's argument list. You can map a service on a *Global* or *Request* level.
|
||||
|
||||
#### Global Mapping
|
||||
A Martini instance implements the inject.Injector interface, so mapping a service is easy:
|
||||
~~~ go
|
||||
db := &MyDatabase{}
|
||||
m := martini.Classic()
|
||||
m.Map(db) // the service will be available to all handlers as *MyDatabase
|
||||
// ...
|
||||
m.Run()
|
||||
~~~
|
||||
|
||||
#### Request-Level Mapping
|
||||
Mapping on the request level can be done in a handler via [martini.Context](http://godoc.org/github.com/codegangsta/martini#Context):
|
||||
~~~ go
|
||||
func MyCustomLoggerHandler(c martini.Context, req *http.Request) {
|
||||
logger := &MyCustomLogger{req}
|
||||
c.Map(logger) // mapped as *MyCustomLogger
|
||||
}
|
||||
~~~
|
||||
|
||||
#### Mapping values to Interfaces
|
||||
One of the most powerful parts about services is the ability to map a service to an interface. For instance, if you wanted to override the [http.ResponseWriter](http://godoc.org/net/http#ResponseWriter) with an object that wrapped it and performed extra operations, you can write the following handler:
|
||||
~~~ go
|
||||
func WrapResponseWriter(res http.ResponseWriter, c martini.Context) {
|
||||
rw := NewSpecialResponseWriter(res)
|
||||
c.MapTo(rw, (*http.ResponseWriter)(nil)) // override ResponseWriter with our wrapper ResponseWriter
|
||||
}
|
||||
~~~
|
||||
|
||||
### Serving Static Files
|
||||
A [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) instance automatically serves static files from the "public" directory in the root of your server.
|
||||
You can serve from more directories by adding more [martini.Static](http://godoc.org/github.com/codegangsta/martini#Static) handlers.
|
||||
~~~ go
|
||||
m.Use(martini.Static("assets")) // serve from the "assets" directory as well
|
||||
~~~
|
||||
|
||||
## Middleware Handlers
|
||||
Middleware Handlers sit between the incoming http request and the router. In essence they are no different than any other Handler in Martini. You can add a middleware handler to the stack like so:
|
||||
~~~ go
|
||||
m.Use(func() {
|
||||
// do some middleware stuff
|
||||
})
|
||||
~~~
|
||||
|
||||
You can have full control over the middleware stack with the `Handlers` function. This will replace any handlers that have been previously set:
|
||||
~~~ go
|
||||
m.Handlers(
|
||||
Middleware1,
|
||||
Middleware2,
|
||||
Middleware3,
|
||||
)
|
||||
~~~
|
||||
|
||||
Middleware Handlers work really well for things like logging, authorization, authentication, sessions, gzipping, error pages and any other operations that must happen before or after an http request:
|
||||
~~~ go
|
||||
// validate an api key
|
||||
m.Use(func(res http.ResponseWriter, req *http.Request) {
|
||||
if req.Header.Get("X-API-KEY") != "secret123" {
|
||||
res.WriteHeader(http.StatusUnauthorized)
|
||||
}
|
||||
})
|
||||
~~~
|
||||
|
||||
### Next()
|
||||
[Context.Next()](http://godoc.org/github.com/codegangsta/martini#Context) is an optional function that Middleware Handlers can call to yield the until after the other Handlers have been executed. This works really well for any operations that must happen after an http request:
|
||||
~~~ go
|
||||
// log before and after a request
|
||||
m.Use(func(c martini.Context, log *log.Logger){
|
||||
log.Println("before a request")
|
||||
|
||||
c.Next()
|
||||
|
||||
log.Println("after a request")
|
||||
})
|
||||
~~~
|
||||
|
||||
## Martini Env
|
||||
|
||||
Some Martini handlers make use of the `martini.Env` global variable to provide special functionality for development environments vs production environments. It is reccomended that the `MARTINI_ENV=production` environment variable to be set when deploying a Martini server into a production environment.
|
||||
|
||||
## FAQ
|
||||
|
||||
### Where do I find middleware X?
|
||||
|
||||
Start by looking in the [martini-contrib](https://github.com/martini-contrib) projects. If it is not there feel free to contact a martini-contrib team member about adding a new repo to the organization.
|
||||
|
||||
* [auth](https://github.com/martini-contrib/auth) - Handlers for authentication.
|
||||
* [binding](https://github.com/martini-contrib/binding) - Handler for mapping/validating a raw request into a structure.
|
||||
* [gzip](https://github.com/martini-contrib/gzip) - Handler for adding gzip compress to requests
|
||||
* [render](https://github.com/martini-contrib/render) - Handler that provides a service for easily rendering JSON and HTML templates.
|
||||
* [acceptlang](https://github.com/martini-contrib/acceptlang) - Handler for parsing the `Accept-Language` HTTP header.
|
||||
* [sessions](https://github.com/martini-contrib/sessions) - Handler that provides a Session service.
|
||||
* [strip](https://github.com/martini-contrib/strip) - URL Prefix stripping.
|
||||
* [method](https://github.com/martini-contrib/method) - HTTP method overriding via Header or form fields.
|
||||
* [secure](https://github.com/martini-contrib/secure) - Implements a few quick security wins.
|
||||
* [encoder](https://github.com/martini-contrib/encoder) - Encoder service for rendering data in several formats and content negotiation.
|
||||
* [cors](https://github.com/martini-contrib/cors) - Handler that enables CORS support.
|
||||
* [oauth2](https://github.com/martini-contrib/oauth2) - Handler that provides OAuth 2.0 login for Martini apps. Google Sign-in, Facebook Connect and Github login is supported.
|
||||
|
||||
### How do I integrate with existing servers?
|
||||
|
||||
A Martini instance implements `http.Handler`, so it can easily be used to serve subtrees
|
||||
on existing Go servers. For example this is a working Martini app for Google App Engine:
|
||||
|
||||
~~~ go
|
||||
package hello
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/codegangsta/martini"
|
||||
)
|
||||
|
||||
func init() {
|
||||
m := martini.Classic()
|
||||
m.Get("/", func() string {
|
||||
return "Hello world!"
|
||||
})
|
||||
http.Handle("/", m)
|
||||
}
|
||||
~~~
|
||||
|
||||
### How do I change the port/host?
|
||||
|
||||
Martini's `Run` function looks for the PORT and HOST environment variables and uses those. Otherwise Martini will default to localhost:3000.
|
||||
To have more flexibility over port and host, use the `http.ListenAndServe` function instead.
|
||||
|
||||
~~~ go
|
||||
m := martini.Classic()
|
||||
// ...
|
||||
log.Fatal(http.ListenAndServe(":8080", m))
|
||||
~~~
|
||||
|
||||
### Live code reload?
|
||||
|
||||
[gin](https://github.com/codegangsta/gin) and [fresh](https://github.com/pilu/fresh) both live reload martini apps.
|
||||
|
||||
## Contributing
|
||||
Martini is meant to be kept tiny and clean. Most contributions should end up in a repository in the [martini-contrib](https://github.com/martini-contrib) organization. If you do have a contribution for the core of Martini feel free to put up a Pull Request.
|
||||
|
||||
## About
|
||||
|
||||
Inspired by [express](https://github.com/visionmedia/express) and [sinatra](https://github.com/sinatra/sinatra)
|
||||
|
||||
Martini is obsessively designed by none other than the [Code Gangsta](http://codegangsta.io/)
|
||||
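
Tying the README's "Martini Env" note to the env.go source that follows: handlers can branch on the package-level martini.Env variable, which init() seeds from MARTINI_ENV. A small sketch under those assumptions; the extra per-request logging middleware is illustrative, not part of Martini:

~~~ go
package main

import (
	"log"
	"net/http"

	"github.com/codegangsta/martini"
)

func main() {
	m := martini.Classic()

	// Only add the chatty middleware while in development;
	// martini.Env is "production" when MARTINI_ENV=production is set.
	if martini.Env == martini.Dev {
		m.Use(func(req *http.Request, l *log.Logger) {
			l.Printf("dev only: %d header(s) on %s", len(req.Header), req.URL.Path)
		})
	}

	m.Get("/", func() string {
		return "Hello world!"
	})
	m.Run()
}
~~~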
25  Godeps/_workspace/src/github.com/codegangsta/martini/env.go  (generated, vendored)
@@ -1,25 +0,0 @@
package martini

import (
	"os"
)

// Envs
const (
	Dev  string = "development"
	Prod string = "production"
	Test string = "test"
)

// Env is the environment that Martini is executing in. The MARTINI_ENV is read on initialization to set this variable.
var Env = Dev

func setENV(e string) {
	if len(e) > 0 {
		Env = e
	}
}

func init() {
	setENV(os.Getenv("MARTINI_ENV"))
}
22  Godeps/_workspace/src/github.com/codegangsta/martini/env_test.go  (generated, vendored)
@@ -1,22 +0,0 @@
package martini

import (
	"testing"
)

func Test_SetENV(t *testing.T) {
	tests := []struct {
		in  string
		out string
	}{
		{"", "development"},
		{"not_development", "not_development"},
	}

	for _, test := range tests {
		setENV(test.in)
		if Env != test.out {
			expect(t, Env, test.out)
		}
	}
}
7  Godeps/_workspace/src/github.com/codegangsta/martini/go_version.go  (generated, vendored)
@@ -1,7 +0,0 @@
// +build !go1.1

package martini

func MartiniDoesNotSupportGo1Point0() {
	"Martini requires Go 1.1 or greater."
}
20  Godeps/_workspace/src/github.com/codegangsta/martini/logger.go  (generated, vendored)
@@ -1,20 +0,0 @@
package martini

import (
	"log"
	"net/http"
	"time"
)

// Logger returns a middleware handler that logs the request as it goes in and the response as it goes out.
func Logger() Handler {
	return func(res http.ResponseWriter, req *http.Request, c Context, log *log.Logger) {
		start := time.Now()
		log.Printf("Started %s %s", req.Method, req.URL.Path)

		rw := res.(ResponseWriter)
		c.Next()

		log.Printf("Completed %v %s in %v\n", rw.Status(), http.StatusText(rw.Status()), time.Since(start))
	}
}
31  Godeps/_workspace/src/github.com/codegangsta/martini/logger_test.go  (generated, vendored)
@@ -1,31 +0,0 @@
package martini

import (
	"bytes"
	"log"
	"net/http"
	"net/http/httptest"
	"testing"
)

func Test_Logger(t *testing.T) {
	buff := bytes.NewBufferString("")
	recorder := httptest.NewRecorder()

	m := New()
	// replace log for testing
	m.Map(log.New(buff, "[martini] ", 0))
	m.Use(Logger())
	m.Use(func(res http.ResponseWriter) {
		res.WriteHeader(http.StatusNotFound)
	})

	req, err := http.NewRequest("GET", "http://localhost:3000/foobar", nil)
	if err != nil {
		t.Error(err)
	}

	m.ServeHTTP(recorder, req)
	expect(t, recorder.Code, http.StatusNotFound)
	refute(t, len(buff.String()), 0)
}
173  Godeps/_workspace/src/github.com/codegangsta/martini/martini.go  (generated, vendored)
@@ -1,173 +0,0 @@
|
||||
// Package martini is a powerful package for quickly writing modular web applications/services in Golang.
|
||||
//
|
||||
// For a full guide visit http://github.com/codegangsta/martini
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import "github.com/codegangsta/martini"
|
||||
//
|
||||
// func main() {
|
||||
// m := martini.Classic()
|
||||
//
|
||||
// m.Get("/", func() string {
|
||||
// return "Hello world!"
|
||||
// })
|
||||
//
|
||||
// m.Run()
|
||||
// }
|
||||
package martini
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/codegangsta/inject"
|
||||
)
|
||||
|
||||
// Martini represents the top level web application. inject.Injector methods can be invoked to map services on a global level.
|
||||
type Martini struct {
|
||||
inject.Injector
|
||||
handlers []Handler
|
||||
action Handler
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// New creates a bare bones Martini instance. Use this method if you want to have full control over the middleware that is used.
|
||||
func New() *Martini {
|
||||
m := &Martini{Injector: inject.New(), action: func() {}, logger: log.New(os.Stdout, "[martini] ", 0)}
|
||||
m.Map(m.logger)
|
||||
m.Map(defaultReturnHandler())
|
||||
return m
|
||||
}
|
||||
|
||||
// Handlers sets the entire middleware stack with the given Handlers. This will clear any current middleware handlers.
|
||||
// Will panic if any of the handlers is not a callable function
|
||||
func (m *Martini) Handlers(handlers ...Handler) {
|
||||
m.handlers = make([]Handler, 0)
|
||||
for _, handler := range handlers {
|
||||
m.Use(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Action sets the handler that will be called after all the middleware has been invoked. This is set to martini.Router in a martini.Classic().
|
||||
func (m *Martini) Action(handler Handler) {
|
||||
validateHandler(handler)
|
||||
m.action = handler
|
||||
}
|
||||
|
||||
// Use adds a middleware Handler to the stack. Will panic if the handler is not a callable func. Middleware Handlers are invoked in the order that they are added.
|
||||
func (m *Martini) Use(handler Handler) {
|
||||
validateHandler(handler)
|
||||
|
||||
m.handlers = append(m.handlers, handler)
|
||||
}
|
||||
|
||||
// ServeHTTP is the HTTP Entry point for a Martini instance. Useful if you want to control your own HTTP server.
|
||||
func (m *Martini) ServeHTTP(res http.ResponseWriter, req *http.Request) {
|
||||
m.createContext(res, req).run()
|
||||
}
|
||||
|
||||
// Run the http server. Listening on os.GetEnv("PORT") or 3000 by default.
|
||||
func (m *Martini) Run() {
|
||||
port := os.Getenv("PORT")
|
||||
if port == "" {
|
||||
port = "3000"
|
||||
}
|
||||
|
||||
host := os.Getenv("HOST")
|
||||
|
||||
m.logger.Println("listening on " + host + ":" + port)
|
||||
m.logger.Fatalln(http.ListenAndServe(host+":"+port, m))
|
||||
}
|
||||
|
||||
func (m *Martini) createContext(res http.ResponseWriter, req *http.Request) *context {
|
||||
c := &context{inject.New(), m.handlers, m.action, NewResponseWriter(res), 0}
|
||||
c.SetParent(m)
|
||||
c.MapTo(c, (*Context)(nil))
|
||||
c.MapTo(c.rw, (*http.ResponseWriter)(nil))
|
||||
c.Map(req)
|
||||
return c
|
||||
}
|
||||
|
||||
// ClassicMartini represents a Martini with some reasonable defaults. Embeds the router functions for convenience.
|
||||
type ClassicMartini struct {
|
||||
*Martini
|
||||
Router
|
||||
}
|
||||
|
||||
// Classic creates a classic Martini with some basic default middleware - martini.Logger, martini.Recovery and martini.Static.
|
||||
// Classic also maps martini.Routes as a service.
|
||||
func Classic() *ClassicMartini {
|
||||
r := NewRouter()
|
||||
m := New()
|
||||
m.Use(Logger())
|
||||
m.Use(Recovery())
|
||||
m.Use(Static("public"))
|
||||
m.MapTo(r, (*Routes)(nil))
|
||||
m.Action(r.Handle)
|
||||
return &ClassicMartini{m, r}
|
||||
}
|
||||
|
||||
// Handler can be any callable function. Martini attempts to inject services into the handler's argument list.
|
||||
// Martini will panic if an argument could not be fullfilled via dependency injection.
|
||||
type Handler interface{}
|
||||
|
||||
func validateHandler(handler Handler) {
|
||||
if reflect.TypeOf(handler).Kind() != reflect.Func {
|
||||
panic("martini handler must be a callable func")
|
||||
}
|
||||
}
|
||||
|
||||
// Context represents a request context. Services can be mapped on the request level from this interface.
|
||||
type Context interface {
|
||||
inject.Injector
|
||||
// Next is an optional function that Middleware Handlers can call to yield the until after
|
||||
// the other Handlers have been executed. This works really well for any operations that must
|
||||
// happen after an http request
|
||||
Next()
|
||||
// Written returns whether or not the response for this context has been written.
|
||||
Written() bool
|
||||
}
|
||||
|
||||
type context struct {
|
||||
inject.Injector
|
||||
handlers []Handler
|
||||
action Handler
|
||||
rw ResponseWriter
|
||||
index int
|
||||
}
|
||||
|
||||
func (c *context) handler() Handler {
|
||||
if c.index < len(c.handlers) {
|
||||
return c.handlers[c.index]
|
||||
}
|
||||
if c.index == len(c.handlers) {
|
||||
return c.action
|
||||
}
|
||||
panic("invalid index for context handler")
|
||||
}
|
||||
|
||||
func (c *context) Next() {
|
||||
c.index += 1
|
||||
c.run()
|
||||
}
|
||||
|
||||
func (c *context) Written() bool {
|
||||
return c.rw.Written()
|
||||
}
|
||||
|
||||
func (c *context) run() {
|
||||
for c.index <= len(c.handlers) {
|
||||
_, err := c.Invoke(c.handler())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c.index += 1
|
||||
|
||||
if c.Written() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
141  Godeps/_workspace/src/github.com/codegangsta/martini/martini_test.go  (generated, vendored)
@@ -1,141 +0,0 @@
|
||||
package martini
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
/* Test Helpers */
|
||||
func expect(t *testing.T, a interface{}, b interface{}) {
|
||||
if a != b {
|
||||
t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
|
||||
}
|
||||
}
|
||||
|
||||
func refute(t *testing.T, a interface{}, b interface{}) {
|
||||
if a == b {
|
||||
t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
|
||||
}
|
||||
}
|
||||
|
||||
func Test_New(t *testing.T) {
|
||||
m := New()
|
||||
if m == nil {
|
||||
t.Error("martini.New() cannot return nil")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Martini_Run(t *testing.T) {
|
||||
// just test that Run doesn't bomb
|
||||
go New().Run()
|
||||
}
|
||||
|
||||
func Test_Martini_ServeHTTP(t *testing.T) {
|
||||
result := ""
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := New()
|
||||
m.Use(func(c Context) {
|
||||
result += "foo"
|
||||
c.Next()
|
||||
result += "ban"
|
||||
})
|
||||
m.Use(func(c Context) {
|
||||
result += "bar"
|
||||
c.Next()
|
||||
result += "baz"
|
||||
})
|
||||
m.Action(func(res http.ResponseWriter, req *http.Request) {
|
||||
result += "bat"
|
||||
res.WriteHeader(http.StatusBadRequest)
|
||||
})
|
||||
|
||||
m.ServeHTTP(response, (*http.Request)(nil))
|
||||
|
||||
expect(t, result, "foobarbatbazban")
|
||||
expect(t, response.Code, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func Test_Martini_Handlers(t *testing.T) {
|
||||
result := ""
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
batman := func(c Context) {
|
||||
result += "batman!"
|
||||
}
|
||||
|
||||
m := New()
|
||||
m.Use(func(c Context) {
|
||||
result += "foo"
|
||||
c.Next()
|
||||
result += "ban"
|
||||
})
|
||||
m.Handlers(
|
||||
batman,
|
||||
batman,
|
||||
batman,
|
||||
)
|
||||
m.Action(func(res http.ResponseWriter, req *http.Request) {
|
||||
result += "bat"
|
||||
res.WriteHeader(http.StatusBadRequest)
|
||||
})
|
||||
|
||||
m.ServeHTTP(response, (*http.Request)(nil))
|
||||
|
||||
expect(t, result, "batman!batman!batman!bat")
|
||||
expect(t, response.Code, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func Test_Martini_EarlyWrite(t *testing.T) {
|
||||
result := ""
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := New()
|
||||
m.Use(func(res http.ResponseWriter) {
|
||||
result += "foobar"
|
||||
res.Write([]byte("Hello world"))
|
||||
})
|
||||
m.Use(func() {
|
||||
result += "bat"
|
||||
})
|
||||
m.Action(func(res http.ResponseWriter) {
|
||||
result += "baz"
|
||||
res.WriteHeader(http.StatusBadRequest)
|
||||
})
|
||||
|
||||
m.ServeHTTP(response, (*http.Request)(nil))
|
||||
|
||||
expect(t, result, "foobar")
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
func Test_Martini_Written(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := New()
|
||||
m.Handlers(func(res http.ResponseWriter) {
|
||||
res.WriteHeader(http.StatusOK)
|
||||
})
|
||||
|
||||
ctx := m.createContext(response, (*http.Request)(nil))
|
||||
expect(t, ctx.Written(), false)
|
||||
|
||||
ctx.run()
|
||||
expect(t, ctx.Written(), true)
|
||||
}
|
||||
|
||||
func Test_Martini_Basic_NoRace(t *testing.T) {
|
||||
m := New()
|
||||
handlers := []Handler{func() {}, func() {}}
|
||||
// Ensure append will not realloc to trigger the race condition
|
||||
m.handlers = handlers[:1]
|
||||
req, _ := http.NewRequest("GET", "/", nil)
|
||||
for i := 0; i < 2; i++ {
|
||||
go func() {
|
||||
response := httptest.NewRecorder()
|
||||
m.ServeHTTP(response, req)
|
||||
}()
|
||||
}
|
||||
}
|
||||
142  Godeps/_workspace/src/github.com/codegangsta/martini/recovery.go  (generated, vendored)
@@ -1,142 +0,0 @@
|
||||
package martini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
|
||||
"github.com/codegangsta/inject"
|
||||
)
|
||||
|
||||
const (
|
||||
panicHtml = `<html>
|
||||
<head><title>PANIC: %s</title>
|
||||
<style type="text/css">
|
||||
html, body {
|
||||
font-family: "Roboto", sans-serif;
|
||||
color: #333333;
|
||||
background-color: #ea5343;
|
||||
margin: 0px;
|
||||
}
|
||||
h1 {
|
||||
color: #d04526;
|
||||
background-color: #ffffff;
|
||||
padding: 20px;
|
||||
border-bottom: 1px dashed #2b3848;
|
||||
}
|
||||
pre {
|
||||
margin: 20px;
|
||||
padding: 20px;
|
||||
border: 2px solid #2b3848;
|
||||
background-color: #ffffff;
|
||||
}
|
||||
</style>
|
||||
</head><body>
|
||||
<h1>PANIC</h1>
|
||||
<pre style="font-weight: bold;">%s</pre>
|
||||
<pre>%s</pre>
|
||||
</body>
|
||||
</html>`
|
||||
)
|
||||
|
||||
var (
|
||||
dunno = []byte("???")
|
||||
centerDot = []byte("·")
|
||||
dot = []byte(".")
|
||||
slash = []byte("/")
|
||||
)
|
||||
|
||||
// stack returns a nicely formated stack frame, skipping skip frames
|
||||
func stack(skip int) []byte {
|
||||
buf := new(bytes.Buffer) // the returned data
|
||||
// As we loop, we open files and read them. These variables record the currently
|
||||
// loaded file.
|
||||
var lines [][]byte
|
||||
var lastFile string
|
||||
for i := skip; ; i++ { // Skip the expected number of frames
|
||||
pc, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
// Print this much at least. If we can't find the source, it won't show.
|
||||
fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
|
||||
if file != lastFile {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
lines = bytes.Split(data, []byte{'\n'})
|
||||
lastFile = file
|
||||
}
|
||||
fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// source returns a space-trimmed slice of the n'th line.
|
||||
func source(lines [][]byte, n int) []byte {
|
||||
n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
|
||||
if n < 0 || n >= len(lines) {
|
||||
return dunno
|
||||
}
|
||||
return bytes.TrimSpace(lines[n])
|
||||
}
|
||||
|
||||
// function returns, if possible, the name of the function containing the PC.
|
||||
func function(pc uintptr) []byte {
|
||||
fn := runtime.FuncForPC(pc)
|
||||
if fn == nil {
|
||||
return dunno
|
||||
}
|
||||
name := []byte(fn.Name())
|
||||
// The name includes the path name to the package, which is unnecessary
|
||||
// since the file name is already included. Plus, it has center dots.
|
||||
// That is, we see
|
||||
// runtime/debug.*T·ptrmethod
|
||||
// and want
|
||||
// *T.ptrmethod
|
||||
// Also the package path might contains dot (e.g. code.google.com/...),
|
||||
// so first eliminate the path prefix
|
||||
if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
|
||||
name = name[lastslash+1:]
|
||||
}
|
||||
if period := bytes.Index(name, dot); period >= 0 {
|
||||
name = name[period+1:]
|
||||
}
|
||||
name = bytes.Replace(name, centerDot, dot, -1)
|
||||
return name
|
||||
}
|
||||
|
||||
// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
|
||||
// While Martini is in development mode, Recovery will also output the panic as HTML.
|
||||
func Recovery() Handler {
|
||||
return func(c Context, log *log.Logger) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
stack := stack(3)
|
||||
log.Printf("PANIC: %s\n%s", err, stack)
|
||||
|
||||
// Lookup the current responsewriter
|
||||
val := c.Get(inject.InterfaceOf((*http.ResponseWriter)(nil)))
|
||||
res := val.Interface().(http.ResponseWriter)
|
||||
|
||||
// respond with panic message while in development mode
|
||||
var body []byte
|
||||
if Env == Dev {
|
||||
res.Header().Set("Content-Type", "text/html")
|
||||
body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
|
||||
}
|
||||
|
||||
res.WriteHeader(http.StatusInternalServerError)
|
||||
if nil != body {
|
||||
res.Write(body)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
49  Godeps/_workspace/src/github.com/codegangsta/martini/recovery_test.go  (generated, vendored)
@@ -1,49 +0,0 @@
package martini

import (
	"bytes"
	"log"
	"net/http"
	"net/http/httptest"
	"testing"
)

func Test_Recovery(t *testing.T) {
	buff := bytes.NewBufferString("")
	recorder := httptest.NewRecorder()

	setENV(Dev)
	m := New()
	// replace log for testing
	m.Map(log.New(buff, "[martini] ", 0))
	m.Use(func(res http.ResponseWriter, req *http.Request) {
		res.Header().Set("Content-Type", "unpredictable")
	})
	m.Use(Recovery())
	m.Use(func(res http.ResponseWriter, req *http.Request) {
		panic("here is a panic!")
	})
	m.ServeHTTP(recorder, (*http.Request)(nil))
	expect(t, recorder.Code, http.StatusInternalServerError)
	expect(t, recorder.HeaderMap.Get("Content-Type"), "text/html")
	refute(t, recorder.Body.Len(), 0)
	refute(t, len(buff.String()), 0)
}

func Test_Recovery_ResponseWriter(t *testing.T) {
	recorder := httptest.NewRecorder()
	recorder2 := httptest.NewRecorder()

	setENV(Dev)
	m := New()
	m.Use(Recovery())
	m.Use(func(c Context) {
		c.MapTo(recorder2, (*http.ResponseWriter)(nil))
		panic("here is a panic!")
	})
	m.ServeHTTP(recorder, (*http.Request)(nil))

	expect(t, recorder2.Code, http.StatusInternalServerError)
	expect(t, recorder2.HeaderMap.Get("Content-Type"), "text/html")
	refute(t, recorder2.Body.Len(), 0)
}
97  Godeps/_workspace/src/github.com/codegangsta/martini/response_writer.go  (generated, vendored)
@@ -1,97 +0,0 @@
|
||||
package martini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about
|
||||
// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter
|
||||
// if the functionality calls for it.
|
||||
type ResponseWriter interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
// Status returns the status code of the response or 0 if the response has not been written.
|
||||
Status() int
|
||||
// Written returns whether or not the ResponseWriter has been written.
|
||||
Written() bool
|
||||
// Size returns the size of the response body.
|
||||
Size() int
|
||||
// Before allows for a function to be called before the ResponseWriter has been written to. This is
|
||||
// useful for setting headers or any other operations that must happen before a response has been written.
|
||||
Before(BeforeFunc)
|
||||
}
|
||||
|
||||
// BeforeFunc is a function that is called before the ResponseWriter has been written to.
|
||||
type BeforeFunc func(ResponseWriter)
|
||||
|
||||
// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
|
||||
func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
|
||||
return &responseWriter{rw, 0, 0, nil}
|
||||
}
|
||||
|
||||
type responseWriter struct {
|
||||
http.ResponseWriter
|
||||
status int
|
||||
size int
|
||||
beforeFuncs []BeforeFunc
|
||||
}
|
||||
|
||||
func (rw *responseWriter) WriteHeader(s int) {
|
||||
rw.callBefore()
|
||||
rw.ResponseWriter.WriteHeader(s)
|
||||
rw.status = s
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Write(b []byte) (int, error) {
|
||||
if !rw.Written() {
|
||||
// The status will be StatusOK if WriteHeader has not been called yet
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
}
|
||||
size, err := rw.ResponseWriter.Write(b)
|
||||
rw.size += size
|
||||
return size, err
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Status() int {
|
||||
return rw.status
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Size() int {
|
||||
return rw.size
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Written() bool {
|
||||
return rw.status != 0
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Before(before BeforeFunc) {
|
||||
rw.beforeFuncs = append(rw.beforeFuncs, before)
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
hijacker, ok := rw.ResponseWriter.(http.Hijacker)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
|
||||
}
|
||||
return hijacker.Hijack()
|
||||
}
|
||||
|
||||
func (rw *responseWriter) CloseNotify() <-chan bool {
|
||||
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
func (rw *responseWriter) callBefore() {
|
||||
for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
|
||||
rw.beforeFuncs[i](rw)
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *responseWriter) Flush() {
|
||||
flusher, ok := rw.ResponseWriter.(http.Flusher)
|
||||
if ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
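
The ResponseWriter wrapper deleted above exists so middleware can observe the status and size of a response and register Before hooks that run just before the first write. A minimal sketch of that hook, using only NewResponseWriter, Before, Status and Written from the file above together with the standard httptest recorder:

~~~ go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/codegangsta/martini"
)

func main() {
	rec := httptest.NewRecorder()
	rw := martini.NewResponseWriter(rec)

	// Before callbacks run right before the header is written,
	// which is the last safe moment to set response headers.
	rw.Before(func(w martini.ResponseWriter) {
		w.Header().Set("X-Served-By", "demo")
	})

	rw.WriteHeader(http.StatusNotFound)

	fmt.Println(rec.Code)                        // 404
	fmt.Println(rec.Header().Get("X-Served-By")) // "demo"
	fmt.Println(rw.Status(), rw.Written())       // 404 true
}
~~~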
188  Godeps/_workspace/src/github.com/codegangsta/martini/response_writer_test.go  (generated, vendored)
@@ -1,188 +0,0 @@
|
||||
package martini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type closeNotifyingRecorder struct {
|
||||
*httptest.ResponseRecorder
|
||||
closed chan bool
|
||||
}
|
||||
|
||||
func newCloseNotifyingRecorder() *closeNotifyingRecorder {
|
||||
return &closeNotifyingRecorder{
|
||||
httptest.NewRecorder(),
|
||||
make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *closeNotifyingRecorder) close() {
|
||||
c.closed <- true
|
||||
}
|
||||
|
||||
func (c *closeNotifyingRecorder) CloseNotify() <-chan bool {
|
||||
return c.closed
|
||||
}
|
||||
|
||||
type hijackableResponse struct {
|
||||
Hijacked bool
|
||||
}
|
||||
|
||||
func newHijackableResponse() *hijackableResponse {
|
||||
return &hijackableResponse{}
|
||||
}
|
||||
|
||||
func (h *hijackableResponse) Header() http.Header { return nil }
|
||||
func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil }
|
||||
func (h *hijackableResponse) WriteHeader(code int) {}
|
||||
func (h *hijackableResponse) Flush() {}
|
||||
func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
h.Hijacked = true
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_WritingString(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
|
||||
rw.Write([]byte("Hello world"))
|
||||
|
||||
expect(t, rec.Code, rw.Status())
|
||||
expect(t, rec.Body.String(), "Hello world")
|
||||
expect(t, rw.Status(), http.StatusOK)
|
||||
expect(t, rw.Size(), 11)
|
||||
expect(t, rw.Written(), true)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_WritingStrings(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
|
||||
rw.Write([]byte("Hello world"))
|
||||
rw.Write([]byte("foo bar bat baz"))
|
||||
|
||||
expect(t, rec.Code, rw.Status())
|
||||
expect(t, rec.Body.String(), "Hello worldfoo bar bat baz")
|
||||
expect(t, rw.Status(), http.StatusOK)
|
||||
expect(t, rw.Size(), 26)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_WritingHeader(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
|
||||
rw.WriteHeader(http.StatusNotFound)
|
||||
|
||||
expect(t, rec.Code, rw.Status())
|
||||
expect(t, rec.Body.String(), "")
|
||||
expect(t, rw.Status(), http.StatusNotFound)
|
||||
expect(t, rw.Size(), 0)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_Before(t *testing.T) {
|
||||
rec := httptest.NewRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
result := ""
|
||||
|
||||
rw.Before(func(ResponseWriter) {
|
||||
result += "foo"
|
||||
})
|
||||
rw.Before(func(ResponseWriter) {
|
||||
result += "bar"
|
||||
})
|
||||
|
||||
rw.WriteHeader(http.StatusNotFound)
|
||||
|
||||
expect(t, rec.Code, rw.Status())
|
||||
expect(t, rec.Body.String(), "")
|
||||
expect(t, rw.Status(), http.StatusNotFound)
|
||||
expect(t, rw.Size(), 0)
|
||||
expect(t, result, "barfoo")
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_Hijack(t *testing.T) {
|
||||
hijackable := newHijackableResponse()
|
||||
rw := NewResponseWriter(hijackable)
|
||||
hijacker, ok := rw.(http.Hijacker)
|
||||
expect(t, ok, true)
|
||||
_, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
expect(t, hijackable.Hijacked, true)
|
||||
}
|
||||
|
||||
func Test_ResponseWrite_Hijack_NotOK(t *testing.T) {
|
||||
hijackable := new(http.ResponseWriter)
|
||||
rw := NewResponseWriter(*hijackable)
|
||||
hijacker, ok := rw.(http.Hijacker)
|
||||
expect(t, ok, true)
|
||||
_, _, err := hijacker.Hijack()
|
||||
|
||||
refute(t, err, nil)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_CloseNotify(t *testing.T) {
|
||||
rec := newCloseNotifyingRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
closed := false
|
||||
notifier := rw.(http.CloseNotifier).CloseNotify()
|
||||
rec.close()
|
||||
select {
|
||||
case <-notifier:
|
||||
closed = true
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
expect(t, closed, true)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_Flusher(t *testing.T) {
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
rw := NewResponseWriter(rec)
|
||||
|
||||
_, ok := rw.(http.Flusher)
|
||||
expect(t, ok, true)
|
||||
}
|
||||
|
||||
func Test_ResponseWriter_FlusherHandler(t *testing.T) {
|
||||
|
||||
// New martini instance
|
||||
m := Classic()
|
||||
|
||||
m.Get("/events", func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
f, ok := w.(http.Flusher)
|
||||
expect(t, ok, true)
|
||||
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
io.WriteString(w, "data: Hello\n\n")
|
||||
f.Flush()
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
r, _ := http.NewRequest("GET", "/events", nil)
|
||||
m.ServeHTTP(recorder, r)
|
||||
|
||||
if recorder.Code != 200 {
|
||||
t.Error("Response not 200")
|
||||
}
|
||||
|
||||
if recorder.Body.String() != "data: Hello\n\ndata: Hello\n\n" {
|
||||
t.Error("Didn't receive correct body, got:", recorder.Body.String())
|
||||
}
|
||||
|
||||
}
|
||||
Godeps/_workspace/src/github.com/codegangsta/martini/return_handler.go (generated, vendored, 43 lines deleted)
package martini

import (
	"github.com/codegangsta/inject"
	"net/http"
	"reflect"
)

// ReturnHandler is a service that Martini provides that is called
// when a route handler returns something. The ReturnHandler is
// responsible for writing to the ResponseWriter based on the values
// that are passed into this function.
type ReturnHandler func(Context, []reflect.Value)

func defaultReturnHandler() ReturnHandler {
	return func(ctx Context, vals []reflect.Value) {
		rv := ctx.Get(inject.InterfaceOf((*http.ResponseWriter)(nil)))
		res := rv.Interface().(http.ResponseWriter)
		var responseVal reflect.Value
		if len(vals) > 1 && vals[0].Kind() == reflect.Int {
			res.WriteHeader(int(vals[0].Int()))
			responseVal = vals[1]
		} else if len(vals) > 0 {
			responseVal = vals[0]
		}
		if canDeref(responseVal) {
			responseVal = responseVal.Elem()
		}
		if isByteSlice(responseVal) {
			res.Write(responseVal.Bytes())
		} else {
			res.Write([]byte(responseVal.String()))
		}
	}
}

func isByteSlice(val reflect.Value) bool {
	return val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8
}

func canDeref(val reflect.Value) bool {
	return val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr
}
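The defaultReturnHandler above is what turns a route handler's return values into an HTTP response body and status code. A minimal sketch of the behaviour it enables, reusing only the martini API documented elsewhere in this diff (Classic, Get, Run); the routes themselves are illustrative:

~~~ go
package main

import "github.com/codegangsta/martini"

func main() {
	m := martini.Classic()

	// A lone string return value is written as the body with HTTP 200.
	m.Get("/", func() string {
		return "hello world"
	})

	// When the first value is an int it becomes the status code and the
	// second value becomes the body (see the reflect.Int branch above).
	m.Get("/teapot", func() (int, string) {
		return 418, "i'm a teapot"
	})

	m.Run()
}
~~~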
Godeps/_workspace/src/github.com/codegangsta/martini/router.go (generated, vendored, 331 lines deleted)
|
||||
package martini
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Params is a map of name/value pairs for named routes. An instance of martini.Params is available to be injected into any route handler.
|
||||
type Params map[string]string
|
||||
|
||||
// Router is Martini's de-facto routing interface. Supports HTTP verbs, stacked handlers, and dependency injection.
|
||||
type Router interface {
|
||||
Routes
|
||||
|
||||
// Group adds a group where related routes can be added.
|
||||
Group(string, func(Router), ...Handler)
|
||||
// Get adds a route for a HTTP GET request to the specified matching pattern.
|
||||
Get(string, ...Handler) Route
|
||||
// Patch adds a route for a HTTP PATCH request to the specified matching pattern.
|
||||
Patch(string, ...Handler) Route
|
||||
// Post adds a route for a HTTP POST request to the specified matching pattern.
|
||||
Post(string, ...Handler) Route
|
||||
// Put adds a route for a HTTP PUT request to the specified matching pattern.
|
||||
Put(string, ...Handler) Route
|
||||
// Delete adds a route for a HTTP DELETE request to the specified matching pattern.
|
||||
Delete(string, ...Handler) Route
|
||||
// Options adds a route for a HTTP OPTIONS request to the specified matching pattern.
|
||||
Options(string, ...Handler) Route
|
||||
// Head adds a route for a HTTP HEAD request to the specified matching pattern.
|
||||
Head(string, ...Handler) Route
|
||||
// Any adds a route for any HTTP method request to the specified matching pattern.
|
||||
Any(string, ...Handler) Route
|
||||
|
||||
// NotFound sets the handlers that are called when a no route matches a request. Throws a basic 404 by default.
|
||||
NotFound(...Handler)
|
||||
|
||||
// Handle is the entry point for routing. This is used as a martini.Handler
|
||||
Handle(http.ResponseWriter, *http.Request, Context)
|
||||
}
|
||||
|
||||
type router struct {
|
||||
routes []*route
|
||||
notFounds []Handler
|
||||
groups []group
|
||||
}
|
||||
|
||||
type group struct {
|
||||
pattern string
|
||||
handlers []Handler
|
||||
}
|
||||
|
||||
// NewRouter creates a new Router instance.
|
||||
// If you aren't using ClassicMartini, then you can add Routes as a
|
||||
// service with:
|
||||
//
|
||||
// m := martini.New()
|
||||
// r := martini.NewRouter()
|
||||
// m.MapTo(r, (*martini.Routes)(nil))
|
||||
//
|
||||
// If you are using ClassicMartini, then this is done for you.
|
||||
func NewRouter() Router {
|
||||
return &router{notFounds: []Handler{http.NotFound}, groups: make([]group, 0)}
|
||||
}
|
||||
|
||||
func (r *router) Group(pattern string, fn func(Router), h ...Handler) {
|
||||
r.groups = append(r.groups, group{pattern, h})
|
||||
fn(r)
|
||||
r.groups = r.groups[:len(r.groups)-1]
|
||||
}
|
||||
|
||||
func (r *router) Get(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("GET", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Patch(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("PATCH", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Post(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("POST", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Put(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("PUT", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Delete(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("DELETE", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Options(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("OPTIONS", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Head(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("HEAD", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Any(pattern string, h ...Handler) Route {
|
||||
return r.addRoute("*", pattern, h)
|
||||
}
|
||||
|
||||
func (r *router) Handle(res http.ResponseWriter, req *http.Request, context Context) {
|
||||
for _, route := range r.routes {
|
||||
ok, vals := route.Match(req.Method, req.URL.Path)
|
||||
if ok {
|
||||
params := Params(vals)
|
||||
context.Map(params)
|
||||
route.Handle(context, res)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// no routes exist, 404
|
||||
c := &routeContext{context, 0, r.notFounds}
|
||||
context.MapTo(c, (*Context)(nil))
|
||||
c.run()
|
||||
}
|
||||
|
||||
func (r *router) NotFound(handler ...Handler) {
|
||||
r.notFounds = handler
|
||||
}
|
||||
|
||||
func (r *router) addRoute(method string, pattern string, handlers []Handler) *route {
|
||||
if len(r.groups) > 0 {
|
||||
group := r.groups[len(r.groups)-1]
|
||||
pattern = group.pattern + pattern
|
||||
h := make([]Handler, len(group.handlers)+len(handlers))
|
||||
copy(h, group.handlers)
|
||||
copy(h[len(group.handlers):], handlers)
|
||||
handlers = h
|
||||
}
|
||||
|
||||
route := newRoute(method, pattern, handlers)
|
||||
route.Validate()
|
||||
r.routes = append(r.routes, route)
|
||||
return route
|
||||
}
|
||||
|
||||
func (r *router) findRoute(name string) *route {
|
||||
for _, route := range r.routes {
|
||||
if route.name == name {
|
||||
return route
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Route is an interface representing a Route in Martini's routing layer.
|
||||
type Route interface {
|
||||
// URLWith returns a rendering of the Route's url with the given string params.
|
||||
URLWith([]string) string
|
||||
Name(string)
|
||||
}
|
||||
|
||||
type route struct {
|
||||
method string
|
||||
regex *regexp.Regexp
|
||||
handlers []Handler
|
||||
pattern string
|
||||
name string
|
||||
}
|
||||
|
||||
func newRoute(method string, pattern string, handlers []Handler) *route {
|
||||
route := route{method, nil, handlers, pattern, ""}
|
||||
r := regexp.MustCompile(`:[^/#?()\.\\]+`)
|
||||
pattern = r.ReplaceAllStringFunc(pattern, func(m string) string {
|
||||
return fmt.Sprintf(`(?P<%s>[^/#?]+)`, m[1:])
|
||||
})
|
||||
r2 := regexp.MustCompile(`\*\*`)
|
||||
var index int
|
||||
pattern = r2.ReplaceAllStringFunc(pattern, func(m string) string {
|
||||
index++
|
||||
return fmt.Sprintf(`(?P<_%d>[^#?]*)`, index)
|
||||
})
|
||||
pattern += `\/?`
|
||||
route.regex = regexp.MustCompile(pattern)
|
||||
return &route
|
||||
}
|
||||
|
||||
func (r route) MatchMethod(method string) bool {
|
||||
return r.method == "*" || method == r.method || (method == "HEAD" && r.method == "GET")
|
||||
}
|
||||
|
||||
func (r route) Match(method string, path string) (bool, map[string]string) {
|
||||
// add Any method matching support
|
||||
if !r.MatchMethod(method) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
matches := r.regex.FindStringSubmatch(path)
|
||||
if len(matches) > 0 && matches[0] == path {
|
||||
params := make(map[string]string)
|
||||
for i, name := range r.regex.SubexpNames() {
|
||||
if len(name) > 0 {
|
||||
params[name] = matches[i]
|
||||
}
|
||||
}
|
||||
return true, params
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *route) Validate() {
|
||||
for _, handler := range r.handlers {
|
||||
validateHandler(handler)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *route) Handle(c Context, res http.ResponseWriter) {
|
||||
context := &routeContext{c, 0, r.handlers}
|
||||
c.MapTo(context, (*Context)(nil))
|
||||
context.run()
|
||||
}
|
||||
|
||||
// URLWith returns the url pattern replacing the parameters for its values
|
||||
func (r *route) URLWith(args []string) string {
|
||||
if len(args) > 0 {
|
||||
reg := regexp.MustCompile(`:[^/#?()\.\\]+`)
|
||||
argCount := len(args)
|
||||
i := 0
|
||||
url := reg.ReplaceAllStringFunc(r.pattern, func(m string) string {
|
||||
var val interface{}
|
||||
if i < argCount {
|
||||
val = args[i]
|
||||
} else {
|
||||
val = m
|
||||
}
|
||||
i += 1
|
||||
return fmt.Sprintf(`%v`, val)
|
||||
})
|
||||
|
||||
return url
|
||||
}
|
||||
return r.pattern
|
||||
}
|
||||
|
||||
func (r *route) Name(name string) {
|
||||
r.name = name
|
||||
}
|
||||
|
||||
// Routes is a helper service for Martini's routing layer.
|
||||
type Routes interface {
|
||||
// URLFor returns a rendered URL for the given route. Optional params can be passed to fulfill named parameters in the route.
|
||||
URLFor(name string, params ...interface{}) string
|
||||
// MethodsFor returns an array of methods available for the path
|
||||
MethodsFor(path string) []string
|
||||
}
|
||||
|
||||
// URLFor returns the url for the given route name.
|
||||
func (r *router) URLFor(name string, params ...interface{}) string {
|
||||
route := r.findRoute(name)
|
||||
|
||||
if route == nil {
|
||||
panic("route not found")
|
||||
}
|
||||
|
||||
var args []string
|
||||
for _, param := range params {
|
||||
switch v := param.(type) {
|
||||
case int:
|
||||
args = append(args, strconv.FormatInt(int64(v), 10))
|
||||
case string:
|
||||
args = append(args, v)
|
||||
default:
|
||||
if v != nil {
|
||||
panic("Arguments passed to URLFor must be integers or strings")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return route.URLWith(args)
|
||||
}
|
||||
|
||||
func hasMethod(methods []string, method string) bool {
|
||||
for _, v := range methods {
|
||||
if v == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MethodsFor returns all methods available for path
|
||||
func (r *router) MethodsFor(path string) []string {
|
||||
methods := []string{}
|
||||
for _, route := range r.routes {
|
||||
matches := route.regex.FindStringSubmatch(path)
|
||||
if len(matches) > 0 && matches[0] == path && !hasMethod(methods, route.method) {
|
||||
methods = append(methods, route.method)
|
||||
}
|
||||
}
|
||||
return methods
|
||||
}
|
||||
|
||||
type routeContext struct {
|
||||
Context
|
||||
index int
|
||||
handlers []Handler
|
||||
}
|
||||
|
||||
func (r *routeContext) Next() {
|
||||
r.index += 1
|
||||
r.run()
|
||||
}
|
||||
|
||||
func (r *routeContext) run() {
|
||||
for r.index < len(r.handlers) {
|
||||
handler := r.handlers[r.index]
|
||||
vals, err := r.Invoke(handler)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r.index += 1
|
||||
|
||||
// if the handler returned something, write it to the http response
|
||||
if len(vals) > 0 {
|
||||
ev := r.Get(reflect.TypeOf(ReturnHandler(nil)))
|
||||
handleReturn := ev.Interface().(ReturnHandler)
|
||||
handleReturn(r, vals)
|
||||
}
|
||||
|
||||
if r.Written() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/codegangsta/martini/router_test.go (generated, vendored, 410 lines deleted)
|
||||
package martini
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_Routing(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
|
||||
req2, _ := http.NewRequest("POST", "http://localhost:3000/bar/bat", nil)
|
||||
context2 := New().createContext(recorder, req2)
|
||||
|
||||
req3, _ := http.NewRequest("DELETE", "http://localhost:3000/baz", nil)
|
||||
context3 := New().createContext(recorder, req3)
|
||||
|
||||
req4, _ := http.NewRequest("PATCH", "http://localhost:3000/bar/foo", nil)
|
||||
context4 := New().createContext(recorder, req4)
|
||||
|
||||
req5, _ := http.NewRequest("GET", "http://localhost:3000/fez/this/should/match", nil)
|
||||
context5 := New().createContext(recorder, req5)
|
||||
|
||||
req6, _ := http.NewRequest("PUT", "http://localhost:3000/pop/blah/blah/blah/bap/foo/", nil)
|
||||
context6 := New().createContext(recorder, req6)
|
||||
|
||||
req7, _ := http.NewRequest("DELETE", "http://localhost:3000/wap//pow", nil)
|
||||
context7 := New().createContext(recorder, req7)
|
||||
|
||||
req8, _ := http.NewRequest("HEAD", "http://localhost:3000/wap//pow", nil)
|
||||
context8 := New().createContext(recorder, req8)
|
||||
|
||||
req9, _ := http.NewRequest("OPTIONS", "http://localhost:3000/opts", nil)
|
||||
context9 := New().createContext(recorder, req9)
|
||||
|
||||
req10, _ := http.NewRequest("HEAD", "http://localhost:3000/foo", nil)
|
||||
context10 := New().createContext(recorder, req10)
|
||||
|
||||
req11, _ := http.NewRequest("GET", "http://localhost:3000/bazz/inga", nil)
|
||||
context11 := New().createContext(recorder, req11)
|
||||
|
||||
req12, _ := http.NewRequest("POST", "http://localhost:3000/bazz/inga", nil)
|
||||
context12 := New().createContext(recorder, req12)
|
||||
|
||||
result := ""
|
||||
router.Get("/foo", func(req *http.Request) {
|
||||
result += "foo"
|
||||
})
|
||||
router.Patch("/bar/:id", func(params Params) {
|
||||
expect(t, params["id"], "foo")
|
||||
result += "barfoo"
|
||||
})
|
||||
router.Post("/bar/:id", func(params Params) {
|
||||
expect(t, params["id"], "bat")
|
||||
result += "barbat"
|
||||
})
|
||||
router.Put("/fizzbuzz", func() {
|
||||
result += "fizzbuzz"
|
||||
})
|
||||
router.Delete("/bazzer", func(c Context) {
|
||||
result += "baz"
|
||||
})
|
||||
router.Get("/fez/**", func(params Params) {
|
||||
expect(t, params["_1"], "this/should/match")
|
||||
result += "fez"
|
||||
})
|
||||
router.Put("/pop/**/bap/:id/**", func(params Params) {
|
||||
expect(t, params["id"], "foo")
|
||||
expect(t, params["_1"], "blah/blah/blah")
|
||||
expect(t, params["_2"], "")
|
||||
result += "popbap"
|
||||
})
|
||||
router.Delete("/wap/**/pow", func(params Params) {
|
||||
expect(t, params["_1"], "")
|
||||
result += "wappow"
|
||||
})
|
||||
router.Options("/opts", func() {
|
||||
result += "opts"
|
||||
})
|
||||
router.Head("/wap/**/pow", func(params Params) {
|
||||
expect(t, params["_1"], "")
|
||||
result += "wappow"
|
||||
})
|
||||
router.Group("/bazz", func(r Router) {
|
||||
r.Get("/inga", func() {
|
||||
result += "get"
|
||||
})
|
||||
|
||||
r.Post("/inga", func() {
|
||||
result += "post"
|
||||
})
|
||||
}, func() {
|
||||
result += "bazz"
|
||||
}, func() {
|
||||
result += "inga"
|
||||
})
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
router.Handle(recorder, req2, context2)
|
||||
router.Handle(recorder, req3, context3)
|
||||
router.Handle(recorder, req4, context4)
|
||||
router.Handle(recorder, req5, context5)
|
||||
router.Handle(recorder, req6, context6)
|
||||
router.Handle(recorder, req7, context7)
|
||||
router.Handle(recorder, req8, context8)
|
||||
router.Handle(recorder, req9, context9)
|
||||
router.Handle(recorder, req10, context10)
|
||||
router.Handle(recorder, req11, context11)
|
||||
router.Handle(recorder, req12, context12)
|
||||
expect(t, result, "foobarbatbarfoofezpopbapwappowwappowoptsfoobazzingagetbazzingapost")
|
||||
expect(t, recorder.Code, http.StatusNotFound)
|
||||
expect(t, recorder.Body.String(), "404 page not found\n")
|
||||
}
|
||||
|
||||
func Test_RouterHandlerStatusCode(t *testing.T) {
|
||||
router := NewRouter()
|
||||
router.Get("/foo", func() string {
|
||||
return "foo"
|
||||
})
|
||||
router.Get("/bar", func() (int, string) {
|
||||
return http.StatusForbidden, "bar"
|
||||
})
|
||||
router.Get("/baz", func() (string, string) {
|
||||
return "baz", "BAZ!"
|
||||
})
|
||||
router.Get("/bytes", func() []byte {
|
||||
return []byte("Bytes!")
|
||||
})
|
||||
router.Get("/interface", func() interface{} {
|
||||
return "Interface!"
|
||||
})
|
||||
|
||||
// code should be 200 if none is returned from the handler
|
||||
recorder := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "foo")
|
||||
|
||||
// if a status code is returned, it should be used
|
||||
recorder = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest("GET", "http://localhost:3000/bar", nil)
|
||||
context = New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusForbidden)
|
||||
expect(t, recorder.Body.String(), "bar")
|
||||
|
||||
// shouldn't use the first returned value as a status code if not an integer
|
||||
recorder = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest("GET", "http://localhost:3000/baz", nil)
|
||||
context = New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "baz")
|
||||
|
||||
// Should render bytes as a return value as well.
|
||||
recorder = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest("GET", "http://localhost:3000/bytes", nil)
|
||||
context = New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "Bytes!")
|
||||
|
||||
// Should render interface{} values.
|
||||
recorder = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest("GET", "http://localhost:3000/interface", nil)
|
||||
context = New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "Interface!")
|
||||
}
|
||||
|
||||
func Test_RouterHandlerStacking(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
|
||||
result := ""
|
||||
|
||||
f1 := func() {
|
||||
result += "foo"
|
||||
}
|
||||
|
||||
f2 := func(c Context) {
|
||||
result += "bar"
|
||||
c.Next()
|
||||
result += "bing"
|
||||
}
|
||||
|
||||
f3 := func() string {
|
||||
result += "bat"
|
||||
return "Hello world"
|
||||
}
|
||||
|
||||
f4 := func() {
|
||||
result += "baz"
|
||||
}
|
||||
|
||||
router.Get("/foo", f1, f2, f3, f4)
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, result, "foobarbatbing")
|
||||
expect(t, recorder.Body.String(), "Hello world")
|
||||
}
|
||||
|
||||
var routeTests = []struct {
|
||||
// in
|
||||
method string
|
||||
path string
|
||||
|
||||
// out
|
||||
ok bool
|
||||
params map[string]string
|
||||
}{
|
||||
{"GET", "/foo/123/bat/321", true, map[string]string{"bar": "123", "baz": "321"}},
|
||||
{"POST", "/foo/123/bat/321", false, map[string]string{}},
|
||||
{"GET", "/foo/hello/bat/world", true, map[string]string{"bar": "hello", "baz": "world"}},
|
||||
{"GET", "foo/hello/bat/world", false, map[string]string{}},
|
||||
{"GET", "/foo/123/bat/321/", true, map[string]string{"bar": "123", "baz": "321"}},
|
||||
{"GET", "/foo/123/bat/321//", false, map[string]string{}},
|
||||
{"GET", "/foo/123//bat/321/", false, map[string]string{}},
|
||||
}
|
||||
|
||||
func Test_RouteMatching(t *testing.T) {
|
||||
route := newRoute("GET", "/foo/:bar/bat/:baz", nil)
|
||||
for _, tt := range routeTests {
|
||||
ok, params := route.Match(tt.method, tt.path)
|
||||
if ok != tt.ok || params["bar"] != tt.params["bar"] || params["baz"] != tt.params["baz"] {
|
||||
t.Errorf("expected: (%v, %v) got: (%v, %v)", tt.ok, tt.params, ok, params)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MethodsFor(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("POST", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
context.MapTo(router, (*Routes)(nil))
|
||||
router.Post("/foo/bar", func() {
|
||||
})
|
||||
|
||||
router.Post("/fo", func() {
|
||||
})
|
||||
|
||||
router.Get("/foo", func() {
|
||||
})
|
||||
|
||||
router.Put("/foo", func() {
|
||||
})
|
||||
|
||||
router.NotFound(func(routes Routes, w http.ResponseWriter, r *http.Request) {
|
||||
methods := routes.MethodsFor(r.URL.Path)
|
||||
if len(methods) != 0 {
|
||||
w.Header().Set("Allow", strings.Join(methods, ","))
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusMethodNotAllowed)
|
||||
expect(t, recorder.Header().Get("Allow"), "GET,PUT")
|
||||
}
|
||||
|
||||
func Test_NotFound(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
|
||||
router.NotFound(func(res http.ResponseWriter) {
|
||||
http.Error(res, "Nope", http.StatusNotFound)
|
||||
})
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusNotFound)
|
||||
expect(t, recorder.Body.String(), "Nope\n")
|
||||
}
|
||||
|
||||
func Test_NotFoundAsHandler(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
|
||||
router.NotFound(func() string {
|
||||
return "not found"
|
||||
})
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "not found")
|
||||
|
||||
recorder = httptest.NewRecorder()
|
||||
|
||||
context = New().createContext(recorder, req)
|
||||
|
||||
router.NotFound(func() (int, string) {
|
||||
return 404, "not found"
|
||||
})
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusNotFound)
|
||||
expect(t, recorder.Body.String(), "not found")
|
||||
|
||||
recorder = httptest.NewRecorder()
|
||||
|
||||
context = New().createContext(recorder, req)
|
||||
|
||||
router.NotFound(func() (int, string) {
|
||||
return 200, ""
|
||||
})
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, recorder.Code, http.StatusOK)
|
||||
expect(t, recorder.Body.String(), "")
|
||||
}
|
||||
|
||||
func Test_NotFoundStacking(t *testing.T) {
|
||||
router := NewRouter()
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
|
||||
result := ""
|
||||
|
||||
f1 := func() {
|
||||
result += "foo"
|
||||
}
|
||||
|
||||
f2 := func(c Context) {
|
||||
result += "bar"
|
||||
c.Next()
|
||||
result += "bing"
|
||||
}
|
||||
|
||||
f3 := func() string {
|
||||
result += "bat"
|
||||
return "Not Found"
|
||||
}
|
||||
|
||||
f4 := func() {
|
||||
result += "baz"
|
||||
}
|
||||
|
||||
router.NotFound(f1, f2, f3, f4)
|
||||
|
||||
router.Handle(recorder, req, context)
|
||||
expect(t, result, "foobarbatbing")
|
||||
expect(t, recorder.Body.String(), "Not Found")
|
||||
}
|
||||
|
||||
func Test_Any(t *testing.T) {
|
||||
router := NewRouter()
|
||||
router.Any("/foo", func(res http.ResponseWriter) {
|
||||
http.Error(res, "Nope", http.StatusNotFound)
|
||||
})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/foo", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
|
||||
expect(t, recorder.Code, http.StatusNotFound)
|
||||
expect(t, recorder.Body.String(), "Nope\n")
|
||||
|
||||
recorder = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest("PUT", "http://localhost:3000/foo", nil)
|
||||
context = New().createContext(recorder, req)
|
||||
router.Handle(recorder, req, context)
|
||||
|
||||
expect(t, recorder.Code, http.StatusNotFound)
|
||||
expect(t, recorder.Body.String(), "Nope\n")
|
||||
}
|
||||
|
||||
func Test_URLFor(t *testing.T) {
|
||||
router := NewRouter()
|
||||
|
||||
router.Get("/foo", func() {
|
||||
// Nothing
|
||||
}).Name("foo")
|
||||
|
||||
router.Post("/bar/:id", func(params Params) {
|
||||
// Nothing
|
||||
}).Name("bar")
|
||||
|
||||
router.Get("/bar/:id/:name", func(params Params, routes Routes) {
|
||||
expect(t, routes.URLFor("foo", nil), "/foo")
|
||||
expect(t, routes.URLFor("bar", 5), "/bar/5")
|
||||
expect(t, routes.URLFor("bar_id", 5, "john"), "/bar/5/john")
|
||||
}).Name("bar_id")
|
||||
|
||||
// code should be 200 if none is returned from the handler
|
||||
recorder := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "http://localhost:3000/bar/foo/bar", nil)
|
||||
context := New().createContext(recorder, req)
|
||||
context.MapTo(router, (*Routes)(nil))
|
||||
router.Handle(recorder, req, context)
|
||||
}
|
||||
Godeps/_workspace/src/github.com/codegangsta/martini/static.go (generated, vendored, 109 lines deleted)
package martini

import (
	"log"
	"net/http"
	"path"
	"strings"
)

// StaticOptions is a struct for specifying configuration options for the martini.Static middleware.
type StaticOptions struct {
	// Prefix is the optional prefix used to serve the static directory content
	Prefix string
	// SkipLogging will disable [Static] log messages when a static file is served.
	SkipLogging bool
	// IndexFile defines which file to serve as index if it exists.
	IndexFile string
	// Expires defines which user-defined function to use for producing a HTTP Expires Header
	// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
	Expires func() string
}

func prepareStaticOptions(options []StaticOptions) StaticOptions {
	var opt StaticOptions
	if len(options) > 0 {
		opt = options[0]
	}

	// Defaults
	if len(opt.IndexFile) == 0 {
		opt.IndexFile = "index.html"
	}
	// Normalize the prefix if provided
	if opt.Prefix != "" {
		// Ensure we have a leading '/'
		if opt.Prefix[0] != '/' {
			opt.Prefix = "/" + opt.Prefix
		}
		// Remove any trailing '/'
		opt.Prefix = strings.TrimRight(opt.Prefix, "/")
	}
	return opt
}

// Static returns a middleware handler that serves static files in the given directory.
func Static(directory string, staticOpt ...StaticOptions) Handler {
	dir := http.Dir(directory)
	opt := prepareStaticOptions(staticOpt)

	return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
		if req.Method != "GET" && req.Method != "HEAD" {
			return
		}
		file := req.URL.Path
		// if we have a prefix, filter requests by stripping the prefix
		if opt.Prefix != "" {
			if !strings.HasPrefix(file, opt.Prefix) {
				return
			}
			file = file[len(opt.Prefix):]
			if file != "" && file[0] != '/' {
				return
			}
		}
		f, err := dir.Open(file)
		if err != nil {
			// discard the error?
			return
		}
		defer f.Close()

		fi, err := f.Stat()
		if err != nil {
			return
		}

		// try to serve index file
		if fi.IsDir() {
			// redirect if missing trailing slash
			if !strings.HasSuffix(req.URL.Path, "/") {
				http.Redirect(res, req, req.URL.Path+"/", http.StatusFound)
				return
			}

			file = path.Join(file, opt.IndexFile)
			f, err = dir.Open(file)
			if err != nil {
				return
			}
			defer f.Close()

			fi, err = f.Stat()
			if err != nil || fi.IsDir() {
				return
			}
		}

		if !opt.SkipLogging {
			log.Println("[Static] Serving " + file)
		}

		// Add an Expires header to the static content
		if opt.Expires != nil {
			res.Header().Set("Expires", opt.Expires())
		}

		http.ServeContent(res, req, file, fi.ModTime(), f)
	}
}
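prepareStaticOptions and Static together make the prefix, index file, logging, and Expires behaviour configurable per mount. A minimal sketch of passing StaticOptions, assuming a hypothetical local "assets" directory; the /static prefix and the 24-hour Expires value are illustrative, not taken from this diff:

~~~ go
package main

import (
	"net/http"
	"time"

	"github.com/codegangsta/martini"
)

func main() {
	m := martini.Classic()

	// Serve ./assets under the /static prefix, skip the per-file
	// [Static] log lines and attach a far-future Expires header.
	m.Use(martini.Static("assets", martini.StaticOptions{
		Prefix:      "/static",
		SkipLogging: true,
		IndexFile:   "index.html",
		Expires: func() string {
			return time.Now().Add(24 * time.Hour).UTC().Format(http.TimeFormat)
		},
	}))

	m.Run()
}
~~~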
Godeps/_workspace/src/github.com/codegangsta/martini/static_test.go (generated, vendored, 200 lines deleted)
|
||||
package martini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/codegangsta/inject"
|
||||
)
|
||||
|
||||
func Test_Static(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
response.Body = new(bytes.Buffer)
|
||||
|
||||
m := New()
|
||||
r := NewRouter()
|
||||
|
||||
m.Use(Static("."))
|
||||
m.Action(r.Handle)
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
expect(t, response.Header().Get("Expires"), "")
|
||||
if response.Body.Len() == 0 {
|
||||
t.Errorf("Got empty body for GET request")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Static_Head(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
response.Body = new(bytes.Buffer)
|
||||
|
||||
m := New()
|
||||
r := NewRouter()
|
||||
|
||||
m.Use(Static("."))
|
||||
m.Action(r.Handle)
|
||||
|
||||
req, err := http.NewRequest("HEAD", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
if response.Body.Len() != 0 {
|
||||
t.Errorf("Got non-empty body for HEAD request")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Static_As_Post(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := New()
|
||||
r := NewRouter()
|
||||
|
||||
m.Use(Static("."))
|
||||
m.Action(r.Handle)
|
||||
|
||||
req, err := http.NewRequest("POST", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusNotFound)
|
||||
}
|
||||
|
||||
func Test_Static_BadDir(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := Classic()
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
refute(t, response.Code, http.StatusOK)
|
||||
}
|
||||
|
||||
func Test_Static_Options_Logging(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
var buffer bytes.Buffer
|
||||
m := &Martini{Injector: inject.New(), action: func() {}, logger: log.New(&buffer, "[martini] ", 0)}
|
||||
m.Map(m.logger)
|
||||
m.Map(defaultReturnHandler())
|
||||
|
||||
opt := StaticOptions{}
|
||||
m.Use(Static(".", opt))
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
expect(t, buffer.String(), "[martini] [Static] Serving /martini.go\n")
|
||||
|
||||
// Now without logging
|
||||
m.Handlers()
|
||||
buffer.Reset()
|
||||
|
||||
// This should disable logging
|
||||
opt.SkipLogging = true
|
||||
m.Use(Static(".", opt))
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
expect(t, buffer.String(), "")
|
||||
}
|
||||
|
||||
func Test_Static_Options_ServeIndex(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
var buffer bytes.Buffer
|
||||
m := &Martini{Injector: inject.New(), action: func() {}, logger: log.New(&buffer, "[martini] ", 0)}
|
||||
m.Map(m.logger)
|
||||
m.Map(defaultReturnHandler())
|
||||
|
||||
opt := StaticOptions{IndexFile: "martini.go"} // Define martini.go as index file
|
||||
m.Use(Static(".", opt))
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
expect(t, buffer.String(), "[martini] [Static] Serving /martini.go\n")
|
||||
}
|
||||
|
||||
func Test_Static_Options_Prefix(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
var buffer bytes.Buffer
|
||||
m := &Martini{Injector: inject.New(), action: func() {}, logger: log.New(&buffer, "[martini] ", 0)}
|
||||
m.Map(m.logger)
|
||||
m.Map(defaultReturnHandler())
|
||||
|
||||
// Serve current directory under /public
|
||||
m.Use(Static(".", StaticOptions{Prefix: "/public"}))
|
||||
|
||||
// Check file content behaviour
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/public/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusOK)
|
||||
expect(t, buffer.String(), "[martini] [Static] Serving /martini.go\n")
|
||||
}
|
||||
|
||||
func Test_Static_Options_Expires(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
var buffer bytes.Buffer
|
||||
m := &Martini{Injector: inject.New(), action: func() {}, logger: log.New(&buffer, "[martini] ", 0)}
|
||||
m.Map(m.logger)
|
||||
m.Map(defaultReturnHandler())
|
||||
|
||||
// Serve current directory under /public
|
||||
m.Use(Static(".", StaticOptions{Expires: func() string { return "46" }}))
|
||||
|
||||
// Check file content behaviour
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/martini.go", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Header().Get("Expires"), "46")
|
||||
}
|
||||
|
||||
func Test_Static_Redirect(t *testing.T) {
|
||||
response := httptest.NewRecorder()
|
||||
|
||||
m := New()
|
||||
m.Use(Static(".", StaticOptions{Prefix: "/public"}))
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost:3000/public", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
m.ServeHTTP(response, req)
|
||||
expect(t, response.Code, http.StatusFound)
|
||||
expect(t, response.Header().Get("Location"), "/public/")
|
||||
}
|
||||
Godeps/_workspace/src/github.com/codegangsta/martini/translations/README_zh_cn.md (generated, vendored, 311 lines deleted)
# Martini [](https://app.wercker.com/project/bykey/174bef7e3c999e103cacfe2770102266) [](http://godoc.org/github.com/codegangsta/martini)

Martini is a powerful framework for writing modular web applications in Go.

## Your First Application

After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your own `.go` file; here we assume it is called `server.go`.

~~~ go
package main

import "github.com/codegangsta/martini"

func main() {
  m := martini.Classic()
  m.Get("/", func() string {
    return "Hello world!"
  })
  m.Run()
}
~~~

Then install the Martini package (note that Martini requires Go 1.1 or greater):
~~~
go get github.com/codegangsta/martini
~~~

Finally, run your server:
~~~
go run server.go
~~~

You will now have a Martini web server listening on `localhost:3000`.

## Getting Help

Join the [mailing list](https://groups.google.com/forum/#!forum/martini-go)

Or watch the [demo video](http://martini.codegangsta.io/#demo)

## Features
* Extremely simple to use.
* Non-intrusive design.
* Plays nicely with other Go packages.
* Awesome path matching and routing.
* Modular design - easy to add functionality, easy to take it out again.
* Plenty of middleware ready to use.
* Great 'out of the box' feature set.
* **Fully compatible with the [http.HandlerFunc](http://godoc.org/net/http#HandlerFunc) interface.**

## More Middleware
For more middleware and functionality, check out the repositories in the [martini-contrib](https://github.com/martini-contrib) organization.

## Table of Contents
* [Classic Martini](#classic-martini)
* [Handlers](#handlers)
* [Routing](#routing)
* [Services](#services)
* [Serving Static Files](#serving-static-files)
* [Middleware Handlers](#middleware-handlers)
* [Next()](#next)
* [FAQ](#faq)

## Classic Martini
To get up and running quickly, [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) provides some sensible defaults for web development:
~~~ go
m := martini.Classic()
// ... middleware and routing goes here
m.Run()
~~~

Below is the functionality that [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) already includes:
* Request/Response Logging - [martini.Logger](http://godoc.org/github.com/codegangsta/martini#Logger)
* Panic Recovery - [martini.Recovery](http://godoc.org/github.com/codegangsta/martini#Recovery)
* Static File serving - [martini.Static](http://godoc.org/github.com/codegangsta/martini#Static)
* Routing - [martini.Router](http://godoc.org/github.com/codegangsta/martini#Router)

### Handlers
Handlers are the heart and soul of Martini. A handler is basically any kind of function:
~~~ go
m.Get("/", func() {
  println("hello world")
})
~~~

#### Return Values
If a handler returns something, Martini will write the result to the current [http.ResponseWriter](http://godoc.org/net/http#ResponseWriter) as a string:
~~~ go
m.Get("/", func() string {
  return "hello world" // HTTP 200 : "hello world"
})
~~~

You can also optionally return a status code:
~~~ go
m.Get("/", func() (int, string) {
  return 418, "i'm a teapot" // HTTP 418 : "i'm a teapot"
})
~~~

#### Service Injection
Handlers are invoked via reflection. Martini uses *Dependency Injection* to resolve the arguments in a handler's parameter list. **This makes Martini completely compatible with Go's `http.HandlerFunc` interface.**

If you add an argument to your handler, Martini will search its list of services and resolve the dependency by type:
~~~ go
m.Get("/", func(res http.ResponseWriter, req *http.Request) { // res and req are injected by Martini
  res.WriteHeader(200) // HTTP 200
})
~~~

The following services are included with [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic):
* [*log.Logger](http://godoc.org/log#Logger) - Global logger for Martini.
* [martini.Context](http://godoc.org/github.com/codegangsta/martini#Context) - http request context.
* [martini.Params](http://godoc.org/github.com/codegangsta/martini#Params) - `map[string]string` of named params found by route matching.
* [martini.Routes](http://godoc.org/github.com/codegangsta/martini#Routes) - Route helper service.
* [http.ResponseWriter](http://godoc.org/net/http/#ResponseWriter) - http Response writer interface.
* [*http.Request](http://godoc.org/net/http/#Request) - http Request.

### Routing
In Martini, a route is an HTTP method paired with a URL-matching pattern. Each route can take one or more handler methods:
~~~ go
m.Get("/", func() {
  // show something
})

m.Patch("/", func() {
  // update something
})

m.Post("/", func() {
  // create something
})

m.Put("/", func() {
  // replace something
})

m.Delete("/", func() {
  // destroy something
})

m.Options("/", func() {
  // http options
})

m.NotFound(func() {
  // handle 404
})
~~~

Routes are matched in the order they are defined. The first route that matches the request is invoked.

Route patterns may include named parameters, accessible via the [martini.Params](http://godoc.org/github.com/codegangsta/martini#Params) service:
~~~ go
m.Get("/hello/:name", func(params martini.Params) string {
  return "Hello " + params["name"]
})
~~~

Routes can also be matched with regular expressions or globs:
~~~ go
m.Get("/hello/**", func(params martini.Params) string {
  return "Hello " + params["_1"]
})
~~~

Route handlers can be stacked on top of each other, which is handy for things like authentication and authorization:
~~~ go
m.Get("/secret", authorize, func() {
  // this will execute as long as authorize doesn't write a response
})
~~~

### Services
Services are objects that are available to be injected into a handler's argument list. You can map a service at the *global* or *request* level.

#### Global Mapping
A Martini instance implements the `inject.Injector` interface, so mapping a service is easy:
~~~ go
db := &MyDatabase{}
m := martini.Classic()
m.Map(db) // the *MyDatabase service will be available to all handlers
// ...
m.Run()
~~~

#### Request-Level Mapping
Mapping at the request level can be done in a handler via [martini.Context](http://godoc.org/github.com/codegangsta/martini#Context):
~~~ go
func MyCustomLoggerHandler(c martini.Context, req *http.Request) {
  logger := &MyCustomLogger{req}
  c.Map(logger) // mapped as *MyCustomLogger
}
~~~

#### Mapping Values to Interfaces
One of the most powerful parts of services is the ability to map a service to an interface. For instance, if you wanted to override the [http.ResponseWriter](http://godoc.org/net/http#ResponseWriter) with an object that wraps it and performs extra operations, you can write the following handler:
~~~ go
func WrapResponseWriter(res http.ResponseWriter, c martini.Context) {
  rw := NewSpecialResponseWriter(res)
  c.MapTo(rw, (*http.ResponseWriter)(nil)) // override ResponseWriter with our wrapped ResponseWriter
}
~~~

### Serving Static Files
A [martini.Classic()](http://godoc.org/github.com/codegangsta/martini#Classic) instance automatically serves static files from the "public" directory in the root of your server.
You can serve from more directories by adding more [martini.Static](http://godoc.org/github.com/codegangsta/martini#Static) handlers.
~~~ go
m.Use(martini.Static("assets")) // serve from the "assets" directory as well
~~~

## Middleware Handlers
Middleware handlers sit between the incoming http request and the router. In essence they are no different from any other handler in Martini. You can add a middleware handler to the stack like so:
~~~ go
m.Use(func() {
  // do some middleware stuff
})
~~~

You can have full control over the middleware stack with the `Handlers` function. This will replace any handlers that have been previously set:
~~~ go
m.Handlers(
  Middleware1,
  Middleware2,
  Middleware3,
)
~~~

Middleware handlers work really well for things like logging, authorization, authentication, sessions, error pages, and any other operation that must happen before or after an http request:

~~~ go
// validate an api key
m.Use(func(res http.ResponseWriter, req *http.Request) {
  if req.Header.Get("X-API-KEY") != "secret123" {
    res.WriteHeader(http.StatusUnauthorized)
  }
})
~~~

### Next()
[Context.Next()](http://godoc.org/github.com/codegangsta/martini#Context) is an optional function that middleware handlers can call to yield until all the other handlers have executed. This works really well for operations that must happen after an http request is completed.
~~~ go
// log before and after a request (translator's note: a neat trick)
m.Use(func(c martini.Context, log *log.Logger){
  log.Println("before a request")

  c.Next()

  log.Println("after a request")
})
~~~

## FAQ

### Where can I find middleware?

Start by looking at the [martini-contrib](https://github.com/martini-contrib) projects. If nothing there fits, contact a martini-contrib team member about adding a new repository to the organization.

* [auth](https://github.com/martini-contrib/auth) - Handlers for authentication.
* [binding](https://github.com/martini-contrib/binding) - Handler for mapping/validating a raw request into a structure.
* [gzip](https://github.com/martini-contrib/gzip) - Handler for adding gzip compression to requests.
* [render](https://github.com/martini-contrib/render) - Handler for rendering JSON and HTML templates.
* [acceptlang](https://github.com/martini-contrib/acceptlang) - Handler for parsing the `Accept-Language` HTTP header.
* [sessions](https://github.com/martini-contrib/sessions) - Handler that provides a Session service.
* [strip](https://github.com/martini-contrib/strip) - URL Prefix stripping.
* [method](https://github.com/martini-contrib/method) - HTTP method overriding via Header or form fields.
* [secure](https://github.com/martini-contrib/secure) - Implements a few quick security wins.
* [encoder](https://github.com/martini-contrib/encoder) - Encoder service for rendering data in several formats and content negotiation.

### How do I integrate with existing servers?

Since Martini implements `http.Handler`, it can easily be used to serve subtrees on existing Go servers. For example, this is a working Martini app for Google App Engine:

~~~ go
package hello

import (
  "net/http"
  "github.com/codegangsta/martini"
)

func init() {
  m := martini.Classic()
  m.Get("/", func() string {
    return "Hello world!"
  })
  http.Handle("/", m)
}
~~~

### How do I change the port/host?

Martini's `Run` function looks for the PORT and HOST environment variables and uses those. Otherwise Martini defaults to localhost:3000.
To have more flexibility over port and host, use the `http.ListenAndServe` function instead.

~~~ go
m := martini.Classic()
// ...
http.ListenAndServe(":8080", m)
~~~

## Contributing
Martini is meant to be kept tiny and clean. Most contributions should end up as a repository in the [martini-contrib](https://github.com/martini-contrib) organization. If you do have a contribution for the core of Martini, feel free to put up a Pull Request.

## About

Inspired by [express](https://github.com/visionmedia/express) and [sinatra](https://github.com/sinatra/sinatra).

Martini is by [Code Gangsta](http://codegangsta.io/)
Translated by [Leon](http://github.com/leonli)
Godeps/_workspace/src/github.com/codegangsta/martini/wercker.yml (generated, vendored, 1 line deleted)

box: wercker/golang@1.1.1
Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE (generated, vendored, new file, 185 lines added)
|
||||
This software is licensed under the LGPLv3, included below.
|
||||
|
||||
As a special exception to the GNU Lesser General Public License version 3
|
||||
("LGPL3"), the copyright holders of this Library give you permission to
|
||||
convey to a third party a Combined Work that links statically or dynamically
|
||||
to this Library without providing any Minimal Corresponding Source or
|
||||
Minimal Application Code as set out in 4d or providing the installation
|
||||
information set out in section 4e, provided that you comply with the other
|
||||
provisions of LGPL3 and provided that you meet, for the Application the
|
||||
terms and conditions of the license(s) which apply to the Application.
|
||||
|
||||
Except as stated in this special exception, the provisions of LGPL3 will
|
||||
continue to comply in full to this Library. If you modify this Library, you
|
||||
may apply this exception to your version of this Library, but you are not
|
||||
obliged to do so. If you do not wish to do so, delete this exception
|
||||
statement from your version. This exception does not (and cannot) modify any
|
||||
license terms which apply to the Application, with which you must still
|
||||
comply.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
||||
Godeps/_workspace/src/github.com/juju/ratelimit/README.md (generated, vendored, new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
# ratelimit
|
||||
--
|
||||
import "github.com/juju/ratelimit"
|
||||
|
||||
The ratelimit package provides an efficient token bucket implementation. See
|
||||
http://en.wikipedia.org/wiki/Token_bucket.
|
||||
|
||||
## Usage
|
||||
|
||||
#### func Reader
|
||||
|
||||
```go
|
||||
func Reader(r io.Reader, bucket *Bucket) io.Reader
|
||||
```
|
||||
Reader returns a reader that is rate limited by the given token bucket. Each
|
||||
token in the bucket represents one byte.
|
||||
|
||||
#### func Writer
|
||||
|
||||
```go
|
||||
func Writer(w io.Writer, bucket *Bucket) io.Writer
|
||||
```
|
||||
Writer returns a writer that is rate limited by the given token bucket. Each
|
||||
token in the bucket represents one byte.
|
||||
|
||||
#### type Bucket
|
||||
|
||||
```go
|
||||
type Bucket struct {
|
||||
}
|
||||
```
|
||||
|
||||
Bucket represents a token bucket that fills at a predetermined rate. Methods on
|
||||
Bucket may be called concurrently.
|
||||
|
||||
#### func NewBucket
|
||||
|
||||
```go
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
|
||||
```
|
||||
NewBucket returns a new token bucket that fills at the rate of one token every
|
||||
fillInterval, up to the given maximum capacity. Both arguments must be positive.
|
||||
The bucket is initially full.
|
||||
|
||||
#### func NewBucketWithQuantum
|
||||
|
||||
```go
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
|
||||
```
|
||||
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
|
||||
the quantum size - quantum tokens are added every fillInterval.
|
||||
|
||||
#### func NewBucketWithRate
|
||||
|
||||
```go
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket
|
||||
```
|
||||
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
|
||||
rate tokens per second up to the given maximum capacity. Because of limited
|
||||
clock resolution, at high rates, the actual rate may be up to 1% different from
|
||||
the specified rate.
|
||||
|
||||
#### func (*Bucket) Rate
|
||||
|
||||
```go
|
||||
func (tb *Bucket) Rate() float64
|
||||
```
|
||||
Rate returns the fill rate of the bucket, in tokens per second.
|
||||
|
||||
#### func (*Bucket) Take
|
||||
|
||||
```go
|
||||
func (tb *Bucket) Take(count int64) time.Duration
|
||||
```
|
||||
Take takes count tokens from the bucket without blocking. It returns the time
|
||||
that the caller should wait until the tokens are actually available.
|
||||
|
||||
Note that the request is irrevocable - there is no way to return tokens to
|
||||
the bucket once this method commits us to taking them.
|
||||
|
||||
#### func (*Bucket) TakeAvailable
|
||||
|
||||
```go
|
||||
func (tb *Bucket) TakeAvailable(count int64) int64
|
||||
```
|
||||
TakeAvailable takes up to count immediately available tokens from the bucket. It
|
||||
returns the number of tokens removed, or zero if there are no available tokens.
|
||||
It does not block.
|
||||
|
||||
#### func (*Bucket) TakeMaxDuration
|
||||
|
||||
```go
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
|
||||
```
|
||||
TakeMaxDuration is like Take, except that it will only take tokens from the
|
||||
bucket if the wait time for the tokens is no greater than maxWait.
|
||||
|
||||
If it would take longer than maxWait for the tokens to become available, it does
|
||||
nothing and reports false, otherwise it returns the time that the caller should
|
||||
wait until the tokens are actually available, and reports true.
|
||||
|
||||
#### func (*Bucket) Wait
|
||||
|
||||
```go
|
||||
func (tb *Bucket) Wait(count int64)
|
||||
```
|
||||
Wait takes count tokens from the bucket, waiting until they are available.
|
||||
|
||||
#### func (*Bucket) WaitMaxDuration
|
||||
|
||||
```go
|
||||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
|
||||
```
|
||||
WaitMaxDuration is like Wait except that it will only take tokens from the
|
||||
bucket if it needs to wait for no greater than maxWait. It reports whether any
|
||||
tokens have been removed from the bucket. If no tokens have been removed, it
|
||||
returns immediately.
|
||||
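The API above maps directly onto a few lines of code. The following is a minimal sketch (not part of the vendored README) showing a bucket that starts full and then refills; the rate and capacity values are arbitrary choices for illustration.

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket that refills at 100 tokens per second and holds at most 10.
	tb := ratelimit.NewBucketWithRate(100, 10)

	// The bucket starts full, so the first 10 tokens are immediately available.
	fmt.Println(tb.TakeAvailable(10)) // prints 10

	// Wait blocks until another 5 tokens have accumulated (about 50ms here).
	start := time.Now()
	tb.Wait(5)
	fmt.Println(time.Since(start))
}
```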
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go (generated, vendored, new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
// The ratelimit package provides an efficient token bucket implementation.
|
||||
// See http://en.wikipedia.org/wiki/Token_bucket.
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Bucket represents a token bucket that fills at a predetermined rate.
|
||||
// Methods on Bucket may be called concurrently.
|
||||
type Bucket struct {
|
||||
startTime time.Time
|
||||
capacity int64
|
||||
quantum int64
|
||||
fillInterval time.Duration
|
||||
|
||||
// The mutex guards the fields following it.
|
||||
mu sync.Mutex
|
||||
|
||||
// avail holds the number of available tokens
|
||||
// in the bucket, as of availTick ticks from startTime.
|
||||
// It will be negative when there are consumers
|
||||
// waiting for tokens.
|
||||
avail int64
|
||||
availTick int64
|
||||
}
|
||||
|
||||
// NewBucket returns a new token bucket that fills at the
|
||||
// rate of one token every fillInterval, up to the given
|
||||
// maximum capacity. Both arguments must be
|
||||
// positive. The bucket is initially full.
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
|
||||
return NewBucketWithQuantum(fillInterval, capacity, 1)
|
||||
}
|
||||
|
||||
// rateMargin specifies the allowed variance of actual
|
||||
// rate from specified rate. 1% seems reasonable.
|
||||
const rateMargin = 0.01
|
||||
|
||||
// NewBucketWithRate returns a token bucket that fills the bucket
|
||||
// at the rate of rate tokens per second up to the given
|
||||
// maximum capacity. Because of limited clock resolution,
|
||||
// at high rates, the actual rate may be up to 1% different from the
|
||||
// specified rate.
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
|
||||
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
|
||||
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
|
||||
if fillInterval <= 0 {
|
||||
continue
|
||||
}
|
||||
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
|
||||
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
|
||||
return tb
|
||||
}
|
||||
}
|
||||
panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
|
||||
}
|
||||
|
||||
// nextQuantum returns the next quantum to try after q.
|
||||
// We grow the quantum exponentially, but slowly, so we
|
||||
// get a good fit in the lower numbers.
|
||||
func nextQuantum(q int64) int64 {
|
||||
q1 := q * 11 / 10
|
||||
if q1 == q {
|
||||
q1++
|
||||
}
|
||||
return q1
|
||||
}
|
||||
|
||||
// NewBucketWithQuantum is similar to NewBucket, but allows
|
||||
// the specification of the quantum size - quantum tokens
|
||||
// are added every fillInterval.
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
|
||||
if fillInterval <= 0 {
|
||||
panic("token bucket fill interval is not > 0")
|
||||
}
|
||||
if capacity <= 0 {
|
||||
panic("token bucket capacity is not > 0")
|
||||
}
|
||||
if quantum <= 0 {
|
||||
panic("token bucket quantum is not > 0")
|
||||
}
|
||||
return &Bucket{
|
||||
startTime: time.Now(),
|
||||
capacity: capacity,
|
||||
quantum: quantum,
|
||||
avail: capacity,
|
||||
fillInterval: fillInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Wait takes count tokens from the bucket, waiting until they are
|
||||
// available.
|
||||
func (tb *Bucket) Wait(count int64) {
|
||||
if d := tb.Take(count); d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitMaxDuration is like Wait except that it will
|
||||
// only take tokens from the bucket if it needs to wait
|
||||
// for no greater than maxWait. It reports whether
|
||||
// any tokens have been removed from the bucket.
|
||||
// If no tokens have been removed, it returns immediately.
|
||||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
|
||||
d, ok := tb.TakeMaxDuration(count, maxWait)
|
||||
if d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
const infinityDuration time.Duration = 0x7fffffffffffffff
|
||||
|
||||
// Take takes count tokens from the bucket without blocking. It returns
|
||||
// the time that the caller should wait until the tokens are actually
|
||||
// available.
|
||||
//
|
||||
// Note that the request is irrevocable - there is no way to return
|
||||
// tokens to the bucket once this method commits us to taking them.
|
||||
func (tb *Bucket) Take(count int64) time.Duration {
|
||||
d, _ := tb.take(time.Now(), count, infinityDuration)
|
||||
return d
|
||||
}
|
||||
|
||||
// TakeMaxDuration is like Take, except that
|
||||
// it will only take tokens from the bucket if the wait
|
||||
// time for the tokens is no greater than maxWait.
|
||||
//
|
||||
// If it would take longer than maxWait for the tokens
|
||||
// to become available, it does nothing and reports false,
|
||||
// otherwise it returns the time that the caller should
|
||||
// wait until the tokens are actually available, and reports
|
||||
// true.
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
return tb.take(time.Now(), count, maxWait)
|
||||
}
|
||||
|
||||
// TakeAvailable takes up to count immediately available tokens from the
|
||||
// bucket. It returns the number of tokens removed, or zero if there are
|
||||
// no available tokens. It does not block.
|
||||
func (tb *Bucket) TakeAvailable(count int64) int64 {
|
||||
return tb.takeAvailable(time.Now(), count)
|
||||
}
|
||||
|
||||
// takeAvailable is the internal version of TakeAvailable - it takes the
|
||||
// current time as an argument to enable easy testing.
|
||||
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
|
||||
if count <= 0 {
|
||||
return 0
|
||||
}
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
|
||||
tb.adjust(now)
|
||||
if tb.avail <= 0 {
|
||||
return 0
|
||||
}
|
||||
if count > tb.avail {
|
||||
count = tb.avail
|
||||
}
|
||||
tb.avail -= count
|
||||
return count
|
||||
}
|
||||
|
||||
// Rate returns the fill rate of the bucket, in tokens per second.
|
||||
func (tb *Bucket) Rate() float64 {
|
||||
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
|
||||
}
|
||||
|
||||
// take is the internal version of Take - it takes the current time as
|
||||
// an argument to enable easy testing.
|
||||
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
if count <= 0 {
|
||||
return 0, true
|
||||
}
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
|
||||
currentTick := tb.adjust(now)
|
||||
avail := tb.avail - count
|
||||
if avail >= 0 {
|
||||
tb.avail = avail
|
||||
return 0, true
|
||||
}
|
||||
// Round up the missing tokens to the nearest multiple
|
||||
// of quantum - the tokens won't be available until
|
||||
// that tick.
|
||||
endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
|
||||
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
|
||||
waitTime := endTime.Sub(now)
|
||||
if waitTime > maxWait {
|
||||
return 0, false
|
||||
}
|
||||
tb.avail = avail
|
||||
return waitTime, true
|
||||
}
|
||||
|
||||
// adjust adjusts the current bucket capacity based on the current time.
|
||||
// It returns the current tick.
|
||||
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
|
||||
currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
|
||||
|
||||
if tb.avail >= tb.capacity {
|
||||
return
|
||||
}
|
||||
tb.avail += (currentTick - tb.availTick) * tb.quantum
|
||||
if tb.avail > tb.capacity {
|
||||
tb.avail = tb.capacity
|
||||
}
|
||||
tb.availTick = currentTick
|
||||
return
|
||||
}
|
||||
|
||||
func abs(f float64) float64 {
|
||||
if f < 0 {
|
||||
return -f
|
||||
}
|
||||
return f
|
||||
}
|
||||
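The rounding comment inside take above is the heart of the wait-time calculation: the token deficit is rounded up to a whole number of quanta and converted into fill intervals. A small standalone sketch with made-up numbers (not taken from the library) shows the arithmetic, assuming now falls exactly on the current tick.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative numbers only: the bucket adds 2 tokens every 250ms,
	// the caller is 3 tokens short, and "now" sits exactly on tick 4.
	quantum := int64(2)
	fillInterval := 250 * time.Millisecond
	avail := int64(-3) // tb.avail - count in Bucket.take
	currentTick := int64(4)

	// Same rounding as Bucket.take: the deficit is rounded up to a whole
	// number of quanta, and the wait ends at that future tick.
	endTick := currentTick + (-avail+quantum-1)/quantum
	wait := time.Duration(endTick-currentTick) * fillInterval

	fmt.Println(endTick, wait) // 6 500ms
}
```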
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit_test.go (generated, vendored, new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
gc "launchpad.net/gocheck"
|
||||
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPackage(t *testing.T) {
|
||||
gc.TestingT(t)
|
||||
}
|
||||
|
||||
type rateLimitSuite struct{}
|
||||
|
||||
var _ = gc.Suite(rateLimitSuite{})
|
||||
|
||||
type takeReq struct {
|
||||
time time.Duration
|
||||
count int64
|
||||
expectWait time.Duration
|
||||
}
|
||||
|
||||
var takeTests = []struct {
|
||||
about string
|
||||
fillInterval time.Duration
|
||||
capacity int64
|
||||
reqs []takeReq
|
||||
}{{
|
||||
about: "serial requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 0,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expectWait: 250 * time.Millisecond,
|
||||
}, {
|
||||
time: 250 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 250 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "concurrent requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expectWait: 500 * time.Millisecond,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expectWait: 1000 * time.Millisecond,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expectWait: 1250 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "more than capacity",
|
||||
fillInterval: 1 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 20 * time.Millisecond,
|
||||
count: 15,
|
||||
expectWait: 5 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "sub-quantum time",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 7 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 3 * time.Millisecond,
|
||||
}, {
|
||||
time: 8 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 12 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "within capacity",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 5,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 5,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 10 * time.Millisecond,
|
||||
}, {
|
||||
time: 80 * time.Millisecond,
|
||||
count: 2,
|
||||
expectWait: 10 * time.Millisecond,
|
||||
}},
|
||||
}}
|
||||
|
||||
func (rateLimitSuite) TestTake(c *gc.C) {
|
||||
for i, test := range takeTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
if d != req.expectWait {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestTakeMaxDuration(c *gc.C) {
|
||||
for i, test := range takeTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
if req.expectWait > 0 {
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait-1)
|
||||
c.Assert(ok, gc.Equals, false)
|
||||
c.Assert(d, gc.Equals, time.Duration(0))
|
||||
}
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
if d != req.expectWait {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type takeAvailableReq struct {
|
||||
time time.Duration
|
||||
count int64
|
||||
expect int64
|
||||
}
|
||||
|
||||
var takeAvailableTests = []struct {
|
||||
about string
|
||||
fillInterval time.Duration
|
||||
capacity int64
|
||||
reqs []takeAvailableReq
|
||||
}{{
|
||||
about: "serial requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 0,
|
||||
expect: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 10,
|
||||
expect: 10,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expect: 0,
|
||||
}, {
|
||||
time: 250 * time.Millisecond,
|
||||
count: 1,
|
||||
expect: 1,
|
||||
}},
|
||||
}, {
|
||||
about: "concurrent requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expect: 2,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 3,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expect: 0,
|
||||
}},
|
||||
}, {
|
||||
about: "more than capacity",
|
||||
fillInterval: 1 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expect: 10,
|
||||
}, {
|
||||
time: 20 * time.Millisecond,
|
||||
count: 15,
|
||||
expect: 10,
|
||||
}},
|
||||
}, {
|
||||
about: "within capacity",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 5,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 70 * time.Millisecond,
|
||||
count: 1,
|
||||
expect: 1,
|
||||
}},
|
||||
}}
|
||||
|
||||
func (rateLimitSuite) TestTakeAvailable(c *gc.C) {
|
||||
for i, test := range takeAvailableTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
d := tb.takeAvailable(tb.startTime.Add(req.time), req.count)
|
||||
if d != req.expect {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestPanics(c *gc.C) {
|
||||
c.Assert(func() { NewBucket(0, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
|
||||
c.Assert(func() { NewBucket(-2, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
|
||||
c.Assert(func() { NewBucket(1, 0) }, gc.PanicMatches, "token bucket capacity is not > 0")
|
||||
c.Assert(func() { NewBucket(1, -2) }, gc.PanicMatches, "token bucket capacity is not > 0")
|
||||
}
|
||||
|
||||
func isCloseTo(x, y, tolerance float64) bool {
|
||||
return abs(x-y)/y < tolerance
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestRate(c *gc.C) {
|
||||
tb := NewBucket(1, 1)
|
||||
if !isCloseTo(tb.Rate(), 1e9, 0.00001) {
|
||||
c.Fatalf("got %v want 1e9", tb.Rate())
|
||||
}
|
||||
tb = NewBucket(2*time.Second, 1)
|
||||
if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
|
||||
c.Fatalf("got %v want 0.5", tb.Rate())
|
||||
}
|
||||
tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
|
||||
if !isCloseTo(tb.Rate(), 50, 0.00001) {
|
||||
c.Fatalf("got %v want 50", tb.Rate())
|
||||
}
|
||||
}
|
||||
|
||||
func checkRate(c *gc.C, rate float64) {
|
||||
tb := NewBucketWithRate(rate, 1<<62)
|
||||
if !isCloseTo(tb.Rate(), rate, rateMargin) {
|
||||
c.Fatalf("got %g want %v", tb.Rate(), rate)
|
||||
}
|
||||
d, ok := tb.take(tb.startTime, 1<<62, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
c.Assert(d, gc.Equals, time.Duration(0))
|
||||
|
||||
// Check that the actual rate is as expected by
|
||||
// asking for a not-quite multiple of the bucket's
|
||||
// quantum and checking that the wait time
|
||||
// is correct.
|
||||
d, ok = tb.take(tb.startTime, tb.quantum*2-tb.quantum/2, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
expectTime := 1e9 * float64(tb.quantum) * 2 / rate
|
||||
if !isCloseTo(float64(d), expectTime, rateMargin) {
|
||||
c.Fatalf("rate %g: got %g want %v", rate, float64(d), expectTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestNewWithRate(c *gc.C) {
|
||||
for rate := float64(1); rate < 1e6; rate += 7 {
|
||||
checkRate(c, rate)
|
||||
}
|
||||
for _, rate := range []float64{
|
||||
1024 * 1024 * 1024,
|
||||
1e-5,
|
||||
0.9e-5,
|
||||
0.5,
|
||||
0.9,
|
||||
0.9e8,
|
||||
3e12,
|
||||
4e18,
|
||||
} {
|
||||
checkRate(c, rate)
|
||||
checkRate(c, rate/3)
|
||||
checkRate(c, rate*1.3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWait(b *testing.B) {
|
||||
tb := NewBucket(1, 16*1024)
|
||||
for i := b.N - 1; i >= 0; i-- {
|
||||
tb.Wait(1)
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/juju/ratelimit/reader.go (generated, vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
package ratelimit
|
||||
|
||||
import "io"
|
||||
|
||||
type reader struct {
|
||||
r io.Reader
|
||||
bucket *Bucket
|
||||
}
|
||||
|
||||
// Reader returns a reader that is rate limited by
|
||||
// the given token bucket. Each token in the bucket
|
||||
// represents one byte.
|
||||
func Reader(r io.Reader, bucket *Bucket) io.Reader {
|
||||
return &reader{
|
||||
r: r,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) Read(buf []byte) (int, error) {
|
||||
n, err := r.r.Read(buf)
|
||||
if n <= 0 {
|
||||
return n, err
|
||||
}
|
||||
r.bucket.Wait(int64(n))
|
||||
return n, err
|
||||
}
|
||||
|
||||
type writer struct {
|
||||
w io.Writer
|
||||
bucket *Bucket
|
||||
}
|
||||
|
||||
// Writer returns a reader that is rate limited by
|
||||
// the given token bucket. Each token in the bucket
|
||||
// represents one byte.
|
||||
func Writer(w io.Writer, bucket *Bucket) io.Writer {
|
||||
return &writer{
|
||||
w: w,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *writer) Write(buf []byte) (int, error) {
|
||||
w.bucket.Wait(int64(len(buf)))
|
||||
return w.w.Write(buf)
|
||||
}
|
||||
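Putting reader.go together with the Bucket type, rate-limited I/O is just a matter of wrapping the underlying stream. Here is a minimal sketch (not part of the vendored code) that throttles writes to stdout; the fill interval and capacity are arbitrary placeholder values.

```go
package main

import (
	"io"
	"os"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per byte, refilled every 10µs (about 100 KB/s), with a
	// burst capacity of 32 KB. The numbers are arbitrary for this sketch.
	bucket := ratelimit.NewBucket(10*time.Microsecond, 32*1024)

	// Writes block in Bucket.Wait until enough tokens are available.
	limited := ratelimit.Writer(os.Stdout, bucket)
	if _, err := io.Copy(limited, os.Stdin); err != nil {
		panic(err)
	}
}
```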
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go (generated, vendored, new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
var (
|
||||
errBatchTooShort = errors.New("leveldb: batch is too short")
|
||||
errBatchBadRecord = errors.New("leveldb: bad record in batch")
|
||||
)
|
||||
|
||||
const kBatchHdrLen = 8 + 4
|
||||
|
||||
type batchReplay interface {
|
||||
put(key, value []byte, seq uint64)
|
||||
delete(key []byte, seq uint64)
|
||||
}
|
||||
|
||||
// Batch is a write batch.
|
||||
type Batch struct {
|
||||
buf []byte
|
||||
rLen, bLen int
|
||||
seq uint64
|
||||
sync bool
|
||||
}
|
||||
|
||||
func (b *Batch) grow(n int) {
|
||||
off := len(b.buf)
|
||||
if off == 0 {
|
||||
// include headers
|
||||
off = kBatchHdrLen
|
||||
n += off
|
||||
}
|
||||
if cap(b.buf)-off >= n {
|
||||
return
|
||||
}
|
||||
buf := make([]byte, 2*cap(b.buf)+n)
|
||||
copy(buf, b.buf)
|
||||
b.buf = buf[:off]
|
||||
}
|
||||
|
||||
func (b *Batch) appendRec(t vType, key, value []byte) {
|
||||
n := 1 + binary.MaxVarintLen32 + len(key)
|
||||
if t == tVal {
|
||||
n += binary.MaxVarintLen32 + len(value)
|
||||
}
|
||||
b.grow(n)
|
||||
off := len(b.buf)
|
||||
buf := b.buf[:off+n]
|
||||
buf[off] = byte(t)
|
||||
off += 1
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(key)))
|
||||
copy(buf[off:], key)
|
||||
off += len(key)
|
||||
if t == tVal {
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(value)))
|
||||
copy(buf[off:], value)
|
||||
off += len(value)
|
||||
}
|
||||
b.buf = buf[:off]
|
||||
b.rLen++
|
||||
// Include 8-byte ikey header
|
||||
b.bLen += len(key) + len(value) + 8
|
||||
}
|
||||
|
||||
// Put appends 'put operation' of the given key/value pair to the batch.
|
||||
// It is safe to modify the contents of the argument after Put returns.
|
||||
func (b *Batch) Put(key, value []byte) {
|
||||
b.appendRec(tVal, key, value)
|
||||
}
|
||||
|
||||
// Delete appends 'delete operation' of the given key to the batch.
|
||||
// It is safe to modify the contents of the argument after Delete returns.
|
||||
func (b *Batch) Delete(key []byte) {
|
||||
b.appendRec(tDel, key, nil)
|
||||
}
|
||||
|
||||
// Reset resets the batch.
|
||||
func (b *Batch) Reset() {
|
||||
b.buf = nil
|
||||
b.seq = 0
|
||||
b.rLen = 0
|
||||
b.bLen = 0
|
||||
b.sync = false
|
||||
}
|
||||
|
||||
func (b *Batch) init(sync bool) {
|
||||
b.sync = sync
|
||||
}
|
||||
|
||||
func (b *Batch) put(key, value []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Put(key, value)
|
||||
}
|
||||
|
||||
func (b *Batch) delete(key []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Delete(key)
|
||||
}
|
||||
|
||||
func (b *Batch) append(p *Batch) {
|
||||
if p.rLen > 0 {
|
||||
b.grow(len(p.buf) - kBatchHdrLen)
|
||||
b.buf = append(b.buf, p.buf[kBatchHdrLen:]...)
|
||||
b.rLen += p.rLen
|
||||
}
|
||||
if p.sync {
|
||||
b.sync = true
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Batch) len() int {
|
||||
return b.rLen
|
||||
}
|
||||
|
||||
func (b *Batch) size() int {
|
||||
return b.bLen
|
||||
}
|
||||
|
||||
func (b *Batch) encode() []byte {
|
||||
b.grow(0)
|
||||
binary.LittleEndian.PutUint64(b.buf, b.seq)
|
||||
binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen))
|
||||
|
||||
return b.buf
|
||||
}
|
||||
|
||||
func (b *Batch) decode(buf []byte) error {
|
||||
if len(buf) < kBatchHdrLen {
|
||||
return errBatchTooShort
|
||||
}
|
||||
|
||||
b.seq = binary.LittleEndian.Uint64(buf)
|
||||
b.rLen = int(binary.LittleEndian.Uint32(buf[8:]))
|
||||
// No need to be precise at this point, it won't be used anyway
|
||||
b.bLen = len(buf) - kBatchHdrLen
|
||||
b.buf = buf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
|
||||
off := kBatchHdrLen
|
||||
for i := 0; i < b.rLen; i++ {
|
||||
if off >= len(b.buf) {
|
||||
return errors.New("leveldb: invalid batch record length")
|
||||
}
|
||||
|
||||
t := vType(b.buf[off])
|
||||
if t > tVal {
|
||||
return errors.New("leveldb: invalid batch record type in batch")
|
||||
}
|
||||
off += 1
|
||||
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
}
|
||||
key := b.buf[off : off+int(x)]
|
||||
off += int(x)
|
||||
|
||||
var value []byte
|
||||
if t == tVal {
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
}
|
||||
value = b.buf[off : off+int(x)]
|
||||
off += int(x)
|
||||
}
|
||||
|
||||
f(i, t, key, value)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) replay(to batchReplay) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
switch t {
|
||||
case tVal:
|
||||
to.put(key, value, b.seq+uint64(i))
|
||||
case tDel:
|
||||
to.delete(key, b.seq+uint64(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) memReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
to.Put(ikey, value)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) revertMemReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
to.Delete(ikey)
|
||||
})
|
||||
}
|
||||
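A Batch is normally filled with Put and Delete calls and then applied atomically through DB.Write, in the same way the benchmarks later in this package do. The following sketch (not part of the vendored code) mirrors that pattern; the on-disk path is a placeholder.

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// Open a database the same way the benchmarks in this package do.
	stor, err := storage.OpenFile("/tmp/batch-example")
	if err != nil {
		log.Fatal(err)
	}
	db, err := leveldb.Open(stor, &opt.Options{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Queue a few operations and apply them atomically in one Write.
	b := new(leveldb.Batch)
	b.Put([]byte("alpha"), []byte("1"))
	b.Put([]byte("beta"), []byte("2"))
	b.Delete([]byte("alpha"))
	if err := db.Write(b, &opt.WriteOptions{}); err != nil {
		log.Fatal(err)
	}
}
```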
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go (generated, vendored, new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
type tbRec struct {
|
||||
t vType
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
type testBatch struct {
|
||||
rec []*tbRec
|
||||
}
|
||||
|
||||
func (p *testBatch) put(key, value []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tVal, key, value})
|
||||
}
|
||||
|
||||
func (p *testBatch) delete(key []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tDel, key, nil})
|
||||
}
|
||||
|
||||
func compareBatch(t *testing.T, b1, b2 *Batch) {
|
||||
if b1.seq != b2.seq {
|
||||
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
|
||||
}
|
||||
if b1.len() != b2.len() {
|
||||
t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len())
|
||||
}
|
||||
p1, p2 := new(testBatch), new(testBatch)
|
||||
err := b1.replay(p1)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 1: ", err)
|
||||
}
|
||||
err = b2.replay(p2)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 2: ", err)
|
||||
}
|
||||
for i := range p1.rec {
|
||||
r1, r2 := p1.rec[i], p2.rec[i]
|
||||
if r1.t != r2.t {
|
||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t)
|
||||
}
|
||||
if !bytes.Equal(r1.key, r2.key) {
|
||||
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
|
||||
}
|
||||
if r1.t == tVal {
|
||||
if !bytes.Equal(r1.value, r2.value) {
|
||||
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatch_EncodeDecode(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("k"), []byte(""))
|
||||
b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
|
||||
b1.Delete([]byte("key10000"))
|
||||
b1.Delete([]byte("k"))
|
||||
buf := b1.encode()
|
||||
b2 := new(Batch)
|
||||
err := b2.decode(buf)
|
||||
if err != nil {
|
||||
t.Error("error when decoding batch: ", err)
|
||||
}
|
||||
compareBatch(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBatch_Append(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("foo"), []byte("foovalue"))
|
||||
b1.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a := new(Batch)
|
||||
b2a.seq = 10009
|
||||
b2a.Put([]byte("key1"), []byte("value1"))
|
||||
b2a.Put([]byte("key2"), []byte("value2"))
|
||||
b2a.Delete([]byte("key1"))
|
||||
b2b := new(Batch)
|
||||
b2b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b2b.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a.append(b2b)
|
||||
compareBatch(t, b1, b2a)
|
||||
}
|
||||
|
||||
func TestBatch_Size(t *testing.T) {
|
||||
b := new(Batch)
|
||||
for i := 0; i < 2; i++ {
|
||||
b.Put([]byte("key1"), []byte("value1"))
|
||||
b.Put([]byte("key2"), []byte("value2"))
|
||||
b.Delete([]byte("key1"))
|
||||
b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b.Put([]byte("bar"), []byte("barvalue"))
|
||||
mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
|
||||
b.memReplay(mem)
|
||||
if b.size() != mem.Size() {
|
||||
t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
|
||||
}
|
||||
b.Reset()
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go (generated, vendored, new file, 464 lines)
@@ -0,0 +1,464 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
func randomString(r *rand.Rand, n int) []byte {
|
||||
b := new(bytes.Buffer)
|
||||
for i := 0; i < n; i++ {
|
||||
b.WriteByte(' ' + byte(r.Intn(95)))
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
|
||||
nn := int(float32(n) * frac)
|
||||
rb := randomString(r, nn)
|
||||
b := make([]byte, 0, n+nn)
|
||||
for len(b) < n {
|
||||
b = append(b, rb...)
|
||||
}
|
||||
return b[:n]
|
||||
}
|
||||
|
||||
type valueGen struct {
|
||||
src []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func newValueGen(frac float32) *valueGen {
|
||||
v := new(valueGen)
|
||||
r := rand.New(rand.NewSource(301))
|
||||
v.src = make([]byte, 0, 1048576+100)
|
||||
for len(v.src) < 1048576 {
|
||||
v.src = append(v.src, compressibleStr(r, frac, 100)...)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *valueGen) get(n int) []byte {
|
||||
if v.pos+n > len(v.src) {
|
||||
v.pos = 0
|
||||
}
|
||||
v.pos += n
|
||||
return v.src[v.pos-n : v.pos]
|
||||
}
|
||||
|
||||
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
|
||||
|
||||
type dbBench struct {
|
||||
b *testing.B
|
||||
stor storage.Storage
|
||||
db *DB
|
||||
|
||||
o *opt.Options
|
||||
ro *opt.ReadOptions
|
||||
wo *opt.WriteOptions
|
||||
|
||||
keys, values [][]byte
|
||||
}
|
||||
|
||||
func openDBBench(b *testing.B, noCompress bool) *dbBench {
|
||||
_, err := os.Stat(benchDB)
|
||||
if err == nil {
|
||||
err = os.RemoveAll(benchDB)
|
||||
if err != nil {
|
||||
b.Fatal("cannot remove old db: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
p := &dbBench{
|
||||
b: b,
|
||||
o: &opt.Options{},
|
||||
ro: &opt.ReadOptions{},
|
||||
wo: &opt.WriteOptions{},
|
||||
}
|
||||
p.stor, err = storage.OpenFile(benchDB)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open stor: ", err)
|
||||
}
|
||||
if noCompress {
|
||||
p.o.Compression = opt.NoCompression
|
||||
}
|
||||
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open db: ", err)
|
||||
}
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *dbBench) reopen() {
|
||||
p.db.Close()
|
||||
var err error
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
p.b.Fatal("Reopen: got error: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) populate(n int) {
|
||||
p.keys, p.values = make([][]byte, n), make([][]byte, n)
|
||||
v := newValueGen(0.5)
|
||||
for i := range p.keys {
|
||||
p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) randomize() {
|
||||
m := len(p.keys)
|
||||
times := m * 2
|
||||
r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
|
||||
for n := 0; n < times; n++ {
|
||||
i, j := r1.Int()%m, r2.Int()%m
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
|
||||
p.values[i], p.values[j] = p.values[j], p.values[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) writes(perBatch int) {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
n := len(p.keys)
|
||||
m := n / perBatch
|
||||
if n%perBatch > 0 {
|
||||
m++
|
||||
}
|
||||
batches := make([]Batch, m)
|
||||
j := 0
|
||||
for i := range batches {
|
||||
first := true
|
||||
for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
|
||||
first = false
|
||||
batches[i].Put(p.keys[j], p.values[j])
|
||||
}
|
||||
}
|
||||
runtime.GC()
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range batches {
|
||||
err := db.Write(&(batches[i]), p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) gc() {
|
||||
p.keys, p.values = nil, nil
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
func (p *dbBench) puts() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range p.keys {
|
||||
err := db.Put(p.keys[i], p.values[i], p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("put failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) fill() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
perBatch := 10000
|
||||
batch := new(Batch)
|
||||
for i, n := 0, len(p.keys); i < n; {
|
||||
first := true
|
||||
for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
|
||||
first = false
|
||||
batch.Put(p.keys[i], p.values[i])
|
||||
}
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) gets() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
_, err := db.Get(p.keys[i], p.ro)
|
||||
if err != nil {
|
||||
b.Error("got error: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) seeks() {
|
||||
b := p.b
|
||||
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
if !iter.Seek(p.keys[i]) {
|
||||
b.Error("value not found for: ", string(p.keys[i]))
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) newIter() iterator.Iterator {
|
||||
iter := p.db.NewIterator(nil, p.ro)
|
||||
err := iter.Error()
|
||||
if err != nil {
|
||||
p.b.Fatal("cannot create iterator: ", err)
|
||||
}
|
||||
return iter
|
||||
}
|
||||
|
||||
func (p *dbBench) close() {
|
||||
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
|
||||
p.b.Log("Block pool stats: ", bp)
|
||||
}
|
||||
p.db.Close()
|
||||
p.stor.Close()
|
||||
os.RemoveAll(benchDB)
|
||||
p.db = nil
|
||||
p.keys = nil
|
||||
p.values = nil
|
||||
runtime.GC()
|
||||
runtime.GOMAXPROCS(1)
|
||||
}
|
||||
|
||||
func BenchmarkDBWrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatch(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandomSync(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.wo.Sync = true
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBPut(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.puts()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBRead(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadGC(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverse(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverseTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeek(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeekRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGet(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGetRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go (generated, vendored, new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package cache provides the interface and implementation of cache algorithms.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// SetFunc is the function that will be called by Namespace.Get to create
|
||||
// a cache object. If charge is less than one, the cache object will
|
||||
// not be registered to the cache tree; if value is nil, the cache object
|
||||
// will not be created.
|
||||
type SetFunc func() (charge int, value interface{})
|
||||
|
||||
// DelFin is the function that will be called as the result of a delete operation.
|
||||
// Exist == true indicates that the object exists, and pending == true is
|
||||
// an indication that the deletion has already happened but isn't finished yet (waiting for all handles
|
||||
// to be released). And exist == false means the object doesn't exist.
|
||||
type DelFin func(exist, pending bool)
|
||||
|
||||
// PurgeFin is the function that will be called as the result of a purge operation.
|
||||
type PurgeFin func(ns, key uint64)
|
||||
|
||||
// Cache is a cache tree. A cache instance must be goroutine-safe.
|
||||
type Cache interface {
|
||||
// SetCapacity sets cache tree capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Capacity returns cache tree capacity.
|
||||
Capacity() int
|
||||
|
||||
// Used returns used cache tree capacity.
|
||||
Used() int
|
||||
|
||||
// Size returns entire alive cache objects size.
|
||||
Size() int
|
||||
|
||||
// NumObjects returns number of alive objects.
|
||||
NumObjects() int
|
||||
|
||||
// GetNamespace gets cache namespace with the given id.
|
||||
// GetNamespace never returns nil.
|
||||
GetNamespace(id uint64) Namespace
|
||||
|
||||
// PurgeNamespace purges cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Purge.
|
||||
PurgeNamespace(id uint64, fin PurgeFin)
|
||||
|
||||
// ZapNamespace detaches cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Zap.
|
||||
ZapNamespace(id uint64)
|
||||
|
||||
// Purge purges all cache namespaces from this cache tree.
|
||||
// This behaves the same as calling the Namespace.Purge method on all cache namespaces.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches all cache namespaces from this cache tree.
|
||||
// This behaves the same as calling the Namespace.Zap method on all cache namespaces.
|
||||
Zap()
|
||||
}
|
||||
|
||||
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
|
||||
type Namespace interface {
|
||||
// Get gets cache object with the given key.
|
||||
// If the cache object is not found and setf is not nil, Get will atomically create
|
||||
// the cache object by calling setf. Otherwise Get returns nil.
|
||||
//
|
||||
// The returned cache handle should be released after use by calling Release
|
||||
// method.
|
||||
Get(key uint64, setf SetFunc) Handle
|
||||
|
||||
// Delete removes cache object with the given key from cache tree.
|
||||
// A deleted cache object will be released as soon as all of its handles have
|
||||
// been released.
|
||||
// Delete only happens once; subsequent deletes will consider that the cache object doesn't
|
||||
// exist, even if the cache object isn't released yet.
|
||||
//
|
||||
// If not nil, fin will be called if the cache object doesn't exist or when
|
||||
// it is finally released.
|
||||
//
|
||||
// Delete returns true if such a cache object exists and has never been deleted.
|
||||
Delete(key uint64, fin DelFin) bool
|
||||
|
||||
// Purge removes all cache objects within this namespace from cache tree.
|
||||
// This is the same as doing delete on all cache objects.
|
||||
//
|
||||
// If not nil, fin will be called on each cache object when it is finally
|
||||
// released.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches the namespace from the cache tree and releases all its cache objects.
|
||||
// A zapped namespace can never be filled again.
|
||||
// Calling Get on zapped namespace will always return nil.
|
||||
Zap()
|
||||
}
|
||||
|
||||
// Handle is a cache handle.
|
||||
type Handle interface {
|
||||
// Release releases this cache handle. This method can be safely called multiple
|
||||
// times.
|
||||
Release()
|
||||
|
||||
// Value returns value of this cache handle.
|
||||
// It returns nil after this cache handle has been released.
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
const (
|
||||
DelNotExist = iota
|
||||
DelExist
|
||||
DelPendig
|
||||
)
|
||||
|
||||
// Namespace state.
|
||||
type nsState int
|
||||
|
||||
const (
|
||||
nsEffective nsState = iota
|
||||
nsZapped
|
||||
)
|
||||
|
||||
// Node state.
|
||||
type nodeState int
|
||||
|
||||
const (
|
||||
nodeZero nodeState = iota
|
||||
nodeEffective
|
||||
nodeEvicted
|
||||
nodeDeleted
|
||||
)
|
||||
|
||||
// Fake handle.
|
||||
type fakeHandle struct {
|
||||
value interface{}
|
||||
fin func()
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
if h.fin != nil {
|
||||
h.fin()
|
||||
h.fin = nil
|
||||
}
|
||||
}
|
||||
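The Namespace.Get contract above (create-on-miss via the SetFunc, explicit handle release) is exercised heavily by the tests that follow. A minimal external sketch of the same pattern looks like this; the capacity, key, and value are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// A small LRU cache tree with a single namespace, as in the tests below.
	c := cache.NewLRUCache(100)
	ns := c.GetNamespace(0)

	// Get either returns an existing handle or atomically creates the
	// object via the SetFunc; the charge of 1 counts against capacity.
	h := ns.Get(42, func() (charge int, value interface{}) {
		return 1, "expensive result"
	})
	fmt.Println(h.Value().(string))

	// Handles must be released so the object can eventually be evicted.
	h.Release()
}
```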
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go (generated, vendored, new file, 597 lines)
@@ -0,0 +1,597 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type releaserFunc struct {
|
||||
fn func()
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (r releaserFunc) Release() {
|
||||
if r.fn != nil {
|
||||
r.fn()
|
||||
}
|
||||
}
|
||||
|
||||
func set(ns Namespace, key uint64, value interface{}, charge int, relf func()) Handle {
|
||||
return ns.Get(key, func() (int, interface{}) {
|
||||
if relf != nil {
|
||||
return charge, releaserFunc{relf, value}
|
||||
} else {
|
||||
return charge, value
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCache_HitMiss(t *testing.T) {
|
||||
cases := []struct {
|
||||
key uint64
|
||||
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewLRUCache(1000)
	ns := c.GetNamespace(0)
	for i, x := range cases {
		set(ns, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := ns.Get(y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	for i, x := range cases {
		finalizerOk := false
		ns.Delete(x.key, func(exist, pending bool) {
			finalizerOk = true
		})

		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}

		for j, y := range cases {
			h := ns.Get(y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	if setfin != len(cases) {
		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
	}
}

func TestLRUCache_Eviction(t *testing.T) {
	c := NewLRUCache(12)
	ns := c.GetNamespace(0)
	o1 := set(ns, 1, 1, 1, nil)
	set(ns, 2, 2, 1, nil).Release()
	set(ns, 3, 3, 1, nil).Release()
	set(ns, 4, 4, 1, nil).Release()
	set(ns, 5, 5, 1, nil).Release()
	if h := ns.Get(2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(ns, 9, 9, 10, nil).Release() // 5,2,9

	for _, key := range []uint64{9, 2, 5, 1} {
		h := ns.Get(key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release()
	for _, key := range []uint64{1, 2, 5} {
		h := ns.Get(key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	for _, key := range []uint64{3, 4, 9} {
		h := ns.Get(key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}

func TestLRUCache_SetGet(t *testing.T) {
	c := NewLRUCache(13)
	ns := c.GetNamespace(0)
	for i := 0; i < 200; i++ {
		n := uint64(rand.Intn(99999) % 20)
		set(ns, n, n, 1, nil).Release()
		if h := ns.Get(n, nil); h != nil {
			if h.Value() == nil {
				t.Errorf("key '%d' contains nil value", n)
			} else {
				if x := h.Value().(uint64); x != n {
					t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, x)
				}
			}
			h.Release()
		} else {
			t.Errorf("key '%d' doesn't exist", n)
		}
	}
}

func TestLRUCache_Purge(t *testing.T) {
	c := NewLRUCache(3)
	ns1 := c.GetNamespace(0)
	o1 := set(ns1, 1, 1, 1, nil)
	o2 := set(ns1, 2, 2, 1, nil)
	ns1.Purge(nil)
	set(ns1, 3, 3, 1, nil).Release()
	for _, key := range []uint64{1, 2, 3} {
		h := ns1.Get(key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release()
	o2.Release()
	for _, key := range []uint64{1, 2} {
		h := ns1.Get(key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}

type testingCacheObjectCounter struct {
	created  uint32
	released uint32
}

func (c *testingCacheObjectCounter) createOne() {
	atomic.AddUint32(&c.created, 1)
}

func (c *testingCacheObjectCounter) releaseOne() {
	atomic.AddUint32(&c.released, 1)
}

type testingCacheObject struct {
	t   *testing.T
	cnt *testingCacheObjectCounter

	ns, key uint64

	releaseCalled uint32
}

func (x *testingCacheObject) Release() {
	if atomic.CompareAndSwapUint32(&x.releaseCalled, 0, 1) {
		x.cnt.releaseOne()
	} else {
		x.t.Errorf("duplicate setfin NS#%d KEY#%d", x.ns, x.key)
	}
}

func TestLRUCache_Finalizer(t *testing.T) {
	const (
		capacity   = 100
		goroutines = 100
		iterations = 10000
		keymax     = 8000
	)

	runtime.GOMAXPROCS(runtime.NumCPU())
	defer runtime.GOMAXPROCS(1)

	wg := &sync.WaitGroup{}
	cnt := &testingCacheObjectCounter{}

	c := NewLRUCache(capacity)

	type instance struct {
		seed       int64
		rnd        *rand.Rand
		ns         uint64
		effective  int32
		handles    []Handle
		handlesMap map[uint64]int

		delete          bool
		purge           bool
		zap             bool
		wantDel         int32
		delfinCalledAll int32
		delfinCalledEff int32
		purgefinCalled  int32
	}

	instanceGet := func(p *instance, ns Namespace, key uint64) {
		h := ns.Get(key, func() (charge int, value interface{}) {
			to := &testingCacheObject{
				t: t, cnt: cnt,
				ns:  p.ns,
				key: key,
			}
			atomic.AddInt32(&p.effective, 1)
			cnt.createOne()
			return 1, releaserFunc{func() {
				to.Release()
				atomic.AddInt32(&p.effective, -1)
			}, to}
		})
		p.handles = append(p.handles, h)
		p.handlesMap[key] = p.handlesMap[key] + 1
	}
	instanceRelease := func(p *instance, ns Namespace, i int) {
		h := p.handles[i]
		key := h.Value().(releaserFunc).value.(*testingCacheObject).key
		if n := p.handlesMap[key]; n == 0 {
			t.Fatal("key ref == 0")
		} else if n > 1 {
			p.handlesMap[key] = n - 1
		} else {
			delete(p.handlesMap, key)
		}
		h.Release()
		p.handles = append(p.handles[:i], p.handles[i+1:]...)
		p.handles[len(p.handles) : len(p.handles)+1][0] = nil
	}

	seeds := make([]int64, goroutines)
	instances := make([]instance, goroutines)
	for i := range instances {
		p := &instances[i]
		p.handlesMap = make(map[uint64]int)
		if seeds[i] == 0 {
			seeds[i] = time.Now().UnixNano()
		}
		p.seed = seeds[i]
		p.rnd = rand.New(rand.NewSource(p.seed))
		p.ns = uint64(i)
		p.delete = i%6 == 0
		p.purge = i%8 == 0
		p.zap = i%12 == 0 || i%3 == 0
	}

	seedsStr := make([]string, len(seeds))
	for i, seed := range seeds {
		seedsStr[i] = fmt.Sprint(seed)
	}
	t.Logf("seeds := []int64{%s}", strings.Join(seedsStr, ", "))

	// Get and release.
	for i := range instances {
		p := &instances[i]

		wg.Add(1)
		go func(p *instance) {
			defer wg.Done()

			ns := c.GetNamespace(p.ns)
			for i := 0; i < iterations; i++ {
				if len(p.handles) == 0 || p.rnd.Int()%2 == 0 {
					instanceGet(p, ns, uint64(p.rnd.Intn(keymax)))
				} else {
					instanceRelease(p, ns, p.rnd.Intn(len(p.handles)))
				}
			}
		}(p)
	}
	wg.Wait()

	if used, cap := c.Used(), c.Capacity(); used > cap {
		t.Errorf("Used > capacity, used=%d cap=%d", used, cap)
	}

	// Check effective objects.
	for i := range instances {
		p := &instances[i]
		if int(p.effective) < len(p.handlesMap) {
			t.Errorf("#%d effective objects < acquired handle, eo=%d ah=%d", i, p.effective, len(p.handlesMap))
		}
	}

	if want := int(cnt.created - cnt.released); c.Size() != want {
		t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
	}

	// Delete and purge.
	for i := range instances {
		p := &instances[i]
		p.wantDel = p.effective

		wg.Add(1)
		go func(p *instance) {
			defer wg.Done()

			ns := c.GetNamespace(p.ns)

			if p.delete {
				for key := uint64(0); key < keymax; key++ {
					_, wantExist := p.handlesMap[key]
					gotExist := ns.Delete(key, func(exist, pending bool) {
						atomic.AddInt32(&p.delfinCalledAll, 1)
						if exist {
							atomic.AddInt32(&p.delfinCalledEff, 1)
						}
					})
					if !gotExist && wantExist {
						t.Errorf("delete on NS#%d KEY#%d not found", p.ns, key)
					}
				}

				var delfinCalled int
				for key := uint64(0); key < keymax; key++ {
					func(key uint64) {
						gotExist := ns.Delete(key, func(exist, pending bool) {
							if exist && !pending {
								t.Errorf("delete fin on NS#%d KEY#%d exist and not pending for deletion", p.ns, key)
							}
							delfinCalled++
						})
						if gotExist {
							t.Errorf("delete on NS#%d KEY#%d found", p.ns, key)
						}
					}(key)
				}
				if delfinCalled != keymax {
					t.Errorf("(2) #%d not all delete fin called, diff=%d", p.ns, keymax-delfinCalled)
				}
			}

			if p.purge {
				ns.Purge(func(ns, key uint64) {
					atomic.AddInt32(&p.purgefinCalled, 1)
				})
			}
		}(p)
	}
	wg.Wait()

	if want := int(cnt.created - cnt.released); c.Size() != want {
		t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
	}

	// Release.
	for i := range instances {
		p := &instances[i]

		if !p.zap {
			wg.Add(1)
			go func(p *instance) {
				defer wg.Done()

				ns := c.GetNamespace(p.ns)
				for i := len(p.handles) - 1; i >= 0; i-- {
					instanceRelease(p, ns, i)
				}
			}(p)
		}
	}
	wg.Wait()

	if want := int(cnt.created - cnt.released); c.Size() != want {
		t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
	}

	// Zap.
	for i := range instances {
		p := &instances[i]

		if p.zap {
			wg.Add(1)
			go func(p *instance) {
				defer wg.Done()

				ns := c.GetNamespace(p.ns)
				ns.Zap()

				p.handles = nil
				p.handlesMap = nil
			}(p)
		}
	}
	wg.Wait()

	if want := int(cnt.created - cnt.released); c.Size() != want {
		t.Errorf("Invalid cache size, want=%d got=%d", want, c.Size())
	}

	if notrel, used := int(cnt.created-cnt.released), c.Used(); notrel != used {
		t.Errorf("Invalid used value, want=%d got=%d", notrel, used)
	}

	c.Purge(nil)

	for i := range instances {
		p := &instances[i]

		if p.delete {
			if p.delfinCalledAll != keymax {
				t.Errorf("#%d not all delete fin called, purge=%v zap=%v diff=%d", p.ns, p.purge, p.zap, keymax-p.delfinCalledAll)
			}
			if p.delfinCalledEff != p.wantDel {
				t.Errorf("#%d not all effective delete fin called, diff=%d", p.ns, p.wantDel-p.delfinCalledEff)
			}
			if p.purge && p.purgefinCalled > 0 {
				t.Errorf("#%d some purge fin called, delete=%v zap=%v n=%d", p.ns, p.delete, p.zap, p.purgefinCalled)
			}
		} else {
			if p.purge {
				if p.purgefinCalled != p.wantDel {
					t.Errorf("#%d not all purge fin called, delete=%v zap=%v diff=%d", p.ns, p.delete, p.zap, p.wantDel-p.purgefinCalled)
				}
			}
		}
	}

	if cnt.created != cnt.released {
		t.Errorf("Some cache objects weren't released, created=%d released=%d", cnt.created, cnt.released)
	}
}

func BenchmarkLRUCache_Set(b *testing.B) {
	c := NewLRUCache(0)
	ns := c.GetNamespace(0)
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		set(ns, i, "", 1, nil)
	}
}

func BenchmarkLRUCache_Get(b *testing.B) {
	c := NewLRUCache(0)
	ns := c.GetNamespace(0)
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		set(ns, i, "", 1, nil)
	}
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		ns.Get(i, nil)
	}
}

func BenchmarkLRUCache_Get2(b *testing.B) {
	c := NewLRUCache(0)
	ns := c.GetNamespace(0)
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		set(ns, i, "", 1, nil)
	}
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		ns.Get(i, func() (charge int, value interface{}) {
			return 0, nil
		})
	}
}

func BenchmarkLRUCache_Release(b *testing.B) {
	c := NewLRUCache(0)
	ns := c.GetNamespace(0)
	handles := make([]Handle, b.N)
	for i := uint64(0); i < uint64(b.N); i++ {
		handles[i] = set(ns, i, "", 1, nil)
	}
	b.ResetTimer()
	for _, h := range handles {
		h.Release()
	}
}

func BenchmarkLRUCache_SetRelease(b *testing.B) {
	capacity := b.N / 100
	if capacity <= 0 {
		capacity = 10
	}
	c := NewLRUCache(capacity)
	ns := c.GetNamespace(0)
	b.ResetTimer()
	for i := uint64(0); i < uint64(b.N); i++ {
		set(ns, i, "", 1, nil).Release()
	}
}

func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
	capacity := b.N / 100
	if capacity <= 0 {
		capacity = 10
	}
	c := NewLRUCache(capacity)
	ns := c.GetNamespace(0)
	b.ResetTimer()

	na := b.N / 2
	nb := b.N - na

	for i := uint64(0); i < uint64(na); i++ {
		set(ns, i, "", 1, nil).Release()
	}

	for i := uint64(0); i < uint64(nb); i++ {
		set(ns, i, "", 1, nil).Release()
	}
}
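For reference, the set helper and the releaserFunc type used throughout these tests are defined earlier in lru_cache_test.go and are not shown in this hunk. The sketch below is a non-authoritative reconstruction, inferred only from how the tests use them (set wraps Namespace.Get, and releaserFunc appears to satisfy util.Releaser so the per-entry finalizer runs when the cache finally drops the node); the real definitions may differ.

// Sketch reconstructed from usage above, not the verbatim definitions.
type releaserFunc struct {
	fn    func()
	value interface{}
}

// Release satisfies util.Releaser, so lruNode.fin (see lru_cache.go below)
// calls it when the node is dropped from the cache.
func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}

// set stores value under key via Namespace.Get. When a finalizer is supplied
// the value is wrapped in releaserFunc; otherwise it is stored as-is, which is
// why the tests assert h.Value().(int) when fin is nil.
func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Handle {
	return ns.Get(key, func() (int, interface{}) {
		if fin != nil {
			return charge, releaserFunc{fin, value}
		}
		return charge, value
	})
}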
613 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go generated vendored Normal file
@@ -0,0 +1,613 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"sync"
	"sync/atomic"

	"github.com/syndtr/goleveldb/leveldb/util"
)

// The LLRB implementation was taken from https://github.com/petar/GoLLRB,
// which contains the following header:
//
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// lruCache represents the LRU cache state.
type lruCache struct {
	mu                sync.Mutex
	recent            lruNode
	table             map[uint64]*lruNs
	capacity          int
	used, size, alive int
}

// NewLRUCache creates a new initialized LRU cache with the given capacity.
func NewLRUCache(capacity int) Cache {
	c := &lruCache{
		table:    make(map[uint64]*lruNs),
		capacity: capacity,
	}
	c.recent.rNext = &c.recent
	c.recent.rPrev = &c.recent
	return c
}

func (c *lruCache) Capacity() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.capacity
}

func (c *lruCache) Used() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.used
}

func (c *lruCache) Size() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.size
}

func (c *lruCache) NumObjects() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.alive
}

// SetCapacity sets the cache capacity.
func (c *lruCache) SetCapacity(capacity int) {
	c.mu.Lock()
	c.capacity = capacity
	c.evict()
	c.mu.Unlock()
}

// GetNamespace returns the namespace object for the given id.
func (c *lruCache) GetNamespace(id uint64) Namespace {
	c.mu.Lock()
	defer c.mu.Unlock()

	if ns, ok := c.table[id]; ok {
		return ns
	}

	ns := &lruNs{lru: c, id: id}
	c.table[id] = ns
	return ns
}

func (c *lruCache) ZapNamespace(id uint64) {
	c.mu.Lock()
	if ns, exist := c.table[id]; exist {
		ns.zapNB()
		delete(c.table, id)
	}
	c.mu.Unlock()
}

func (c *lruCache) PurgeNamespace(id uint64, fin PurgeFin) {
	c.mu.Lock()
	if ns, exist := c.table[id]; exist {
		ns.purgeNB(fin)
	}
	c.mu.Unlock()
}

// Purge purges the entire cache.
func (c *lruCache) Purge(fin PurgeFin) {
	c.mu.Lock()
	for _, ns := range c.table {
		ns.purgeNB(fin)
	}
	c.mu.Unlock()
}

func (c *lruCache) Zap() {
	c.mu.Lock()
	for _, ns := range c.table {
		ns.zapNB()
	}
	c.table = make(map[uint64]*lruNs)
	c.mu.Unlock()
}

func (c *lruCache) evict() {
	top := &c.recent
	for n := c.recent.rPrev; c.used > c.capacity && n != top; {
		n.state = nodeEvicted
		n.rRemove()
		n.derefNB()
		c.used -= n.charge
		n = c.recent.rPrev
	}
}

type lruNs struct {
	lru    *lruCache
	id     uint64
	rbRoot *lruNode
	state  nsState
}

func (ns *lruNs) rbGetOrCreateNode(h *lruNode, key uint64) (hn, n *lruNode) {
	if h == nil {
		n = &lruNode{ns: ns, key: key}
		return n, n
	}

	if key < h.key {
		hn, n = ns.rbGetOrCreateNode(h.rbLeft, key)
		if hn != nil {
			h.rbLeft = hn
		} else {
			return nil, n
		}
	} else if key > h.key {
		hn, n = ns.rbGetOrCreateNode(h.rbRight, key)
		if hn != nil {
			h.rbRight = hn
		} else {
			return nil, n
		}
	} else {
		return nil, h
	}

	if rbIsRed(h.rbRight) && !rbIsRed(h.rbLeft) {
		h = rbRotLeft(h)
	}
	if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
		h = rbRotRight(h)
	}
	if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
		rbFlip(h)
	}
	return h, n
}

func (ns *lruNs) getOrCreateNode(key uint64) *lruNode {
	hn, n := ns.rbGetOrCreateNode(ns.rbRoot, key)
	if hn != nil {
		ns.rbRoot = hn
		ns.rbRoot.rbBlack = true
	}
	return n
}

func (ns *lruNs) rbGetNode(key uint64) *lruNode {
	h := ns.rbRoot
	for h != nil {
		switch {
		case key < h.key:
			h = h.rbLeft
		case key > h.key:
			h = h.rbRight
		default:
			return h
		}
	}
	return nil
}

func (ns *lruNs) getNode(key uint64) *lruNode {
	return ns.rbGetNode(key)
}

func (ns *lruNs) rbDeleteNode(h *lruNode, key uint64) *lruNode {
	if h == nil {
		return nil
	}

	if key < h.key {
		if h.rbLeft == nil { // key not present, nothing to delete
			return h
		}
		if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
			h = rbMoveLeft(h)
		}
		h.rbLeft = ns.rbDeleteNode(h.rbLeft, key)
	} else {
		if rbIsRed(h.rbLeft) {
			h = rbRotRight(h)
		}
		// If @key equals @h.key and @h has no right child
		if h.key == key && h.rbRight == nil {
			return nil
		}
		if h.rbRight != nil && !rbIsRed(h.rbRight) && !rbIsRed(h.rbRight.rbLeft) {
			h = rbMoveRight(h)
		}
		// If @key equals @h.key, and (from above) h.rbRight != nil
		if h.key == key {
			var x *lruNode
			h.rbRight, x = rbDeleteMin(h.rbRight)
			if x == nil {
				panic("logic")
			}
			x.rbLeft, h.rbLeft = h.rbLeft, nil
			x.rbRight, h.rbRight = h.rbRight, nil
			x.rbBlack = h.rbBlack
			h = x
		} else { // Else, @key is bigger than @h.key
			h.rbRight = ns.rbDeleteNode(h.rbRight, key)
		}
	}

	return rbFixup(h)
}

func (ns *lruNs) deleteNode(key uint64) {
	ns.rbRoot = ns.rbDeleteNode(ns.rbRoot, key)
	if ns.rbRoot != nil {
		ns.rbRoot.rbBlack = true
	}
}

func (ns *lruNs) rbIterateNodes(h *lruNode, pivot uint64, iter func(n *lruNode) bool) bool {
	if h == nil {
		return true
	}
	if h.key >= pivot {
		if !ns.rbIterateNodes(h.rbLeft, pivot, iter) {
			return false
		}
		if !iter(h) {
			return false
		}
	}
	return ns.rbIterateNodes(h.rbRight, pivot, iter)
}

func (ns *lruNs) iterateNodes(iter func(n *lruNode) bool) {
	ns.rbIterateNodes(ns.rbRoot, 0, iter)
}

func (ns *lruNs) Get(key uint64, setf SetFunc) Handle {
	ns.lru.mu.Lock()
	defer ns.lru.mu.Unlock()

	if ns.state != nsEffective {
		return nil
	}

	var n *lruNode
	if setf == nil {
		n = ns.getNode(key)
		if n == nil {
			return nil
		}
	} else {
		n = ns.getOrCreateNode(key)
	}
	switch n.state {
	case nodeZero:
		charge, value := setf()
		if value == nil {
			ns.deleteNode(key)
			return nil
		}
		if charge < 0 {
			charge = 0
		}

		n.value = value
		n.charge = charge
		n.state = nodeEvicted

		ns.lru.size += charge
		ns.lru.alive++

		fallthrough
	case nodeEvicted:
		if n.charge == 0 {
			break
		}

		// Insert to recent list.
		n.state = nodeEffective
		n.ref++
		ns.lru.used += n.charge
		ns.lru.evict()

		fallthrough
	case nodeEffective:
		// Bump to front.
		n.rRemove()
		n.rInsert(&ns.lru.recent)
	}
	n.ref++

	return &lruHandle{node: n}
}

func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
	ns.lru.mu.Lock()
	defer ns.lru.mu.Unlock()

	if ns.state != nsEffective {
		if fin != nil {
			fin(false, false)
		}
		return false
	}

	n := ns.getNode(key)
	if n == nil {
		if fin != nil {
			fin(false, false)
		}
		return false
	}

	switch n.state {
	case nodeEffective:
		ns.lru.used -= n.charge
		n.state = nodeDeleted
		n.delfin = fin
		n.rRemove()
		n.derefNB()
	case nodeEvicted:
		n.state = nodeDeleted
		n.delfin = fin
	case nodeDeleted:
		if fin != nil {
			fin(true, true)
		}
		return false
	default:
		panic("invalid state")
	}

	return true
}

func (ns *lruNs) purgeNB(fin PurgeFin) {
	if ns.state == nsEffective {
		var nodes []*lruNode
		ns.iterateNodes(func(n *lruNode) bool {
			nodes = append(nodes, n)
			return true
		})
		for _, n := range nodes {
			switch n.state {
			case nodeEffective:
				ns.lru.used -= n.charge
				n.state = nodeDeleted
				n.purgefin = fin
				n.rRemove()
				n.derefNB()
			case nodeEvicted:
				n.state = nodeDeleted
				n.purgefin = fin
			case nodeDeleted:
			default:
				panic("invalid state")
			}
		}
	}
}

func (ns *lruNs) Purge(fin PurgeFin) {
	ns.lru.mu.Lock()
	ns.purgeNB(fin)
	ns.lru.mu.Unlock()
}

func (ns *lruNs) zapNB() {
	if ns.state == nsEffective {
		ns.state = nsZapped

		ns.iterateNodes(func(n *lruNode) bool {
			if n.state == nodeEffective {
				ns.lru.used -= n.charge
				n.rRemove()
			}
			ns.lru.size -= n.charge
			n.state = nodeDeleted
			n.fin()

			return true
		})
		ns.rbRoot = nil
	}
}

func (ns *lruNs) Zap() {
	ns.lru.mu.Lock()
	ns.zapNB()
	delete(ns.lru.table, ns.id)
	ns.lru.mu.Unlock()
}

type lruNode struct {
	ns *lruNs

	rNext, rPrev    *lruNode
	rbLeft, rbRight *lruNode
	rbBlack         bool

	key      uint64
	value    interface{}
	charge   int
	ref      int
	state    nodeState
	delfin   DelFin
	purgefin PurgeFin
}

func (n *lruNode) rInsert(at *lruNode) {
	x := at.rNext
	at.rNext = n
	n.rPrev = at
	n.rNext = x
	x.rPrev = n
}

func (n *lruNode) rRemove() bool {
	if n.rPrev == nil {
		return false
	}

	n.rPrev.rNext = n.rNext
	n.rNext.rPrev = n.rPrev
	n.rPrev = nil
	n.rNext = nil

	return true
}

func (n *lruNode) fin() {
	if r, ok := n.value.(util.Releaser); ok {
		r.Release()
	}
	if n.purgefin != nil {
		n.purgefin(n.ns.id, n.key)
		n.delfin = nil
		n.purgefin = nil
	} else if n.delfin != nil {
		n.delfin(true, false)
		n.delfin = nil
	}
}

func (n *lruNode) derefNB() {
	n.ref--
	if n.ref == 0 {
		if n.ns.state == nsEffective {
			// Remove element.
			n.ns.deleteNode(n.key)
			n.ns.lru.size -= n.charge
			n.ns.lru.alive--
			n.fin()
		}
		n.value = nil
	} else if n.ref < 0 {
		panic("leveldb/cache: lruCache: negative node reference")
	}
}

func (n *lruNode) deref() {
	n.ns.lru.mu.Lock()
	n.derefNB()
	n.ns.lru.mu.Unlock()
}

type lruHandle struct {
	node *lruNode
	once uint32
}

func (h *lruHandle) Value() interface{} {
	if atomic.LoadUint32(&h.once) == 0 {
		return h.node.value
	}
	return nil
}

func (h *lruHandle) Release() {
	if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
		return
	}
	h.node.deref()
	h.node = nil
}

func rbIsRed(h *lruNode) bool {
	if h == nil {
		return false
	}
	return !h.rbBlack
}

func rbRotLeft(h *lruNode) *lruNode {
	x := h.rbRight
	if x.rbBlack {
		panic("rotating a black link")
	}
	h.rbRight = x.rbLeft
	x.rbLeft = h
	x.rbBlack = h.rbBlack
	h.rbBlack = false
	return x
}

func rbRotRight(h *lruNode) *lruNode {
	x := h.rbLeft
	if x.rbBlack {
		panic("rotating a black link")
	}
	h.rbLeft = x.rbRight
	x.rbRight = h
	x.rbBlack = h.rbBlack
	h.rbBlack = false
	return x
}

func rbFlip(h *lruNode) {
	h.rbBlack = !h.rbBlack
	h.rbLeft.rbBlack = !h.rbLeft.rbBlack
	h.rbRight.rbBlack = !h.rbRight.rbBlack
}

func rbMoveLeft(h *lruNode) *lruNode {
	rbFlip(h)
	if rbIsRed(h.rbRight.rbLeft) {
		h.rbRight = rbRotRight(h.rbRight)
		h = rbRotLeft(h)
		rbFlip(h)
	}
	return h
}

func rbMoveRight(h *lruNode) *lruNode {
	rbFlip(h)
	if rbIsRed(h.rbLeft.rbLeft) {
		h = rbRotRight(h)
		rbFlip(h)
	}
	return h
}

func rbFixup(h *lruNode) *lruNode {
	if rbIsRed(h.rbRight) {
		h = rbRotLeft(h)
	}

	if rbIsRed(h.rbLeft) && rbIsRed(h.rbLeft.rbLeft) {
		h = rbRotRight(h)
	}

	if rbIsRed(h.rbLeft) && rbIsRed(h.rbRight) {
		rbFlip(h)
	}

	return h
}

func rbDeleteMin(h *lruNode) (hn, n *lruNode) {
	if h == nil {
		return nil, nil
	}
	if h.rbLeft == nil {
		return nil, h
	}

	if !rbIsRed(h.rbLeft) && !rbIsRed(h.rbLeft.rbLeft) {
		h = rbMoveLeft(h)
	}

	h.rbLeft, n = rbDeleteMin(h.rbLeft)

	return rbFixup(h), n
}
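Taken together, the surface implemented above (NewLRUCache, GetNamespace, Namespace.Get with a SetFunc, Handle.Release, Delete and Purge with finalizers) is used roughly as follows. This is a minimal usage sketch, assuming the Cache, Namespace, Handle, SetFunc, DelFin and PurgeFin declarations in the package's cache.go, which is not part of this hunk, and assuming the vendored import path.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewLRUCache(100) // capacity is measured in "charge" units
	ns := c.GetNamespace(1)

	// Create-or-get: the SetFunc runs only when the key is not yet cached
	// and returns the charge plus the value to cache.
	h := ns.Get(42, func() (charge int, value interface{}) {
		return 1, "answer"
	})
	if h != nil {
		fmt.Println(h.Value().(string)) // "answer"
		h.Release()                     // every handle must be released
	}

	// Plain lookup (nil SetFunc) returns nil on a miss.
	if h := ns.Get(7, nil); h == nil {
		fmt.Println("miss")
	}

	// Delete takes a finalizer reporting whether the entry existed and
	// whether its removal is still pending on outstanding handles; values
	// implementing util.Releaser are released when the node is dropped.
	ns.Delete(42, func(exist, pending bool) {
		fmt.Println("deleted:", exist, "pending:", pending)
	})
	c.Purge(nil)
}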
75 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go generated vendored Normal file
@@ -0,0 +1,75 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import "github.com/syndtr/goleveldb/leveldb/comparer"

type iComparer struct {
	ucmp comparer.Comparer
}

func (icmp *iComparer) uName() string {
	return icmp.ucmp.Name()
}

func (icmp *iComparer) uCompare(a, b []byte) int {
	return icmp.ucmp.Compare(a, b)
}

func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
	return icmp.ucmp.Separator(dst, a, b)
}

func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
	return icmp.ucmp.Successor(dst, b)
}

func (icmp *iComparer) Name() string {
	return icmp.uName()
}

func (icmp *iComparer) Compare(a, b []byte) int {
	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
	if x == 0 {
		if m, n := iKey(a).num(), iKey(b).num(); m > n {
			x = -1
		} else if m < n {
			x = 1
		}
	}
	return x
}

func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
	ua, ub := iKey(a).ukey(), iKey(b).ukey()
	dst = icmp.ucmp.Separator(dst, ua, ub)
	if dst == nil {
		return nil
	}
	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
		dst = append(dst, kMaxNumBytes...)
	} else {
		// This does not rule out that the result may be longer than len(ua).
		dst = append(dst, a[len(a)-8:]...)
	}
	return dst
}

func (icmp *iComparer) Successor(dst, b []byte) []byte {
	ub := iKey(b).ukey()
	dst = icmp.ucmp.Successor(dst, ub)
	if dst == nil {
		return nil
	}
	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
		dst = append(dst, kMaxNumBytes...)
	} else {
		// This does not rule out that the result may be longer than len(ub).
		dst = append(dst, b[len(b)-8:]...)
	}
	return dst
}
51 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package comparer

import "bytes"

type bytesComparer struct{}

func (bytesComparer) Compare(a, b []byte) int {
	return bytes.Compare(a, b)
}

func (bytesComparer) Name() string {
	return "leveldb.BytewiseComparator"
}

func (bytesComparer) Separator(dst, a, b []byte) []byte {
	i, n := 0, len(a)
	if n > len(b) {
		n = len(b)
	}
	for ; i < n && a[i] == b[i]; i++ {
	}
	if i >= n {
		// Do not shorten if one string is a prefix of the other.
	} else if c := a[i]; c < 0xff && c+1 < b[i] {
		dst = append(dst, a[:i+1]...)
		dst[i]++
		return dst
	}
	return nil
}

func (bytesComparer) Successor(dst, b []byte) []byte {
	for i, c := range b {
		if c != 0xff {
			dst = append(dst, b[:i+1]...)
			dst[i]++
			return dst
		}
	}
	return nil
}

// DefaultComparer is the default implementation of the Comparer interface.
// It uses the natural byte ordering, consistent with bytes.Compare.
var DefaultComparer = bytesComparer{}
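The shortening behaviour of DefaultComparer is easiest to see on concrete keys. The sketch below is a hypothetical standalone program (assuming the vendored import path) showing Separator picking a short key between two neighbours and Successor picking a short upper bound.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	c := comparer.DefaultComparer

	// "abd" is a short key x with "abc" <= x < "abzz".
	fmt.Printf("%q\n", c.Separator(nil, []byte("abc"), []byte("abzz"))) // "abd"

	// No shortening is suggested when one key is a prefix of the other.
	fmt.Println(c.Separator(nil, []byte("ab"), []byte("abc")) == nil) // true

	// "b" is a short key x with x >= "abc".
	fmt.Printf("%q\n", c.Successor(nil, []byte("abc"))) // "b"
}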
57 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go generated vendored Normal file
@@ -0,0 +1,57 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package comparer provides interface and implementations for ordering
// sets of data.
package comparer

// BasicComparer is the interface that wraps the basic Compare method.
type BasicComparer interface {
	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
	// if their contents are exactly equal. Furthermore, the empty slice
	// must be 'less than' any non-empty slice.
	Compare(a, b []byte) int
}

// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship.
type Comparer interface {
	BasicComparer

	// Name returns the name of the comparer.
	//
	// The LevelDB on-disk format stores the comparer name, and opening a
	// database with a different comparer from the one it was created with
	// will result in an error.
	//
	// An implementation should switch to a new name whenever the comparer
	// implementation changes in a way that will cause the relative ordering
	// of any two keys to change.
	//
	// Names starting with "leveldb." are reserved and should not be used
	// by any users of this package.
	Name() string

	// Below are advanced functions used to reduce the space requirements
	// for internal data structures such as index blocks.

	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
	// where 'less than' is consistent with Compare. An implementation should
	// return nil if x is equal to a.
	//
	// The contents of a and b must not be modified in any way. Doing so
	// may corrupt the internal state.
	Separator(dst, a, b []byte) []byte

	// Successor appends a sequence of bytes x to dst such that x >= b, where
	// 'less than' is consistent with Compare. An implementation should return
	// nil if x is equal to b.
	//
	// The contents of b must not be modified in any way. Doing so may
	// corrupt the internal state.
	Successor(dst, b []byte) []byte
}
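A custom comparer only has to satisfy this interface. The sketch below is a hypothetical illustration (the namedComparer type and its name are invented for the example): it keeps DefaultComparer's ordering via embedding but reports its own Name, since the on-disk format records and checks that name on open.

package main

import "github.com/syndtr/goleveldb/leveldb/comparer"

// namedComparer delegates Compare, Separator and Successor to the embedded
// comparer (DefaultComparer here) and only overrides Name.
type namedComparer struct {
	comparer.Comparer
}

func (namedComparer) Name() string { return "example.BytewiseV1" }

func main() {
	var cmp comparer.Comparer = namedComparer{comparer.DefaultComparer}
	_ = cmp.Compare([]byte("a"), []byte("b")) // -1, delegated to DefaultComparer
}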