Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-03 19:39:20 -05:00

Compare commits (9 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 5ce8261d89 |  |
|  | 25c080b46f |  |
|  | ee765990b7 |  |
|  | 77fcb52b6a |  |
|  | aaec3330a9 |  |
|  | 3f55a885a1 |  |
|  | 80a1c931a9 |  |
|  | a147f4bb77 |  |
|  | e4316aec6e |  |
8  .gitattributes (vendored)

@@ -1,8 +0,0 @@
# Text files use LF line endings in this repository
* text=auto

# Except the dependencies, which we leave alone
vendor/** -text=auto

# Diffs on these files are meaningless
*.svg -diff
2  .github/CODEOWNERS (vendored)

@@ -1,2 +0,0 @@
/AUTHORS @calmh
/*.md @calmh
42  .github/ISSUE_TEMPLATE.md (vendored)

@@ -1,42 +0,0 @@
### DO NOT REPORT SECURITY ISSUES IN THIS ISSUE TRACKER

Instead, contact security@syncthing.net directly - see
https://syncthing.net/security.html for more information.

### DO NOT POST SUPPORT REQUESTS OR GENERAL QUESTIONS IN THIS ISSUE TRACKER

Please use the forum at https://forum.syncthing.net/ where a large number of
helpful people hang out. This issue tracker is for reporting bugs or feature
requests directly to the developers. Worst case you might get a short
"that's a bug, please report it on GitHub" response on the forum, in which
case we thank you for your patience and following our advice. :)

### Please use the correct issue tracker

If your problem relates to a Syncthing wrapper or [sub-project](https://github.com/syncthing) such as [Syncthing for Android](https://github.com/syncthing/syncthing-android/issues), [SyncTrayzor](https://github.com/canton7/synctrayzor) or the [documentation](https://github.com/syncthing/docs/issues), please use their respective issue trackers.

### Does your log mention database corruption?

If your Syncthing log reports panics because of database corruption it is most likely a fault with your system's storage or memory. Affected log entries will contain lines starting with `panic: leveldb`. You will need to delete the index database to clear this, by running `syncthing -reset-database`.

### Please do post actual bug reports and feature requests.

If your issue is a bug report, replace this boilerplate with a description
of the problem, being sure to include at least:

- what happened,
- what you expected to happen instead, and
- any steps to reproduce the problem.

Also fill out the version information below and add log output or
screenshots as appropriate.

If your issue is a feature request, simply replace this template text in
its entirety.

### Version Information

Syncthing Version: v0.x.y
OS Version: Windows 7 / Ubuntu 14.04 / ...
Browser Version: (if applicable, for GUI issues)
27  .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -1,27 +0,0 @@
### Purpose

Describe the purpose of this change. If there is an existing issue that is
resolved by this pull request, ensure that the commit subject is on the form
`Some short description (fixes #1234)` where 1234 is the issue number.

### Testing

Describe what testing has been done, and how the reviewer can test the change
if new tests are not included.

### Screenshots

If this is a GUI change, include screenshots of the change. If not, please
feel free to just delete this section.

### Documentation

If this is a user visible change (including API and protocol changes), add a link here
to the corresponding pull request on https://github.com/syncthing/docs or describe
the documentation changes necessary.

## Authorship

Your name and email will be added automatically to the AUTHORS file
based on the commit metadata.
17  .gitignore (vendored)

@@ -1,25 +1,12 @@
/syncthing
/stdiscosrv
syncthing
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
*.sublime*
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
RELEASE
deb
lib/auto/gui.files.go
snapcraft.yaml
prime/
snap/
parts/
stage/
*.snap
*.bz2
190  AUTHORS

@@ -1,190 +0,0 @@
|
||||
# This is the official list of Syncthing authors for copyright purposes.
|
||||
#
|
||||
# THIS FILE IS MOSTLY AUTO GENERATED. IF YOU'VE MADE A COMMIT TO THE
|
||||
# REPOSITORY YOU WILL BE ADDED HERE AUTOMATICALLY WITHOUT THE NEED FOR
|
||||
# ANY MANUAL ACTION.
|
||||
#
|
||||
# That said, you are welcome to correct your name or add a nickname / GitHub
|
||||
# user name as appropriate. The format is:
|
||||
#
|
||||
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
|
||||
#
|
||||
# The in-GUI authors list is periodically automatically updated from the
|
||||
# contents of this file.
|
||||
#
|
||||
|
||||
Aaron Bieber (qbit) <qbit@deftly.net>
|
||||
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com> <adam@proactiveservices.co.uk>
|
||||
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
|
||||
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
|
||||
Alexander Graf (alex2108) <register-github@alex-graf.de>
|
||||
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
|
||||
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
|
||||
andresvia <andres.via@gmail.com>
|
||||
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
|
||||
Andrew Rabert (nvllsvm) <ar@nullsum.net> <6550543+nvllsvm@users.noreply.github.com>
|
||||
Andrey D (scienmind) <scintertech@cryptolab.net> <scienmind@users.noreply.github.com>
|
||||
andyleap <andyleap@gmail.com>
|
||||
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
|
||||
Antony Male (canton7) <antony.male@gmail.com>
|
||||
Aranjedeath <Aranjedeath@users.noreply.github.com>
|
||||
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
|
||||
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
|
||||
BAHADIR YILMAZ <bahadiryilmaz32@gmail.com>
|
||||
Bart De Vries (mogwa1) <devriesb@gmail.com>
|
||||
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
|
||||
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
|
||||
Ben Shepherd (benshep) <bjashepherd@gmail.com>
|
||||
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
|
||||
Benedikt Heine (bebehei) <bebe@bebehei.de>
|
||||
Benedikt Morbach <benedikt.morbach@googlemail.com>
|
||||
Benno Fünfstück <benno.fuenfstueck@gmail.com>
|
||||
Benny Ng (tpng) <benny.tpng@gmail.com>
|
||||
Boris Rybalkin <ribalkin@gmail.com>
|
||||
Brandon Philips (philips) <brandon@ifup.org>
|
||||
Brendan Long (brendanlong) <self@brendanlong.com>
|
||||
Brian R. Becker (brbecker) <brbecker@gmail.com>
|
||||
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
|
||||
Carsten Hagemann (Moter8) <moter8@gmail.com>
|
||||
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com> <katrinleinweber@MAC.local>
|
||||
Cedric Staniewski (xduugu) <cedric@gmx.ca>
|
||||
Chris Howie (cdhowie) <me@chrishowie.com>
|
||||
Chris Joel (cdata) <chris@scriptolo.gy>
|
||||
Chris Tonkinson <chris@masterbran.ch>
|
||||
chucic <chucic@seznam.cz>
|
||||
Colin Kennedy (moshen) <moshen.colin@gmail.com>
|
||||
Dale Visser <dale.visser@live.com>
|
||||
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
|
||||
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
|
||||
Daniel Martí (mvdan) <mvdan@mvdan.cc>
|
||||
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
|
||||
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
|
||||
Denis A. (dva) <denisva@gmail.com>
|
||||
Dennis Wilson (snnd) <dw@risu.io>
|
||||
derekriemer <derek.riemer@colorado.edu>
|
||||
Dmitry Saveliev (dsaveliev) <d.e.saveliev@gmail.com>
|
||||
Dominik Heidler (asdil12) <dominik@heidler.eu>
|
||||
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
|
||||
Elliot Huffman <thelich2@gmail.com>
|
||||
Emil Hessman (ceh) <emil@hessman.se>
|
||||
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
|
||||
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
|
||||
Felix Ableitner (Nutomic) <me@nutomic.com>
|
||||
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
|
||||
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
|
||||
Frank Isemann (fti7) <frank@isemann.name>
|
||||
Gilli Sigurdsson (gillisig) <gilli@vx.is>
|
||||
Graham Miln (grahammiln) <graham.miln@dssw.co.uk> <graham.miln@miln.eu>
|
||||
Han Boetes <han@boetes.org>
|
||||
Harrison Jones (harrisonhjones) <harrisonhjones@users.noreply.github.com>
|
||||
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
|
||||
Iain Barnett <iainspeed@gmail.com>
|
||||
Ian Johnson (anonymouse64) <ian.johnson@canonical.com> <person.uwsome@gmail.com>
|
||||
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
|
||||
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
|
||||
Jake Peterson (acogdev) <jake@acogdev.com>
|
||||
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
|
||||
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
|
||||
janost <janost@tuta.io>
|
||||
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
|
||||
jaseg <githubaccount@jaseg.net>
|
||||
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
|
||||
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
|
||||
Jerry Jacobs (xor-gate) <jerry.jacobs@xor-gate.org> <xor-gate@users.noreply.github.com>
|
||||
Jochen Voss (seehuhn) <voss@seehuhn.de>
|
||||
Johan Andersson <j@i19.se>
|
||||
Johan Vromans (sciurius) <jvromans@squirrel.nl>
|
||||
John Rinehart (fuzzybear3965) <johnrichardrinehart@gmail.com>
|
||||
Jonathan Cross <jcross@gmail.com>
|
||||
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
|
||||
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
|
||||
Keith Turner <kturner@apache.org>
|
||||
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
|
||||
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
|
||||
Kevin Allen (ironmig) <kma1660@gmail.com>
|
||||
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
|
||||
klemens <ka7@github.com>
|
||||
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
|
||||
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
|
||||
Laurent Arnoud <laurent@spkdev.net>
|
||||
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
|
||||
Leo Arias (elopio) <yo@elopio.net>
|
||||
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
|
||||
Lode Hoste (Zillode) <zillode@zillode.be>
|
||||
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
|
||||
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
|
||||
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
|
||||
Marc Pujol (kilburn) <kilburn@la3.org>
|
||||
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
|
||||
marco-m <marco.molteni@laposte.net>
|
||||
Mark Pulford (mpx) <mark@kyne.com.au>
|
||||
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
|
||||
Matic Potočnik <hairyfotr@gmail.com>
|
||||
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
|
||||
Matteo Ruina <matteo.ruina@gmail.com>
|
||||
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
|
||||
MaximAL <almaximal@ya.ru>
|
||||
Maxime Thirouin <m@moox.io>
|
||||
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
|
||||
Michael Ploujnikov (plouj) <ploujj@gmail.com>
|
||||
Michael Tilli (pyfisch) <pyfisch@gmail.com>
|
||||
Mike Boone <mike@boonedocks.net>
|
||||
MikeLund <MikeLund@users.noreply.github.com>
|
||||
Nate Morrison (nrm21) <natemorrison@gmail.com>
|
||||
Nicholas Rishel (PrototypeNM1) <rishel.nick@gmail.com> <PrototypeNM1@users.noreply.github.com>
|
||||
Nico Stapelbroek <3368018+nstapelbroek@users.noreply.github.com>
|
||||
Nicolas Braud-Santoni <nicolas@braud-santoni.eu>
|
||||
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
|
||||
Nils Jakobi (thunderstorm99) <jakobi.nils@gmail.com>
|
||||
NoLooseEnds <jon.koslung@gmail.com>
|
||||
Oyebanji Jacob Mayowa <oyebanji05@gmail.com>
|
||||
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
|
||||
Pawel Palenica (qepasa) <pawelpalenica11@gmail.com>
|
||||
Paweł Rozlach <vespian@users.noreply.github.com>
|
||||
perewa <cavalcante.ten@gmail.com>
|
||||
Peter Badida <KeyWeeUsr@users.noreply.github.com>
|
||||
Peter Dave Hello <hsu@peterdavehello.org>
|
||||
Peter Hoeg (peterhoeg) <peter@speartail.com>
|
||||
Peter Marquardt (wwwutz) <wwwutz@gmail.com> <wwwutz@googlemail.com>
|
||||
Phil Davis <phil.davis@inf.org>
|
||||
Philippe Schommers (filoozoom) <philippe@schommers.be>
|
||||
Phill Luby (pluby) <phill.luby@newredo.com>
|
||||
Pier Paolo Ramon <ramonpierre@gmail.com>
|
||||
Piotr Bejda (piobpl) <piotrb10@gmail.com>
|
||||
Pramodh KP (pramodhkp) <pramodh.p@directi.com> <1507241+pramodhkp@users.noreply.github.com>
|
||||
Richard Hartmann <RichiH@users.noreply.github.com>
|
||||
Robert Carosi (nov1n) <robert@carosi.nl>
|
||||
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
|
||||
Ross Smith II (rasa) <ross@smithii.com>
|
||||
rubenbe <github-com-00ff86@vandamme.email>
|
||||
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
|
||||
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
|
||||
Scott Klupfel (kluppy) <kluppy@going2blue.com>
|
||||
Sergey Mishin (ralder) <ralder@yandex.ru>
|
||||
Simon Frei (imsodin) <freisim93@gmail.com>
|
||||
Sly_tom_cat <slytomcat@mail.ru>
|
||||
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
|
||||
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
|
||||
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
|
||||
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
|
||||
Thomas Hipp <thomashipp@gmail.com>
|
||||
Tim Abell (timabell) <tim@timwise.co.uk>
|
||||
Tim Howes (timhowes) <timhowes@berkeley.edu>
|
||||
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
|
||||
Tobias Tom (tobiastom) <t.tom@succont.de>
|
||||
Tomas Cerveny (kozec) <kozec@kozec.com>
|
||||
Tommy Thorn <tommy-github-email@thorn.ws>
|
||||
Tully Robinson (tojrobinson) <tully@tojr.org>
|
||||
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
|
||||
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
|
||||
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
|
||||
Victor Buinsky (buinsky) <vix_booja@tut.by>
|
||||
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
|
||||
Vladimir Rusinov <vrusinov@google.com>
|
||||
wangguoliang <liangcszzu@163.com>
|
||||
William A. Kennington III (wkennington) <william@wkennington.com>
|
||||
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
|
||||
Xavier O. (damajor) <damajor@gmail.com>
|
||||
xjtdy888 (xjtdy888) <xjtdy888@163.com>
|
||||
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
|
||||
佛跳墙 <daoquan@qq.com>
|
||||
73  CONDUCT.md

@@ -1,73 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
education, socio-economic status, nationality, personal appearance, race,
|
||||
religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at security@syncthing.net. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
CONTRIBUTING.md

@@ -12,7 +12,7 @@ least the following:
- What operating system, operating system version and version of
  Syncthing you are running

- The same for other connected devices, where relevant
- The same for other connected nodes, where relevant

- Screenshot if the issue concerns something visible in the GUI

@@ -31,33 +31,74 @@ latest info on Transifex.

## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](https://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.
Please do contribute! If you want to contribute but are unsure where to
start, the [Contributions Needed
topic](http://discourse.syncthing.net/t/49) lists areas in need of
attention. In general, any open issues are fair game! Be prepared for a
[certain amount of
review](https://discourse.syncthing.net/t/733); it's all in the name of
quality. :)

## Contributing Documentation
You may be asked to "rebase" or "squash" your pull request before it's
accepted. This means to make sure that the pull request:

Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
- Is based on the latest commit on `master`.

- Only contains a single commit (exceptions to be made where
  appropriate).

- Has a descriptive commit message, ending in `(fixes #123)` if it
  resolves an existing issue.

## Licensing

All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:
All contributions are made under the same MIT License as the rest of the
project, except documentation which is licensed under the Creative
Commons Attribution 4.0 International License. You retain the copyright
to code you have written.

- Certain commands (under cmd/...) may have a separate license, indicated by
  the presence of a LICENSE file in the corresponding directory.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the CONTRIBUTORS file. You are welcome
to add yourself as a separate commit in your first pull request.

- The documentation (man/...) is licensed under the Creative Commons
  Attribution 4.0 International License.
## Building

- Projects under vendor/... are copyright by and licensed from their
  respective original authors. Contributions should be made to the original
  project, not here.
[See the documentation](http://discourse.syncthing.net/t/44) on how to
get started with a build environment.

Regardless of the license in effect, you retain the copyright to your
contribution.
## Branches

- `master` is the main branch containing good code that will end up in
  the next release. You should base your work on it. It won't ever be
  rebased or force-pushed to.

- `vx.y` branches exist to make patch releases on otherwise obsolete
  minor releases. Should only contain fixes cherry picked from master.
  Don't base any work on them.

- Other branches are probably topic branches and may be subject to
  rebasing. Don't base any work on them unless you specifically know
  otherwise.

## Tags

All releases are tagged semver style as `vx.y.z`. Release tags are
signed by GPG key BCE524C7.

## Tests

Yes please!

## Style

- `go fmt`

- Unix line breaks

## Documentation

[Over here!](http://discourse.syncthing.net/category/documentation)

## License

MIT
17  CONTRIBUTORS (new file)

@@ -0,0 +1,17 @@
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Gilli Sigurdsson <gilli@vx.is>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Lode Hoste <zillode@zillode.be>
Marcin Dziadus <dziadus.marcin@gmail.com>
Michael Tilli <pyfisch@gmail.com>
Philippe Schommers <philippe@schommers.be>
Ryan Sullivan <kayoticsully@gmail.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
32  Dockerfile

@@ -1,32 +0,0 @@
FROM golang:1.11 AS builder

WORKDIR /go/src/github.com/syncthing/syncthing
COPY . .

ENV CGO_ENABLED=0
ENV BUILD_HOST=syncthing.net
ENV BUILD_USER=docker
RUN rm -f syncthing && go run build.go -no-upgrade build syncthing

FROM alpine

EXPOSE 8384 22000 21027/udp

VOLUME ["/var/syncthing"]

RUN apk add --no-cache ca-certificates su-exec

COPY --from=builder /go/src/github.com/syncthing/syncthing/syncthing /bin/syncthing

ENV PUID=1000 PGID=1000

HEALTHCHECK --interval=1m --timeout=10s \
  CMD nc -z localhost 8384 || exit 1

ENTRYPOINT \
  chown "${PUID}:${PGID}" /var/syncthing \
  && su-exec "${PUID}:${PGID}" \
     env HOME=/var/syncthing \
     /bin/syncthing \
     -home /var/syncthing/config \
     -gui-address 0.0.0.0:8384
83  GOALS.md

@@ -1,83 +0,0 @@
# The Syncthing Goals

Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important one being
the first.

> "Syncing files" here is precise. It means we specifically exclude things
> that are not files - calendar items, instant messages, and so on. If those
> are in fact stored as files on disk, they can of course be synced as
> files.

Syncthing should be:

### 1. Safe From Data Loss

Protecting the user's data is paramount. We take every reasonable precaution
to avoid corrupting the user's files.

> This is the overriding goal, without which synchronizing files becomes
> pointless. This means that we do not make unsafe trade offs for the sake
> of performance or, in some cases, even usability.

### 2. Secure Against Attackers

Again, protecting the user's data is paramount. Regardless of our other
goals we must never allow the user's data to be susceptible to eavesdropping
or modification by unauthorized parties.

> This should be understood in context. It is not necessarily reasonable to
> expect Syncthing to be resistant against well equipped state level
> attackers. We will however do our best. Note also that this is different
> from anonymity which is not, currently, a goal.

### 3. Easy to Use

Syncthing should be approachable, understandable and inclusive.

> Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where
> Syncthing is usable by the general public.

### 4. Automatic

User interaction should be required only when absolutely necessary.

> Specifically this means that changes to files are picked up without
> prompting, conflicts are resolved without prompting and connections are
> maintained without prompting. We only prompt the user when it is required
> to fulfill one of the (overriding) Secure, Safe or Easy goals.

### 5. Universally Available

Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to any given individual.

> Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats
> and so on may include computing capabitilies but it is not our goal for
> Syncthing to run smoothly on these devices.

### 6. For Individuals

Syncthing is primarily about empowering the individual user with safe,
secure and easy to use file synchronization.

> We acknowledge that it's also useful in an enterprise setting and include
> functionality to support that. If this is in conflict with the
> requirements of the individual, those will however take priority.

### 7. Everything Else

There are many things we care about that don't make it on to the list. It is
fine to optimize for these values as well, as long as they are not in
conflict with the stated goals above.

> For example, performance is a thing we care about. We just don't care more
> about it than safety, security, etc. Maintainability of the code base and
> providing entertainment value for the maintainers are also things that
> matter. It is understood that there are aspects of Syncthing that are
> suboptimal or even in opposition with the goals above. However, we
> continuously strive to align Syncthing more and more with these goals.
67  Godeps/Godeps.json (generated, new file)

@@ -0,0 +1,67 @@
|
||||
{
|
||||
"ImportPath": "github.com/syncthing/syncthing",
|
||||
"GoVersion": "go1.3.1",
|
||||
"Packages": [
|
||||
"./cmd/..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "bitbucket.org/kardianos/osext",
|
||||
"Comment": "null-13",
|
||||
"Rev": "5d3ddcf53a508cc2f7404eaebf546ef2cb5cdb6e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/go.crypto/bcrypt",
|
||||
"Comment": "null-216",
|
||||
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/go.crypto/blowfish",
|
||||
"Comment": "null-216",
|
||||
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/go.text/transform",
|
||||
"Comment": "null-90",
|
||||
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/go.text/unicode/norm",
|
||||
"Comment": "null-90",
|
||||
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "code.google.com/p/snappy-go/snappy",
|
||||
"Comment": "null-15",
|
||||
"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/bkaradzic/go-lz4",
|
||||
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/xdr",
|
||||
"Rev": "a597b63b87d6140f79084c8aab214b4d533833a1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/juju/ratelimit",
|
||||
"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "9bca75c48d6c31becfbb127702b425e7226052e3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vitrun/qart/coding",
|
||||
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vitrun/qart/gf256",
|
||||
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vitrun/qart/qr",
|
||||
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
|
||||
}
|
||||
]
|
||||
}
|
||||
5  Godeps/Readme (generated, new file)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
2  Godeps/_workspace/.gitignore (generated, vendored, new file)

@@ -0,0 +1,2 @@
/pkg
/bin
20  Godeps/_workspace/src/bitbucket.org/kardianos/osext/LICENSE (generated, vendored, new file)

@@ -0,0 +1,20 @@
|
||||
Copyright (c) 2012 Daniel Theophanes
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any source
|
||||
distribution.
|
||||
32  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext.go (generated, vendored, new file)

@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Extensions to the standard "os" package.
package osext

import "path/filepath"

// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
	p, err := executable()
	return filepath.Clean(p), err
}

// Returns same path as Executable, returns just the folder
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
	p, err := Executable()
	if err != nil {
		return "", err
	}
	folder, _ := filepath.Split(p)
	return folder, nil
}

// Depricated. Same as Executable().
func GetExePath() (exePath string, err error) {
	return Executable()
}
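The osext package above is consumed through the `bitbucket.org/kardianos/osext` import path recorded in Godeps.json. As a minimal usage sketch only (not part of this diff), a program vendored against it could resolve its own location like this; the main function, variable names, and log messages are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"

	"bitbucket.org/kardianos/osext" // import path as recorded in Godeps.json above
)

func main() {
	// Absolute path of the running binary, e.g. for self-upgrade style
	// features that need to replace the executable in place.
	exe, err := osext.Executable()
	if err != nil {
		log.Fatalf("cannot determine executable path: %v", err)
	}

	// Directory containing the binary, e.g. for locating bundled assets.
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatalf("cannot determine executable folder: %v", err)
	}

	fmt.Println("binary:", exe)
	fmt.Println("folder:", dir)
}
```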
20  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_plan9.go (generated, vendored, new file)

@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
}
|
||||
25  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_procfs.go (generated, vendored, new file)

@@ -0,0 +1,25 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
return os.Readlink("/proc/self/exe")
|
||||
case "netbsd":
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
}
|
||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
||||
}
|
||||
79  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_sysctl.go (generated, vendored, new file)

@@ -0,0 +1,79 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var initCwd, initCwdErr = os.Getwd()
|
||||
|
||||
func executable() (string, error) {
|
||||
var mib [4]int32
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
|
||||
case "darwin":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
|
||||
}
|
||||
|
||||
n := uintptr(0)
|
||||
// Get length.
|
||||
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
buf := make([]byte, n)
|
||||
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
for i, v := range buf {
|
||||
if v == 0 {
|
||||
buf = buf[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
execPath := string(buf)
|
||||
// execPath will not be empty due to above checks.
|
||||
// Try to get the absolute path if the execPath is not rooted.
|
||||
if execPath[0] != '/' {
|
||||
execPath, err = getAbs(execPath)
|
||||
if err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
|
||||
// actual executable.
|
||||
if runtime.GOOS == "darwin" {
|
||||
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
return execPath, nil
|
||||
}
|
||||
|
||||
func getAbs(execPath string) (string, error) {
|
||||
if initCwdErr != nil {
|
||||
return execPath, initCwdErr
|
||||
}
|
||||
// The execPath may begin with a "../" or a "./" so clean it first.
|
||||
// Join the two paths, trailing and starting slashes undetermined, so use
|
||||
// the generic Join function.
|
||||
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
|
||||
}
|
||||
79  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_test.go (generated, vendored, new file)

@@ -0,0 +1,79 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin linux freebsd netbsd windows
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
oexec "os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
|
||||
|
||||
func TestExecPath(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecPath failed: %v", err)
|
||||
}
|
||||
// we want fn to be of the form "dir/prog"
|
||||
dir := filepath.Dir(filepath.Dir(ep))
|
||||
fn, err := filepath.Rel(dir, ep)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Rel: %v", err)
|
||||
}
|
||||
cmd := &oexec.Cmd{}
|
||||
// make child start with a relative program path
|
||||
cmd.Dir = dir
|
||||
cmd.Path = fn
|
||||
// forge argv[0] for child, so that we can verify we could correctly
|
||||
// get real path of the executable without influenced by argv[0].
|
||||
cmd.Args = []string{"-", "-test.run=XXXX"}
|
||||
cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) failed: %v", err)
|
||||
}
|
||||
outs := string(out)
|
||||
if !filepath.IsAbs(outs) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
||||
}
|
||||
if !sameFile(outs, ep) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func sameFile(fn1, fn2 string) bool {
|
||||
fi1, err := os.Stat(fn1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fi2, err := os.Stat(fn2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(fi1, fi2)
|
||||
}
|
||||
|
||||
func init() {
|
||||
if e := os.Getenv(execPath_EnvVar); e != "" {
|
||||
// first chdir to another path
|
||||
dir := "/"
|
||||
if runtime.GOOS == "windows" {
|
||||
dir = filepath.VolumeName(".")
|
||||
}
|
||||
os.Chdir(dir)
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
34  Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_windows.go (generated, vendored, new file)

@@ -0,0 +1,34 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
||||
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
|
||||
)
|
||||
|
||||
// GetModuleFileName() with hModule = NULL
|
||||
func executable() (exePath string, err error) {
|
||||
return getModuleFileName()
|
||||
}
|
||||
|
||||
func getModuleFileName() (string, error) {
|
||||
var n uint32
|
||||
b := make([]uint16, syscall.MAX_PATH)
|
||||
size := uint32(len(b))
|
||||
|
||||
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
|
||||
n = uint32(r0)
|
||||
if n == 0 {
|
||||
return "", e1
|
||||
}
|
||||
return string(utf16.Decode(b[0:n])), nil
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt.go

@@ -4,18 +4,17 @@

// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt // import "golang.org/x/crypto/bcrypt"
package bcrypt

// The code is a port of Provos and Mazières's C implementation.
import (
	"code.google.com/p/go.crypto/blowfish"
	"crypto/rand"
	"crypto/subtle"
	"errors"
	"fmt"
	"io"
	"strconv"

	"golang.org/x/crypto/blowfish"
)

const (

@@ -206,6 +205,7 @@ func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
}

func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {

	csalt, err := base64Decode(salt)
	if err != nil {
		return nil, err

@@ -213,8 +213,7 @@ func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cip

	// Bug compatibility with C bcrypt implementations. They use the trailing
	// NULL in the key string during expansion.
	// We copy the key to prevent changing the underlying array.
	ckey := append(key[:len(key):len(key)], 0)
	ckey := append(key, 0)

	c, err := blowfish.NewSaltedCipher(ckey, csalt)
	if err != nil {

@@ -241,11 +240,11 @@ func (p *hashed) Hash() []byte {
		n = 3
	}
	arr[n] = '$'
	n++
	n += 1
	copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
	n += 2
	arr[n] = '$'
	n++
	n += 1
	copy(arr[n:], p.salt)
	n += encodedSaltSize
	copy(arr[n:], p.hash)
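The hunk above swaps the newer `ckey := append(key[:len(key):len(key)], 0)` for the older `ckey := append(key, 0)`. The difference the comment alludes to is slice aliasing: a plain append can reuse spare capacity and write the trailing NUL into the caller's backing array, while the three-index (full) slice expression pins the capacity to the length and forces append to allocate a copy. A self-contained sketch of that behaviour, illustrative only and not code from this repository:

```go
package main

import "fmt"

func main() {
	// A backing array with spare capacity, as a password buffer might have.
	backing := []byte("secret##")
	key := backing[:6] // len 6, cap 8

	// Plain append writes the 0 byte into backing[6] because capacity allows it.
	_ = append(key, 0)
	fmt.Printf("after plain append:      %q\n", backing) // "secret\x00#"

	backing = []byte("secret##")
	key = backing[:6]

	// The full slice expression limits capacity to the length, so append
	// must allocate a new array and the caller's bytes stay untouched.
	_ = append(key[:len(key):len(key)], 0)
	fmt.Printf("after full-slice append: %q\n", backing) // "secret##"
}
```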
226  Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go (generated, vendored, new file)

@@ -0,0 +1,226 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bcrypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBcryptingIsEasy(t *testing.T) {
|
||||
pass := []byte("mypassword")
|
||||
hp, err := GenerateFromPassword(pass, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateFromPassword error: %s", err)
|
||||
}
|
||||
|
||||
if CompareHashAndPassword(hp, pass) != nil {
|
||||
t.Errorf("%v should hash %s correctly", hp, pass)
|
||||
}
|
||||
|
||||
notPass := "notthepass"
|
||||
err = CompareHashAndPassword(hp, []byte(notPass))
|
||||
if err != ErrMismatchedHashAndPassword {
|
||||
t.Errorf("%v and %s should be mismatched", hp, notPass)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBcryptingIsCorrect(t *testing.T) {
|
||||
pass := []byte("allmine")
|
||||
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
|
||||
expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
|
||||
|
||||
hash, err := bcrypt(pass, 10, salt)
|
||||
if err != nil {
|
||||
t.Fatalf("bcrypt blew up: %v", err)
|
||||
}
|
||||
if !bytes.HasSuffix(expectedHash, hash) {
|
||||
t.Errorf("%v should be the suffix of %v", hash, expectedHash)
|
||||
}
|
||||
|
||||
h, err := newFromHash(expectedHash)
|
||||
if err != nil {
|
||||
t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
|
||||
}
|
||||
|
||||
// This is not the safe way to compare these hashes. We do this only for
|
||||
// testing clarity. Use bcrypt.CompareHashAndPassword()
|
||||
if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
|
||||
t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVeryShortPasswords(t *testing.T) {
|
||||
key := []byte("k")
|
||||
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
|
||||
_, err := bcrypt(key, 10, salt)
|
||||
if err != nil {
|
||||
t.Errorf("One byte key resulted in error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTooLongPasswordsWork(t *testing.T) {
|
||||
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
|
||||
// One byte over the usual 56 byte limit that blowfish has
|
||||
tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
|
||||
tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
|
||||
hash, err := bcrypt(tooLongPass, 10, salt)
|
||||
if err != nil {
|
||||
t.Fatalf("bcrypt blew up on long password: %v", err)
|
||||
}
|
||||
if !bytes.HasSuffix(tooLongExpected, hash) {
|
||||
t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
|
||||
}
|
||||
}
|
||||
|
||||
type InvalidHashTest struct {
|
||||
err error
|
||||
hash []byte
|
||||
}
|
||||
|
||||
var invalidTests = []InvalidHashTest{
|
||||
{ErrHashTooShort, []byte("$2a$10$fooo")},
|
||||
{ErrHashTooShort, []byte("$2a")},
|
||||
{HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
|
||||
{InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
|
||||
{InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
|
||||
}
|
||||
|
||||
func TestInvalidHashErrors(t *testing.T) {
|
||||
check := func(name string, expected, err error) {
|
||||
if err == nil {
|
||||
t.Errorf("%s: Should have returned an error", name)
|
||||
}
|
||||
if err != nil && err != expected {
|
||||
t.Errorf("%s gave err %v but should have given %v", name, err, expected)
|
||||
}
|
||||
}
|
||||
for _, iht := range invalidTests {
|
||||
_, err := newFromHash(iht.hash)
|
||||
check("newFromHash", iht.err, err)
|
||||
err = CompareHashAndPassword(iht.hash, []byte("anything"))
|
||||
check("CompareHashAndPassword", iht.err, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnpaddedBase64Encoding(t *testing.T) {
|
||||
original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
|
||||
encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
|
||||
|
||||
encoded := base64Encode(original)
|
||||
|
||||
if !bytes.Equal(encodedOriginal, encoded) {
|
||||
t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
|
||||
}
|
||||
|
||||
decoded, err := base64Decode(encodedOriginal)
|
||||
if err != nil {
|
||||
t.Fatalf("base64Decode blew up: %s", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(decoded, original) {
|
||||
t.Errorf("Decoded %v should have equaled %v", decoded, original)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCost(t *testing.T) {
|
||||
suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
|
||||
for _, vers := range []string{"2a", "2"} {
|
||||
for _, cost := range []int{4, 10} {
|
||||
s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
|
||||
h := []byte(s)
|
||||
actual, err := Cost(h)
|
||||
if err != nil {
|
||||
t.Errorf("Cost, error: %s", err)
|
||||
continue
|
||||
}
|
||||
if actual != cost {
|
||||
t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err := Cost([]byte("$a$a$" + suffix))
|
||||
if err == nil {
|
||||
t.Errorf("Cost, malformed but no error returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCostValidationInHash(t *testing.T) {
|
||||
if testing.Short() {
|
||||
return
|
||||
}
|
||||
|
||||
pass := []byte("mypassword")
|
||||
|
||||
for c := 0; c < MinCost; c++ {
|
||||
p, _ := newFromPassword(pass, c)
|
||||
if p.cost != DefaultCost {
|
||||
t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
|
||||
}
|
||||
}
|
||||
|
||||
p, _ := newFromPassword(pass, 14)
|
||||
if p.cost != 14 {
|
||||
t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
|
||||
}
|
||||
|
||||
hp, _ := newFromHash(p.Hash())
|
||||
if p.cost != hp.cost {
|
||||
t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
|
||||
}
|
||||
|
||||
_, err := newFromPassword(pass, 32)
|
||||
if err == nil {
|
||||
t.Fatalf("newFromPassword: should return a cost error")
|
||||
}
|
||||
if err != InvalidCostError(32) {
|
||||
t.Errorf("newFromPassword: should return cost error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCostReturnsWithLeadingZeroes(t *testing.T) {
|
||||
hp, _ := newFromPassword([]byte("abcdefgh"), 7)
|
||||
cost := hp.Hash()[4:7]
|
||||
expected := []byte("07$")
|
||||
|
||||
if !bytes.Equal(expected, cost) {
|
||||
t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinorNotRequired(t *testing.T) {
|
||||
noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
|
||||
h, err := newFromHash(noMinorHash)
|
||||
if err != nil {
|
||||
t.Fatalf("No minor hash blew up: %s", err)
|
||||
}
|
||||
if h.minor != 0 {
|
||||
t.Errorf("Should leave minor version at 0, but was %d", h.minor)
|
||||
}
|
||||
|
||||
if !bytes.Equal(noMinorHash, h.Hash()) {
|
||||
t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEqual(b *testing.B) {
|
||||
b.StopTimer()
|
||||
passwd := []byte("somepasswordyoulike")
|
||||
hash, _ := GenerateFromPassword(passwd, 10)
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
CompareHashAndPassword(hash, passwd)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGeneration(b *testing.B) {
|
||||
b.StopTimer()
|
||||
passwd := []byte("mylongpassword1234")
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
GenerateFromPassword(passwd, 10)
|
||||
}
|
||||
}
|
||||
274  Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/blowfish_test.go (generated, vendored, new file)

@@ -0,0 +1,274 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
import "testing"
|
||||
|
||||
type CryptTest struct {
|
||||
key []byte
|
||||
in []byte
|
||||
out []byte
|
||||
}
|
||||
|
||||
// Test vector values are from http://www.schneier.com/code/vectors.txt.
|
||||
var encryptTests = []CryptTest{
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
|
||||
{
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
|
||||
{
|
||||
[]byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
[]byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
|
||||
{
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
|
||||
|
||||
{
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
|
||||
{
|
||||
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
|
||||
{
|
||||
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
|
||||
{
|
||||
[]byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
|
||||
[]byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
|
||||
[]byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
|
||||
{
|
||||
[]byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
|
||||
[]byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
|
||||
[]byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
|
||||
{
|
||||
[]byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
|
||||
[]byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
|
||||
[]byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
|
||||
{
|
||||
[]byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
|
||||
[]byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
|
||||
[]byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
|
||||
{
|
||||
[]byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
|
||||
[]byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
|
||||
[]byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
|
||||
{
|
||||
[]byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
|
||||
[]byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
|
||||
[]byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
|
||||
{
|
||||
[]byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
|
||||
[]byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
|
||||
[]byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
|
||||
{
|
||||
[]byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
|
||||
[]byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
|
||||
[]byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
|
||||
{
|
||||
[]byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
|
||||
[]byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
|
||||
[]byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
|
||||
{
|
||||
[]byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
|
||||
[]byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
|
||||
[]byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
|
||||
{
|
||||
[]byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
|
||||
[]byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
|
||||
[]byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
|
||||
{
|
||||
[]byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
|
||||
[]byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
|
||||
[]byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
|
||||
{
|
||||
[]byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
|
||||
[]byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
|
||||
[]byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
|
||||
{
|
||||
[]byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
|
||||
[]byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
|
||||
[]byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
|
||||
{
|
||||
[]byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
|
||||
[]byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
|
||||
[]byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
|
||||
{
|
||||
[]byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
|
||||
[]byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
|
||||
[]byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
|
||||
{
|
||||
[]byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
|
||||
[]byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
|
||||
[]byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
|
||||
{
|
||||
[]byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
|
||||
[]byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
|
||||
[]byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
|
||||
{
|
||||
[]byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
|
||||
[]byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
|
||||
[]byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
|
||||
{
|
||||
[]byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
|
||||
{
|
||||
[]byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
|
||||
{
|
||||
[]byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
|
||||
{
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
|
||||
{
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
|
||||
{
|
||||
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
|
||||
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
|
||||
[]byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
|
||||
{
|
||||
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
|
||||
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
|
||||
[]byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
|
||||
}

func TestCipherEncrypt(t *testing.T) {
	for i, tt := range encryptTests {
		c, err := NewCipher(tt.key)
		if err != nil {
			t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
			continue
		}
		ct := make([]byte, len(tt.out))
		c.Encrypt(ct, tt.in)
		for j, v := range ct {
			if v != tt.out[j] {
				t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
				break
			}
		}
	}
}

func TestCipherDecrypt(t *testing.T) {
	for i, tt := range encryptTests {
		c, err := NewCipher(tt.key)
		if err != nil {
			t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
			continue
		}
		pt := make([]byte, len(tt.in))
		c.Decrypt(pt, tt.out)
		for j, v := range pt {
			if v != tt.in[j] {
				t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
				break
			}
		}
	}
}

func TestSaltedCipherKeyLength(t *testing.T) {
	if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
		t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
	}

	// A 57-byte key. One over the typical blowfish restriction.
	key := []byte("012345678901234567890123456789012345678901234567890123456")
	if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
		t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
	}
}

// Test vectors generated with Blowfish from OpenSSH.
var saltedVectors = [][8]byte{
	{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
	{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
	{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
	{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
	{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
	{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
	{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
	{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
	{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
	{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
	{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
	{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
	{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
	{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
	{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
	{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
	{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
	{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
	{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
	{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
	{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
	{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
	{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
	{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
	{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
	{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
	{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
	{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
	{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
	{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
	{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
	{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
}

func TestSaltedCipher(t *testing.T) {
	var key, salt [32]byte
	for i := range key {
		key[i] = byte(i)
		salt[i] = byte(i + 32)
	}
	for i, v := range saltedVectors {
		c, err := NewSaltedCipher(key[:], salt[:i])
		if err != nil {
			t.Fatal(err)
		}
		var buf [8]byte
		c.Encrypt(buf[:], buf[:])
		if v != buf {
			t.Errorf("%d: expected %x, got %x", i, v, buf)
		}
	}
}

func BenchmarkExpandKeyWithSalt(b *testing.B) {
	key := make([]byte, 32)
	salt := make([]byte, 16)
	c, _ := NewCipher(key)
	for i := 0; i < b.N; i++ {
		expandKeyWithSalt(key, salt, c)
	}
}

func BenchmarkExpandKey(b *testing.B) {
	key := make([]byte, 32)
	c, _ := NewCipher(key)
	for i := 0; i < b.N; i++ {
		ExpandKey(key, c)
	}
}

@@ -3,10 +3,10 @@
// license that can be found in the LICENSE file.

// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish // import "golang.org/x/crypto/blowfish"
package blowfish

// The code is a port of Bruce Schneier's C implementation.
// See https://www.schneier.com/blowfish.html.
// See http://www.schneier.com/blowfish.html.

import "strconv"

@@ -39,7 +39,7 @@ func NewCipher(key []byte) (*Cipher, error) {

// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
	if len(salt) == 0 {
@@ -4,7 +4,7 @@

// The startup permutation array and substitution boxes.
// They are the hexadecimal digits of PI; see:
// https://www.schneier.com/code/constants.txt.
// http://www.schneier.com/code/constants.txt.

package blowfish

Godeps/_workspace/src/code.google.com/p/go.text/transform/examples_test.go (37 lines, generated, vendored, new file)
@@ -0,0 +1,37 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package transform_test

import (
	"fmt"
	"unicode"

	"code.google.com/p/go.text/transform"
	"code.google.com/p/go.text/unicode/norm"
)

func ExampleRemoveFunc() {
	input := []byte(`tschüß; до свидания`)

	b := make([]byte, len(input))

	t := transform.RemoveFunc(unicode.IsSpace)
	n, _, _ := t.Transform(b, input, true)
	fmt.Println(string(b[:n]))

	t = transform.RemoveFunc(func(r rune) bool {
		return !unicode.Is(unicode.Latin, r)
	})
	n, _, _ = t.Transform(b, input, true)
	fmt.Println(string(b[:n]))

	n, _, _ = t.Transform(b, norm.NFD.Bytes(input), true)
	fmt.Println(string(b[:n]))

	// Output:
	// tschüß;досвидания
	// tschüß
	// tschuß
}

@@ -6,7 +6,7 @@
|
||||
// bytes passing through as well as various transformations. Example
|
||||
// transformations provided by other packages include normalization and
|
||||
// conversion between character sets.
|
||||
package transform // import "golang.org/x/text/transform"
|
||||
package transform
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -24,10 +24,6 @@ var (
|
||||
// complete the transformation.
|
||||
ErrShortSrc = errors.New("transform: short source buffer")
|
||||
|
||||
// ErrEndOfSpan means that the input and output (the transformed input)
|
||||
// are not identical.
|
||||
ErrEndOfSpan = errors.New("transform: input and output are not identical")
|
||||
|
||||
// errInconsistentByteCount means that Transform returned success (nil
|
||||
// error) but also returned nSrc inconsistent with the src argument.
|
||||
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||
@@ -59,52 +55,10 @@ type Transformer interface {
|
||||
// either error may be returned. Other than the error conditions listed
|
||||
// here, implementations are free to report other errors that arise.
|
||||
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
|
||||
|
||||
// Reset resets the state and allows a Transformer to be reused.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// SpanningTransformer extends the Transformer interface with a Span method
|
||||
// that determines how much of the input already conforms to the Transformer.
|
||||
type SpanningTransformer interface {
|
||||
Transformer
|
||||
|
||||
// Span returns a position in src such that transforming src[:n] results in
|
||||
// identical output src[:n] for these bytes. It does not necessarily return
|
||||
// the largest such n. The atEOF argument tells whether src represents the
|
||||
// last bytes of the input.
|
||||
//
|
||||
// Callers should always account for the n bytes consumed before
|
||||
// considering the error err.
|
||||
//
|
||||
// A nil error means that all input bytes are known to be identical to the
|
||||
// output produced by the Transformer. A nil error can be be returned
|
||||
// regardless of whether atEOF is true. If err is nil, then then n must
|
||||
// equal len(src); the converse is not necessarily true.
|
||||
//
|
||||
// ErrEndOfSpan means that the Transformer output may differ from the
|
||||
// input after n bytes. Note that n may be len(src), meaning that the output
|
||||
// would contain additional bytes after otherwise identical output.
|
||||
// ErrShortSrc means that src had insufficient data to determine whether the
|
||||
// remaining bytes would change. Other than the error conditions listed
|
||||
// here, implementations are free to report other errors that arise.
|
||||
//
|
||||
// Calling Span can modify the Transformer state as a side effect. In
|
||||
// effect, it does the transformation just as calling Transform would, only
|
||||
// without copying to a destination buffer and only up to a point it can
|
||||
// determine the input and output bytes are the same. This is obviously more
|
||||
// limited than calling Transform, but can be more efficient in terms of
|
||||
// copying and allocating buffers. Calls to Span and Transform may be
|
||||
// interleaved.
|
||||
Span(src []byte, atEOF bool) (n int, err error)
|
||||
}
|
||||
|
||||
// NopResetter can be embedded by implementations of Transformer to add a nop
|
||||
// Reset method.
|
||||
type NopResetter struct{}
|
||||
|
||||
// Reset implements the Reset method of the Transformer interface.
|
||||
func (NopResetter) Reset() {}
|
||||
// TODO: Do we require that a Transformer be reusable if it returns a nil error
|
||||
// or do we always require a reset after use? Is Reset mandatory or optional?
|
||||
|
||||
// Reader wraps another io.Reader by transforming the bytes read.
|
||||
type Reader struct {
|
||||
@@ -130,9 +84,8 @@ type Reader struct {
|
||||
const defaultBufSize = 4096
|
||||
|
||||
// NewReader returns a new Reader that wraps r by transforming the bytes read
|
||||
// via t. It calls Reset on t.
|
||||
// via t.
|
||||
func NewReader(r io.Reader, t Transformer) *Reader {
|
||||
t.Reset()
|
||||
return &Reader{
|
||||
r: r,
|
||||
t: t,
|
||||
@@ -217,9 +170,8 @@ type Writer struct {
|
||||
}
|
||||
|
||||
// NewWriter returns a new Writer that wraps w by transforming the bytes written
|
||||
// via t. It calls Reset on t.
|
||||
// via t.
|
||||
func NewWriter(w io.Writer, t Transformer) *Writer {
|
||||
t.Reset()
|
||||
return &Writer{
|
||||
w: w,
|
||||
t: t,
|
||||
@@ -246,9 +198,7 @@ func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
return n, werr
|
||||
}
|
||||
src = src[nSrc:]
|
||||
if w.n == 0 {
|
||||
n += nSrc
|
||||
} else if len(src) <= n {
|
||||
if w.n > 0 && len(src) <= n {
|
||||
// Enough bytes from w.src have been consumed. We make src point
|
||||
// to data instead to reduce the copying.
|
||||
w.n = 0
|
||||
@@ -257,46 +207,35 @@ func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
if n < len(data) && (err == nil || err == ErrShortSrc) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
n += nSrc
|
||||
}
|
||||
switch err {
|
||||
case ErrShortDst:
|
||||
// This error is okay as long as we are making progress.
|
||||
if nDst > 0 || nSrc > 0 {
|
||||
continue
|
||||
}
|
||||
case ErrShortSrc:
|
||||
if len(src) < len(w.src) {
|
||||
m := copy(w.src, src)
|
||||
// If w.n > 0, bytes from data were already copied to w.src and n
|
||||
// was already set to the number of bytes consumed.
|
||||
if w.n == 0 {
|
||||
n += m
|
||||
}
|
||||
w.n = m
|
||||
err = nil
|
||||
} else if nDst > 0 || nSrc > 0 {
|
||||
// Not enough buffer to store the remainder. Keep processing as
|
||||
// long as there is progress. Without this case, transforms that
|
||||
// require a lookahead larger than the buffer may result in an
|
||||
// error. This is not something one may expect to be common in
|
||||
// practice, but it may occur when buffers are set to small
|
||||
// sizes during testing.
|
||||
continue
|
||||
}
|
||||
case nil:
|
||||
if w.n > 0 {
|
||||
err = errInconsistentByteCount
|
||||
switch {
|
||||
case err == ErrShortDst && (nDst > 0 || nSrc > 0):
|
||||
case err == ErrShortSrc && len(src) < len(w.src):
|
||||
m := copy(w.src, src)
|
||||
// If w.n > 0, bytes from data were already copied to w.src and n
|
||||
// was already set to the number of bytes consumed.
|
||||
if w.n == 0 {
|
||||
n += m
|
||||
}
|
||||
w.n = m
|
||||
return n, nil
|
||||
case err == nil && w.n > 0:
|
||||
return n, errInconsistentByteCount
|
||||
default:
|
||||
return n, err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// Close implements the io.Closer interface.
|
||||
func (w *Writer) Close() error {
|
||||
src := w.src[:w.n]
|
||||
for {
|
||||
for src := w.src[:w.n]; len(src) > 0; {
|
||||
nDst, nSrc, err := w.t.Transform(w.dst, src, true)
|
||||
if nDst == 0 {
|
||||
return err
|
||||
}
|
||||
if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
|
||||
return werr
|
||||
}
|
||||
@@ -305,9 +244,10 @@ func (w *Writer) Close() error {
|
||||
}
|
||||
src = src[nSrc:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type nop struct{ NopResetter }
|
||||
type nop struct{}
|
||||
|
||||
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := copy(dst, src)
|
||||
@@ -317,11 +257,7 @@ func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
return n, n, err
|
||||
}
|
||||
|
||||
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
|
||||
return len(src), nil
|
||||
}
|
||||
|
||||
type discard struct{ NopResetter }
|
||||
type discard struct{}
|
||||
|
||||
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
return 0, len(src), nil
|
||||
@@ -332,8 +268,8 @@ var (
|
||||
// by consuming all bytes and writing nothing.
|
||||
Discard Transformer = discard{}
|
||||
|
||||
// Nop is a SpanningTransformer that copies src to dst.
|
||||
Nop SpanningTransformer = nop{}
|
||||
// Nop is a Transformer that copies src to dst.
|
||||
Nop Transformer = nop{}
|
||||
)
|
||||
|
||||
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
||||
@@ -391,18 +327,6 @@ func Chain(t ...Transformer) Transformer {
|
||||
return c
|
||||
}
|
||||
|
||||
// Reset resets the state of Chain. It calls Reset on all the Transformers.
|
||||
func (c *chain) Reset() {
|
||||
for i, l := range c.link {
|
||||
if l.t != nil {
|
||||
l.t.Reset()
|
||||
}
|
||||
c.link[i].p, c.link[i].n = 0, 0
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: make chain use Span (is going to be fun to implement!)
|
||||
|
||||
// Transform applies the transformers of c in sequence.
|
||||
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
// Set up src and dst in the chain.
|
||||
@@ -493,15 +417,14 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro
|
||||
return dstL.n, srcL.p, err
|
||||
}
|
||||
|
||||
// Deprecated: use runes.Remove instead.
|
||||
// RemoveFunc returns a Transformer that removes from the input all runes r for
|
||||
// which f(r) is true. Illegal bytes in the input are replaced by RuneError.
|
||||
func RemoveFunc(f func(r rune) bool) Transformer {
|
||||
return removeF(f)
|
||||
}
|
||||
|
||||
type removeF func(r rune) bool
|
||||
|
||||
func (removeF) Reset() {}
|
||||
|
||||
// Transform implements the Transformer interface.
|
||||
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
|
||||
@@ -513,7 +436,7 @@ func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err err
|
||||
|
||||
if sz == 1 {
|
||||
// Invalid rune.
|
||||
if !atEOF && !utf8.FullRune(src) {
|
||||
if !atEOF && !utf8.FullRune(src[nSrc:]) {
|
||||
err = ErrShortSrc
|
||||
break
|
||||
}
|
||||
@@ -549,9 +472,7 @@ func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err err
|
||||
// of b to the start of the new slice.
|
||||
func grow(b []byte, n int) []byte {
|
||||
m := len(b)
|
||||
if m <= 32 {
|
||||
m = 64
|
||||
} else if m <= 256 {
|
||||
if m <= 256 {
|
||||
m *= 2
|
||||
} else {
|
||||
m += m >> 1
|
||||
@@ -564,15 +485,10 @@ func grow(b []byte, n int) []byte {
|
||||
const initialBufSize = 128
|
||||
|
||||
// String returns a string with the result of converting s[:n] using t, where
|
||||
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
|
||||
// n <= len(s). If err == nil, n will be len(s).
|
||||
func String(t Transformer, s string) (result string, n int, err error) {
|
||||
t.Reset()
|
||||
if s == "" {
|
||||
// Fast path for the common case for empty input. Results in about a
|
||||
// 86% reduction of running time for BenchmarkStringLowerEmpty.
|
||||
if _, _, err := t.Transform(nil, nil, true); err == nil {
|
||||
return "", 0, nil
|
||||
}
|
||||
return "", 0, nil
|
||||
}
|
||||
|
||||
// Allocate only once. Note that both dst and src escape when passed to
|
||||
@@ -581,115 +497,86 @@ func String(t Transformer, s string) (result string, n int, err error) {
|
||||
dst := buf[:initialBufSize:initialBufSize]
|
||||
src := buf[initialBufSize : 2*initialBufSize]
|
||||
|
||||
// The input string s is transformed in multiple chunks (starting with a
|
||||
// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
|
||||
// per-Transform-call) indexes, pDst and pSrc are overall indexes.
|
||||
nDst, nSrc := 0, 0
|
||||
pDst, pSrc := 0, 0
|
||||
|
||||
// pPrefix is the length of a common prefix: the first pPrefix bytes of the
|
||||
// result will equal the first pPrefix bytes of s. It is not guaranteed to
|
||||
// be the largest such value, but if pPrefix, len(result) and len(s) are
|
||||
// all equal after the final transform (i.e. calling Transform with atEOF
|
||||
// being true returned nil error) then we don't need to allocate a new
|
||||
// result string.
|
||||
pPrefix := 0
|
||||
// Avoid allocation if the transformed string is identical to the original.
|
||||
// After this loop, pDst will point to the furthest point in s for which it
|
||||
// could be detected that t gives equal results, src[:nSrc] will
|
||||
// indicated the last processed chunk of s for which the output is not equal
|
||||
// and dst[:nDst] will be the transform of this chunk.
|
||||
var nDst, nSrc int
|
||||
pDst := 0 // Used as index in both src and dst in this loop.
|
||||
for {
|
||||
// Invariant: pDst == pPrefix && pSrc == pPrefix.
|
||||
|
||||
n := copy(src, s[pSrc:])
|
||||
nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
n := copy(src, s[pDst:])
|
||||
nDst, nSrc, err = t.Transform(dst, src[:n], pDst+n == len(s))
|
||||
|
||||
// Note 1: we will not enter the loop with pDst == len(s) and we will
|
||||
// not end the loop with it either. So if nSrc is 0, this means there is
|
||||
// some kind of error from which we cannot recover given the current
|
||||
// buffer sizes. We will give up in this case.
|
||||
// Note 2: it is not entirely correct to simply do a bytes.Equal as
|
||||
// a Transformer may buffer internally. It will work in most cases,
|
||||
// though, and no harm is done if it doesn't work.
|
||||
// TODO: let transformers implement an optional Spanner interface, akin
|
||||
// to norm's QuickSpan. This would even allow us to avoid any allocation.
|
||||
if !bytes.Equal(dst[:nDst], src[:nSrc]) {
|
||||
if nSrc == 0 || !bytes.Equal(dst[:nDst], src[:nSrc]) {
|
||||
break
|
||||
}
|
||||
pPrefix = pSrc
|
||||
if err == ErrShortDst {
|
||||
// A buffer can only be short if a transformer modifies its input.
|
||||
break
|
||||
} else if err == ErrShortSrc {
|
||||
if nSrc == 0 {
|
||||
// No progress was made.
|
||||
break
|
||||
}
|
||||
// Equal so far and !atEOF, so continue checking.
|
||||
} else if err != nil || pPrefix == len(s) {
|
||||
return string(s[:pPrefix]), pPrefix, err
|
||||
|
||||
if pDst += nDst; pDst == len(s) {
|
||||
return s, pDst, nil
|
||||
}
|
||||
}
|
||||
// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
|
||||
|
||||
// We have transformed the first pSrc bytes of the input s to become pDst
|
||||
// transformed bytes. Those transformed bytes are discontiguous: the first
|
||||
// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
|
||||
// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
|
||||
// that they become one contiguous slice: dst[:pDst].
|
||||
if pPrefix != 0 {
|
||||
newDst := dst
|
||||
if pDst > len(newDst) {
|
||||
newDst = make([]byte, len(s)+nDst-nSrc)
|
||||
}
|
||||
copy(newDst[pPrefix:pDst], dst[:nDst])
|
||||
copy(newDst[:pPrefix], s[:pPrefix])
|
||||
dst = newDst
|
||||
// Move the bytes seen so far to dst.
|
||||
pSrc := pDst + nSrc
|
||||
if pDst+nDst <= initialBufSize {
|
||||
copy(dst[pDst:], dst[:nDst])
|
||||
} else {
|
||||
b := make([]byte, len(s)+nDst-nSrc)
|
||||
copy(b[pDst:], dst[:nDst])
|
||||
dst = b
|
||||
}
|
||||
copy(dst, s[:pDst])
|
||||
pDst += nDst
|
||||
|
||||
// Prevent duplicate Transform calls with atEOF being true at the end of
|
||||
// the input. Also return if we have an unrecoverable error.
|
||||
if (err == nil && pSrc == len(s)) ||
|
||||
(err != nil && err != ErrShortDst && err != ErrShortSrc) {
|
||||
if err != nil && err != ErrShortDst && err != ErrShortSrc {
|
||||
return string(dst[:pDst]), pSrc, err
|
||||
}
|
||||
|
||||
// Transform the remaining input, growing dst and src buffers as necessary.
|
||||
// Complete the string with the remainder.
|
||||
for {
|
||||
n := copy(src, s[pSrc:])
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
|
||||
nDst, nSrc, err = t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
|
||||
// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
|
||||
// make progress. This may avoid excessive allocations.
|
||||
if err == ErrShortDst {
|
||||
switch err {
|
||||
case nil:
|
||||
if pSrc == len(s) {
|
||||
return string(dst[:pDst]), pSrc, nil
|
||||
}
|
||||
case ErrShortDst:
|
||||
// Do not grow as long as we can make progress. This may avoid
|
||||
// excessive allocations.
|
||||
if nDst == 0 {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
} else if err == ErrShortSrc {
|
||||
case ErrShortSrc:
|
||||
if nSrc == 0 {
|
||||
src = grow(src, 0)
|
||||
}
|
||||
} else if err != nil || pSrc == len(s) {
|
||||
default:
|
||||
return string(dst[:pDst]), pSrc, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bytes returns a new byte slice with the result of converting b[:n] using t,
|
||||
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
|
||||
// where n <= len(b). If err == nil, n will be len(b).
|
||||
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
|
||||
return doAppend(t, 0, make([]byte, len(b)), b)
|
||||
}
|
||||
|
||||
// Append appends the result of converting src[:n] using t to dst, where
|
||||
// n <= len(src), If err == nil, n will be len(src). It calls Reset on t.
|
||||
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
|
||||
if len(dst) == cap(dst) {
|
||||
n := len(src) + len(dst) // It is okay for this to be 0.
|
||||
b := make([]byte, n)
|
||||
dst = b[:copy(b, dst)]
|
||||
}
|
||||
return doAppend(t, len(dst), dst[:cap(dst)], src)
|
||||
}
|
||||
|
||||
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
|
||||
t.Reset()
|
||||
pSrc := 0
|
||||
dst := make([]byte, len(b))
|
||||
pDst, pSrc := 0, 0
|
||||
for {
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], b[pSrc:], true)
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
if err != ErrShortDst {
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/transform/transform_test.go (1075 lines, generated, vendored, new file; diff suppressed because it is too large)
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/Makefile (30 lines, generated, vendored, new file)
@@ -0,0 +1,30 @@
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

maketables: maketables.go triegen.go
	go build $^

maketesttables: maketesttables.go triegen.go
	go build $^

normregtest: normregtest.go
	go build $^

tables: maketables
	./maketables > tables.go
	gofmt -w tables.go

trietesttables: maketesttables
	./maketesttables > triedata_test.go
	gofmt -w triedata_test.go

# Downloads from www.unicode.org, so not part
# of standard test scripts.
test: testtables regtest

testtables: maketables
	./maketables -test > data_test.go && go test -tags=test

regtest: normregtest
	./normregtest

@@ -33,9 +33,17 @@ const (
|
||||
// streamSafe implements the policy of when a CGJ should be inserted.
|
||||
type streamSafe uint8
|
||||
|
||||
// first inserts the first rune of a segment. It is a faster version of next if
|
||||
// it is known p represents the first rune in a segment.
|
||||
// mkStreamSafe is a shorthand for declaring a streamSafe var and calling
|
||||
// first on it.
|
||||
func mkStreamSafe(p Properties) streamSafe {
|
||||
return streamSafe(p.nTrailingNonStarters())
|
||||
}
|
||||
|
||||
// first inserts the first rune of a segment.
|
||||
func (ss *streamSafe) first(p Properties) {
|
||||
if *ss != 0 {
|
||||
panic("!= 0")
|
||||
}
|
||||
*ss = streamSafe(p.nTrailingNonStarters())
|
||||
}
|
||||
|
||||
@@ -58,7 +66,7 @@ func (ss *streamSafe) next(p Properties) ssState {
|
||||
// be a non-starter. Note that it always hold that if nLead > 0 then
|
||||
// nLead == nTrail.
|
||||
if n == 0 {
|
||||
*ss = streamSafe(p.nTrailingNonStarters())
|
||||
*ss = 0
|
||||
return ssStarter
|
||||
}
|
||||
return ssSuccess
|
||||
@@ -134,6 +142,7 @@ func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
|
||||
func (rb *reorderBuffer) reset() {
|
||||
rb.nrune = 0
|
||||
rb.nbyte = 0
|
||||
rb.ss = 0
|
||||
}
|
||||
|
||||
func (rb *reorderBuffer) doFlush() bool {
|
||||
@@ -248,9 +257,6 @@ func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
|
||||
// It flushes the buffer on each new segment start.
|
||||
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
|
||||
rb.tmpBytes.setBytes(dcomp)
|
||||
// As the streamSafe accounting already handles the counting for modifiers,
|
||||
// we don't have to call next. However, we do need to keep the accounting
|
||||
// intact when flushing the buffer.
|
||||
for i := 0; i < len(dcomp); {
|
||||
info := rb.f.info(rb.tmpBytes, i)
|
||||
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/composition_test.go (130 lines, generated, vendored, new file)
@@ -0,0 +1,130 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestCase is used for most tests.
|
||||
type TestCase struct {
|
||||
in []rune
|
||||
out []rune
|
||||
}
|
||||
|
||||
func runTests(t *testing.T, name string, fm Form, tests []TestCase) {
|
||||
rb := reorderBuffer{}
|
||||
rb.init(fm, nil)
|
||||
for i, test := range tests {
|
||||
rb.setFlusher(nil, appendFlush)
|
||||
for j, rune := range test.in {
|
||||
b := []byte(string(rune))
|
||||
src := inputBytes(b)
|
||||
info := rb.f.info(src, 0)
|
||||
if j == 0 {
|
||||
rb.ss.first(info)
|
||||
} else {
|
||||
rb.ss.next(info)
|
||||
}
|
||||
if rb.insertFlush(src, 0, info) < 0 {
|
||||
t.Errorf("%s:%d: insert failed for rune %d", name, i, j)
|
||||
}
|
||||
}
|
||||
rb.doFlush()
|
||||
was := string(rb.out)
|
||||
want := string(test.out)
|
||||
if len(was) != len(want) {
|
||||
t.Errorf("%s:%d: length = %d; want %d", name, i, len(was), len(want))
|
||||
}
|
||||
if was != want {
|
||||
k, pfx := pidx(was, want)
|
||||
t.Errorf("%s:%d: \nwas %s%+q; \nwant %s%+q", name, i, pfx, was[k:], pfx, want[k:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlush(t *testing.T) {
|
||||
const (
|
||||
hello = "Hello "
|
||||
world = "world!"
|
||||
)
|
||||
buf := make([]byte, maxByteBufferSize)
|
||||
p := copy(buf, hello)
|
||||
out := buf[p:]
|
||||
rb := reorderBuffer{}
|
||||
rb.initString(NFC, world)
|
||||
if i := rb.flushCopy(out); i != 0 {
|
||||
t.Errorf("wrote bytes on flush of empty buffer. (len(out) = %d)", i)
|
||||
}
|
||||
|
||||
for i := range world {
|
||||
// No need to set streamSafe values for this test.
|
||||
rb.insertFlush(rb.src, i, rb.f.info(rb.src, i))
|
||||
n := rb.flushCopy(out)
|
||||
out = out[n:]
|
||||
p += n
|
||||
}
|
||||
|
||||
was := buf[:p]
|
||||
want := hello + world
|
||||
if string(was) != want {
|
||||
t.Errorf(`output after flush was "%s"; want "%s"`, string(was), want)
|
||||
}
|
||||
if rb.nrune != 0 {
|
||||
t.Errorf("non-null size of info buffer (rb.nrune == %d)", rb.nrune)
|
||||
}
|
||||
if rb.nbyte != 0 {
|
||||
t.Errorf("non-null size of byte buffer (rb.nbyte == %d)", rb.nbyte)
|
||||
}
|
||||
}
|
||||
|
||||
var insertTests = []TestCase{
|
||||
{[]rune{'a'}, []rune{'a'}},
|
||||
{[]rune{0x300}, []rune{0x300}},
|
||||
{[]rune{0x300, 0x316}, []rune{0x316, 0x300}}, // CCC(0x300)==230; CCC(0x316)==220
|
||||
{[]rune{0x316, 0x300}, []rune{0x316, 0x300}},
|
||||
{[]rune{0x41, 0x316, 0x300}, []rune{0x41, 0x316, 0x300}},
|
||||
{[]rune{0x41, 0x300, 0x316}, []rune{0x41, 0x316, 0x300}},
|
||||
{[]rune{0x300, 0x316, 0x41}, []rune{0x316, 0x300, 0x41}},
|
||||
{[]rune{0x41, 0x300, 0x40, 0x316}, []rune{0x41, 0x300, 0x40, 0x316}},
|
||||
}
|
||||
|
||||
func TestInsert(t *testing.T) {
|
||||
runTests(t, "TestInsert", NFD, insertTests)
|
||||
}
|
||||
|
||||
var decompositionNFDTest = []TestCase{
|
||||
{[]rune{0xC0}, []rune{0x41, 0x300}},
|
||||
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
|
||||
{[]rune{0x01C4}, []rune{0x01C4}},
|
||||
{[]rune{0x320E}, []rune{0x320E}},
|
||||
{[]rune("음ẻ과"), []rune{0x110B, 0x1173, 0x11B7, 0x65, 0x309, 0x1100, 0x116A}},
|
||||
}
|
||||
|
||||
var decompositionNFKDTest = []TestCase{
|
||||
{[]rune{0xC0}, []rune{0x41, 0x300}},
|
||||
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
|
||||
{[]rune{0x01C4}, []rune{0x44, 0x5A, 0x030C}},
|
||||
{[]rune{0x320E}, []rune{0x28, 0x1100, 0x1161, 0x29}},
|
||||
}
|
||||
|
||||
func TestDecomposition(t *testing.T) {
|
||||
runTests(t, "TestDecompositionNFD", NFD, decompositionNFDTest)
|
||||
runTests(t, "TestDecompositionNFKD", NFKD, decompositionNFKDTest)
|
||||
}
|
||||
|
||||
var compositionTest = []TestCase{
|
||||
{[]rune{0x41, 0x300}, []rune{0xC0}},
|
||||
{[]rune{0x41, 0x316}, []rune{0x41, 0x316}},
|
||||
{[]rune{0x41, 0x300, 0x35D}, []rune{0xC0, 0x35D}},
|
||||
{[]rune{0x41, 0x316, 0x300}, []rune{0xC0, 0x316}},
|
||||
// blocking starter
|
||||
{[]rune{0x41, 0x316, 0x40, 0x300}, []rune{0x41, 0x316, 0x40, 0x300}},
|
||||
{[]rune{0x1100, 0x1161}, []rune{0xAC00}},
|
||||
// parenthesized Hangul, alternate between ASCII and Hangul.
|
||||
{[]rune{0x28, 0x1100, 0x1161, 0x29}, []rune{0x28, 0xAC00, 0x29}},
|
||||
}
|
||||
|
||||
func TestComposition(t *testing.T) {
|
||||
runTests(t, "TestComposition", NFC, compositionTest)
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/example_iter_test.go (82 lines, generated, vendored, new file)
@@ -0,0 +1,82 @@
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
|
||||
"code.google.com/p/go.text/unicode/norm"
|
||||
)
|
||||
|
||||
// EqualSimple uses a norm.Iter to compare two non-normalized
|
||||
// strings for equivalence.
|
||||
func EqualSimple(a, b string) bool {
|
||||
var ia, ib norm.Iter
|
||||
ia.InitString(norm.NFKD, a)
|
||||
ib.InitString(norm.NFKD, b)
|
||||
for !ia.Done() && !ib.Done() {
|
||||
if !bytes.Equal(ia.Next(), ib.Next()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ia.Done() && ib.Done()
|
||||
}
|
||||
|
||||
// FindPrefix finds the longest common prefix of ASCII characters
|
||||
// of a and b.
|
||||
func FindPrefix(a, b string) int {
|
||||
i := 0
|
||||
for ; i < len(a) && i < len(b) && a[i] < utf8.RuneSelf && a[i] == b[i]; i++ {
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// EqualOpt is like EqualSimple, but optimizes the special
|
||||
// case for ASCII characters.
|
||||
func EqualOpt(a, b string) bool {
|
||||
n := FindPrefix(a, b)
|
||||
a, b = a[n:], b[n:]
|
||||
var ia, ib norm.Iter
|
||||
ia.InitString(norm.NFKD, a)
|
||||
ib.InitString(norm.NFKD, b)
|
||||
for !ia.Done() && !ib.Done() {
|
||||
if !bytes.Equal(ia.Next(), ib.Next()) {
|
||||
return false
|
||||
}
|
||||
if n := int64(FindPrefix(a[ia.Pos():], b[ib.Pos():])); n != 0 {
|
||||
ia.Seek(n, 1)
|
||||
ib.Seek(n, 1)
|
||||
}
|
||||
}
|
||||
return ia.Done() && ib.Done()
|
||||
}
|
||||
|
||||
var compareTests = []struct{ a, b string }{
|
||||
{"aaa", "aaa"},
|
||||
{"aaa", "aab"},
|
||||
{"a\u0300a", "\u00E0a"},
|
||||
{"a\u0300\u0320b", "a\u0320\u0300b"},
|
||||
{"\u1E0A\u0323", "\x44\u0323\u0307"},
|
||||
// A character that decomposes into multiple segments
|
||||
// spans several iterations.
|
||||
{"\u3304", "\u30A4\u30CB\u30F3\u30AF\u3099"},
|
||||
}
|
||||
|
||||
func ExampleIter() {
|
||||
for i, t := range compareTests {
|
||||
r0 := EqualSimple(t.a, t.b)
|
||||
r1 := EqualOpt(t.a, t.b)
|
||||
fmt.Printf("%d: %v %v\n", i, r0, r1)
|
||||
}
|
||||
// Output:
|
||||
// 0: true true
|
||||
// 1: false false
|
||||
// 2: true true
|
||||
// 3: true true
|
||||
// 4: true true
|
||||
// 5: true true
|
||||
}
|
||||
@@ -10,7 +10,7 @@ package norm
|
||||
// and its corresponding decomposing form share the same trie. Each trie maps
|
||||
// a rune to a uint16. The values take two forms. For v >= 0x8000:
|
||||
// bits
|
||||
// 15: 1 (inverse of NFD_QC bit of qcInfo)
|
||||
// 15: 1 (inverse of NFD_QD bit of qcInfo)
|
||||
// 13..7: qcInfo (see below). isYesD is always true (no decompostion).
|
||||
// 6..0: ccc (compressed CCC value).
|
||||
// For v < 0x8000, the respective rune has a decomposition and v is an index
|
||||
@@ -56,31 +56,28 @@ type formInfo struct {
|
||||
nextMain iterFunc
|
||||
}
|
||||
|
||||
var formTable = []*formInfo{{
|
||||
form: NFC,
|
||||
composing: true,
|
||||
compatibility: false,
|
||||
info: lookupInfoNFC,
|
||||
nextMain: nextComposed,
|
||||
}, {
|
||||
form: NFD,
|
||||
composing: false,
|
||||
compatibility: false,
|
||||
info: lookupInfoNFC,
|
||||
nextMain: nextDecomposed,
|
||||
}, {
|
||||
form: NFKC,
|
||||
composing: true,
|
||||
compatibility: true,
|
||||
info: lookupInfoNFKC,
|
||||
nextMain: nextComposed,
|
||||
}, {
|
||||
form: NFKD,
|
||||
composing: false,
|
||||
compatibility: true,
|
||||
info: lookupInfoNFKC,
|
||||
nextMain: nextDecomposed,
|
||||
}}
|
||||
var formTable []*formInfo
|
||||
|
||||
func init() {
|
||||
formTable = make([]*formInfo, 4)
|
||||
|
||||
for i := range formTable {
|
||||
f := &formInfo{}
|
||||
formTable[i] = f
|
||||
f.form = Form(i)
|
||||
if Form(i) == NFKD || Form(i) == NFKC {
|
||||
f.compatibility = true
|
||||
f.info = lookupInfoNFKC
|
||||
} else {
|
||||
f.info = lookupInfoNFC
|
||||
}
|
||||
f.nextMain = nextDecomposed
|
||||
if Form(i) == NFC || Form(i) == NFKC {
|
||||
f.nextMain = nextComposed
|
||||
f.composing = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
|
||||
// unexpected behavior for the user. For example, in NFD, there is a boundary
|
||||
@@ -204,17 +201,17 @@ func lookupInfoNFKC(b input, i int) Properties {
|
||||
// Properties returns properties for the first rune in s.
|
||||
func (f Form) Properties(s []byte) Properties {
|
||||
if f == NFC || f == NFD {
|
||||
return compInfo(nfcData.lookup(s))
|
||||
return compInfo(nfcTrie.lookup(s))
|
||||
}
|
||||
return compInfo(nfkcData.lookup(s))
|
||||
return compInfo(nfkcTrie.lookup(s))
|
||||
}
|
||||
|
||||
// PropertiesString returns properties for the first rune in s.
|
||||
func (f Form) PropertiesString(s string) Properties {
|
||||
if f == NFC || f == NFD {
|
||||
return compInfo(nfcData.lookupString(s))
|
||||
return compInfo(nfcTrie.lookupString(s))
|
||||
}
|
||||
return compInfo(nfkcData.lookupString(s))
|
||||
return compInfo(nfkcTrie.lookupString(s))
|
||||
}
|
||||
|
||||
// compInfo converts the information contained in v and sz
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/forminfo_test.go (54 lines, generated, vendored, new file)
@@ -0,0 +1,54 @@
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build test
|
||||
|
||||
package norm
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestProperties(t *testing.T) {
|
||||
var d runeData
|
||||
CK := [2]string{"C", "K"}
|
||||
for k, r := 1, rune(0); r < 0x2ffff; r++ {
|
||||
if k < len(testData) && r == testData[k].r {
|
||||
d = testData[k]
|
||||
k++
|
||||
}
|
||||
s := string(r)
|
||||
for j, p := range []Properties{NFC.PropertiesString(s), NFKC.PropertiesString(s)} {
|
||||
f := d.f[j]
|
||||
if p.CCC() != d.ccc {
|
||||
t.Errorf("%U: ccc(%s): was %d; want %d %X", r, CK[j], p.CCC(), d.ccc, p.index)
|
||||
}
|
||||
if p.isYesC() != (f.qc == Yes) {
|
||||
t.Errorf("%U: YesC(%s): was %v; want %v", r, CK[j], p.isYesC(), f.qc == Yes)
|
||||
}
|
||||
if p.combinesBackward() != (f.qc == Maybe) {
|
||||
t.Errorf("%U: combines backwards(%s): was %v; want %v", r, CK[j], p.combinesBackward(), f.qc == Maybe)
|
||||
}
|
||||
if p.nLeadingNonStarters() != d.nLead {
|
||||
t.Errorf("%U: nLead(%s): was %d; want %d %#v %#v", r, CK[j], p.nLeadingNonStarters(), d.nLead, p, d)
|
||||
}
|
||||
if p.nTrailingNonStarters() != d.nTrail {
|
||||
t.Errorf("%U: nTrail(%s): was %d; want %d %#v %#v", r, CK[j], p.nTrailingNonStarters(), d.nTrail, p, d)
|
||||
}
|
||||
if p.combinesForward() != f.combinesForward {
|
||||
t.Errorf("%U: combines forward(%s): was %v; want %v %#v", r, CK[j], p.combinesForward(), f.combinesForward, p)
|
||||
}
|
||||
// Skip Hangul as it is algorithmically computed.
|
||||
if r >= hangulBase && r < hangulEnd {
|
||||
continue
|
||||
}
|
||||
if p.hasDecomposition() {
|
||||
if has := f.decomposition != ""; !has {
|
||||
t.Errorf("%U: hasDecomposition(%s): was %v; want %v", r, CK[j], p.hasDecomposition(), has)
|
||||
}
|
||||
if string(p.Decomposition()) != f.decomposition {
|
||||
t.Errorf("%U: decomp(%s): was %+q; want %+q", r, CK[j], p.Decomposition(), f.decomposition)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -77,33 +77,29 @@ func (in *input) copySlice(buf []byte, b, e int) int {
|
||||
|
||||
func (in *input) charinfoNFC(p int) (uint16, int) {
|
||||
if in.bytes == nil {
|
||||
return nfcData.lookupString(in.str[p:])
|
||||
return nfcTrie.lookupString(in.str[p:])
|
||||
}
|
||||
return nfcData.lookup(in.bytes[p:])
|
||||
return nfcTrie.lookup(in.bytes[p:])
|
||||
}
|
||||
|
||||
func (in *input) charinfoNFKC(p int) (uint16, int) {
|
||||
if in.bytes == nil {
|
||||
return nfkcData.lookupString(in.str[p:])
|
||||
return nfkcTrie.lookupString(in.str[p:])
|
||||
}
|
||||
return nfkcData.lookup(in.bytes[p:])
|
||||
return nfkcTrie.lookup(in.bytes[p:])
|
||||
}
|
||||
|
||||
func (in *input) hangul(p int) (r rune) {
|
||||
var size int
|
||||
if in.bytes == nil {
|
||||
if !isHangulString(in.str[p:]) {
|
||||
return 0
|
||||
}
|
||||
r, size = utf8.DecodeRuneInString(in.str[p:])
|
||||
r, _ = utf8.DecodeRuneInString(in.str[p:])
|
||||
} else {
|
||||
if !isHangul(in.bytes[p:]) {
|
||||
return 0
|
||||
}
|
||||
r, size = utf8.DecodeRune(in.bytes[p:])
|
||||
}
|
||||
if size != hangulUTF8Size {
|
||||
return 0
|
||||
r, _ = utf8.DecodeRune(in.bytes[p:])
|
||||
}
|
||||
return r
|
||||
}
|
||||
@@ -9,8 +9,6 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
|
||||
// sequence of starter and non-starter runes for the purpose of normalization.
|
||||
const MaxSegmentSize = maxByteBufferSize
|
||||
|
||||
// An Iter iterates over a string or byte slice, while normalizing it
|
||||
@@ -41,7 +39,6 @@ func (i *Iter) Init(f Form, src []byte) {
|
||||
i.next = i.rb.f.nextMain
|
||||
i.asciiF = nextASCIIBytes
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
i.rb.ss.first(i.info)
|
||||
}
|
||||
|
||||
// InitString initializes i to iterate over src after normalizing it to Form f.
|
||||
@@ -57,12 +54,11 @@ func (i *Iter) InitString(f Form, src string) {
|
||||
i.next = i.rb.f.nextMain
|
||||
i.asciiF = nextASCIIString
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
i.rb.ss.first(i.info)
|
||||
}
|
||||
|
||||
// Seek sets the segment to be returned by the next call to Next to start
|
||||
// at position p. It is the responsibility of the caller to set p to the
|
||||
// start of a segment.
|
||||
// start of a UTF8 rune.
|
||||
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
||||
var abs int64
|
||||
switch whence {
|
||||
@@ -86,7 +82,6 @@ func (i *Iter) Seek(offset int64, whence int) (int64, error) {
|
||||
i.multiSeg = nil
|
||||
i.next = i.rb.f.nextMain
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
i.rb.ss.first(i.info)
|
||||
return abs, nil
|
||||
}
|
||||
|
||||
@@ -164,7 +159,6 @@ func nextHangul(i *Iter) []byte {
|
||||
if next >= i.rb.nsrc {
|
||||
i.setDone()
|
||||
} else if i.rb.src.hangul(next) == 0 {
|
||||
i.rb.ss.next(i.info)
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
i.next = i.rb.f.nextMain
|
||||
return i.next(i)
|
||||
@@ -208,10 +202,12 @@ func nextMultiNorm(i *Iter) []byte {
|
||||
if info.BoundaryBefore() {
|
||||
i.rb.compose()
|
||||
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||
i.rb.ss.first(info)
|
||||
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||
i.multiSeg = d[j+int(info.size):]
|
||||
return seg
|
||||
}
|
||||
i.rb.ss.next(info)
|
||||
i.rb.insertUnsafe(input{bytes: d}, j, info)
|
||||
j += int(info.size)
|
||||
}
|
||||
@@ -224,9 +220,9 @@ func nextMultiNorm(i *Iter) []byte {
|
||||
func nextDecomposed(i *Iter) (next []byte) {
|
||||
outp := 0
|
||||
inCopyStart, outCopyStart := i.p, 0
|
||||
ss := mkStreamSafe(i.info)
|
||||
for {
|
||||
if sz := int(i.info.size); sz <= 1 {
|
||||
i.rb.ss = 0
|
||||
p := i.p
|
||||
i.p++ // ASCII or illegal byte. Either way, advance by 1.
|
||||
if i.p >= i.rb.nsrc {
|
||||
@@ -245,8 +241,6 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||
p := outp + len(d)
|
||||
if outp > 0 {
|
||||
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
|
||||
// TODO: this condition should not be possible, but we leave it
|
||||
// in for defensive purposes.
|
||||
if p > len(i.buf) {
|
||||
return i.buf[:outp]
|
||||
}
|
||||
@@ -270,7 +264,7 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||
} else {
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
}
|
||||
switch i.rb.ss.next(i.info) {
|
||||
switch ss.next(i.info) {
|
||||
case ssOverflow:
|
||||
i.next = nextCGJDecompose
|
||||
fallthrough
|
||||
@@ -313,7 +307,7 @@ func nextDecomposed(i *Iter) (next []byte) {
|
||||
}
|
||||
prevCC := i.info.tccc
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
if v := i.rb.ss.next(i.info); v == ssStarter {
|
||||
if v := ss.next(i.info); v == ssStarter {
|
||||
break
|
||||
} else if v == ssOverflow {
|
||||
i.next = nextCGJDecompose
|
||||
@@ -339,6 +333,10 @@ doNorm:
|
||||
|
||||
func doNormDecomposed(i *Iter) []byte {
|
||||
for {
|
||||
if s := i.rb.ss.next(i.info); s == ssOverflow {
|
||||
i.next = nextCGJDecompose
|
||||
break
|
||||
}
|
||||
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
||||
i.setDone()
|
||||
@@ -348,10 +346,6 @@ func doNormDecomposed(i *Iter) []byte {
|
||||
if i.info.ccc == 0 {
|
||||
break
|
||||
}
|
||||
if s := i.rb.ss.next(i.info); s == ssOverflow {
|
||||
i.next = nextCGJDecompose
|
||||
break
|
||||
}
|
||||
}
|
||||
// new segment or too many combining characters: exit normalization
|
||||
return i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||
@@ -361,7 +355,6 @@ func nextCGJDecompose(i *Iter) []byte {
|
||||
i.rb.ss = 0
|
||||
i.rb.insertCGJ()
|
||||
i.next = nextDecomposed
|
||||
i.rb.ss.first(i.info)
|
||||
buf := doNormDecomposed(i)
|
||||
return buf
|
||||
}
|
||||
@@ -370,6 +363,7 @@ func nextCGJDecompose(i *Iter) []byte {
|
||||
func nextComposed(i *Iter) []byte {
|
||||
outp, startp := 0, i.p
|
||||
var prevCC uint8
|
||||
ss := mkStreamSafe(i.info)
|
||||
for {
|
||||
if !i.info.isYesC() {
|
||||
goto doNorm
|
||||
@@ -389,12 +383,11 @@ func nextComposed(i *Iter) []byte {
|
||||
i.setDone()
|
||||
break
|
||||
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
|
||||
i.rb.ss = 0
|
||||
i.next = i.asciiF
|
||||
break
|
||||
}
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
if v := i.rb.ss.next(i.info); v == ssStarter {
|
||||
if v := ss.next(i.info); v == ssStarter {
|
||||
break
|
||||
} else if v == ssOverflow {
|
||||
i.next = nextCGJCompose
|
||||
@@ -406,10 +399,8 @@ func nextComposed(i *Iter) []byte {
|
||||
}
|
||||
return i.returnSlice(startp, i.p)
|
||||
doNorm:
|
||||
// reset to start position
|
||||
i.p = startp
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
i.rb.ss.first(i.info)
|
||||
if i.info.multiSegment() {
|
||||
d := i.info.Decomposition()
|
||||
info := i.rb.f.info(input{bytes: d}, 0)
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/iter_test.go (98 lines, generated, vendored, new file)
@@ -0,0 +1,98 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func doIterNorm(f Form, s string) []byte {
|
||||
acc := []byte{}
|
||||
i := Iter{}
|
||||
i.InitString(f, s)
|
||||
for !i.Done() {
|
||||
acc = append(acc, i.Next()...)
|
||||
}
|
||||
return acc
|
||||
}
|
||||
|
||||
func TestIterNext(t *testing.T) {
|
||||
runNormTests(t, "IterNext", func(f Form, out []byte, s string) []byte {
|
||||
return doIterNorm(f, string(append(out, s...)))
|
||||
})
|
||||
}
|
||||
|
||||
type SegmentTest struct {
|
||||
in string
|
||||
out []string
|
||||
}
|
||||
|
||||
var segmentTests = []SegmentTest{
|
||||
{"\u1E0A\u0323a", []string{"\x44\u0323\u0307", "a", ""}},
|
||||
{rep('a', segSize), append(strings.Split(rep('a', segSize), ""), "")},
|
||||
{rep('a', segSize+2), append(strings.Split(rep('a', segSize+2), ""), "")},
|
||||
{rep('a', segSize) + "\u0300aa",
|
||||
append(strings.Split(rep('a', segSize-1), ""), "a\u0300", "a", "a", "")},
|
||||
|
||||
// U+0f73 is NOT treated as a starter as it is a modifier
|
||||
{"a" + grave(29) + "\u0f73", []string{"a" + grave(29), cgj + "\u0f73"}},
|
||||
{"a\u0f73", []string{"a\u0f73"}},
|
||||
|
||||
// U+ff9e is treated as a non-starter.
|
||||
// TODO: should we? Note that this will only affect iteration, as whether
|
||||
// or not we do so does not affect the normalization output and will either
|
||||
// way result in consistent iteration output.
|
||||
{"a" + grave(30) + "\uff9e", []string{"a" + grave(30), cgj + "\uff9e"}},
|
||||
{"a\uff9e", []string{"a\uff9e"}},
|
||||
}
|
||||
|
||||
var segmentTestsK = []SegmentTest{
|
||||
{"\u3332", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u3099", ""}},
|
||||
// last segment of multi-segment decomposition needs normalization
|
||||
{"\u3332\u093C", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u093C\u3099", ""}},
|
||||
{"\u320E", []string{"\x28", "\uAC00", "\x29"}},
|
||||
|
||||
// last segment should be copied to start of buffer.
|
||||
{"\ufdfa", []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645", ""}},
|
||||
{"\ufdfa" + grave(30), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), ""}},
|
||||
{"\uFDFA" + grave(64), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), cgj + grave(30), cgj + grave(4), ""}},
|
||||
|
||||
// Hangul and Jamo are grouped together.
|
||||
{"\uAC00", []string{"\u1100\u1161", ""}},
|
||||
{"\uAC01", []string{"\u1100\u1161\u11A8", ""}},
|
||||
{"\u1100\u1161", []string{"\u1100\u1161", ""}},
|
||||
}
|
||||
|
||||
// Note that, by design, segmentation is equal for composing and decomposing forms.
|
||||
func TestIterSegmentation(t *testing.T) {
|
||||
segmentTest(t, "SegmentTestD", NFD, segmentTests)
|
||||
segmentTest(t, "SegmentTestC", NFC, segmentTests)
|
||||
segmentTest(t, "SegmentTestKD", NFKD, segmentTestsK)
|
||||
segmentTest(t, "SegmentTestKC", NFKC, segmentTestsK)
|
||||
}
|
||||
|
||||
func segmentTest(t *testing.T, name string, f Form, tests []SegmentTest) {
|
||||
iter := Iter{}
|
||||
for i, tt := range tests {
|
||||
iter.InitString(f, tt.in)
|
||||
for j, seg := range tt.out {
|
||||
if seg == "" {
|
||||
if !iter.Done() {
|
||||
res := string(iter.Next())
|
||||
t.Errorf(`%s:%d:%d: expected Done()==true, found segment %+q`, name, i, j, res)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if iter.Done() {
|
||||
t.Errorf("%s:%d:%d: Done()==true, want false", name, i, j)
|
||||
}
|
||||
seg = f.String(seg)
|
||||
if res := string(iter.Next()); res != seg {
|
||||
t.Errorf(`%s:%d:%d" segment was %+q (%d); want %+q (%d)`, name, i, j, pc(res), len(res), pc(seg), len(seg))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -16,17 +16,19 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/internal/triegen"
|
||||
"golang.org/x/text/internal/ucd"
|
||||
"code.google.com/p/go.text/internal/ucd"
|
||||
)
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
flag.Parse()
|
||||
loadUnicodeData()
|
||||
compactCCC()
|
||||
loadCompositionExclusions()
|
||||
@@ -35,23 +37,32 @@ func main() {
|
||||
computeNonStarterCounts()
|
||||
verifyComputed()
|
||||
printChars()
|
||||
testDerived()
|
||||
printTestdata()
|
||||
makeTables()
|
||||
if *test {
|
||||
testDerived()
|
||||
printTestdata()
|
||||
} else {
|
||||
makeTables()
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
tablelist = flag.String("tables",
|
||||
"all",
|
||||
"comma-separated list of which tables to generate; "+
|
||||
"can be 'decomp', 'recomp', 'info' and 'all'")
|
||||
test = flag.Bool("test",
|
||||
false,
|
||||
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
|
||||
verbose = flag.Bool("verbose",
|
||||
false,
|
||||
"write data to stdout as it is parsed")
|
||||
)
|
||||
var url = flag.String("url",
|
||||
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/",
|
||||
"URL of Unicode database directory")
|
||||
var tablelist = flag.String("tables",
|
||||
"all",
|
||||
"comma-separated list of which tables to generate; "+
|
||||
"can be 'decomp', 'recomp', 'info' and 'all'")
|
||||
var test = flag.Bool("test",
|
||||
false,
|
||||
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
|
||||
var verbose = flag.Bool("verbose",
|
||||
false,
|
||||
"write data to stdout as it is parsed")
|
||||
var localFiles = flag.Bool("local",
|
||||
false,
|
||||
"data files have been copied to the current directory; for debugging only")
|
||||
|
||||
var logger = log.New(os.Stderr, "", log.Lshortfile)
|
||||
|
||||
const MaxChar = 0x10FFFF // anything above this shouldn't exist
|
||||
|
||||
@@ -177,6 +188,27 @@ func (f FormInfo) String() string {
|
||||
|
||||
type Decomposition []rune
|
||||
|
||||
func openReader(file string) (input io.ReadCloser) {
|
||||
if *localFiles {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
input = f
|
||||
} else {
|
||||
path := *url + file
|
||||
resp, err := http.Get(path)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
logger.Fatal("bad GET status for "+file, resp.Status)
|
||||
}
|
||||
input = resp.Body
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
|
||||
decomp := strings.Split(s, " ")
|
||||
if len(decomp) > 0 && skipfirst {
|
||||
@@ -193,7 +225,7 @@ func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
|
||||
}
|
||||
|
||||
func loadUnicodeData() {
|
||||
f := gen.OpenUCDFile("UnicodeData.txt")
|
||||
f := openReader("UnicodeData.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
@@ -209,7 +241,7 @@ func loadUnicodeData() {
|
||||
if len(decmap) > 0 {
|
||||
exp, err = parseDecomposition(decmap, true)
|
||||
if err != nil {
|
||||
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
|
||||
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
|
||||
}
|
||||
isCompat = true
|
||||
}
|
||||
@@ -228,7 +260,7 @@ func loadUnicodeData() {
|
||||
}
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
logger.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,18 +295,18 @@ func compactCCC() {
|
||||
// 0958 # ...
|
||||
// See http://unicode.org/reports/tr44/ for full explanation
|
||||
func loadCompositionExclusions() {
|
||||
f := gen.OpenUCDFile("CompositionExclusions.txt")
|
||||
f := openReader("CompositionExclusions.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
c := &chars[p.Rune(0)]
|
||||
if c.excludeInComp {
|
||||
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
|
||||
logger.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
|
||||
}
|
||||
c.excludeInComp = true
|
||||
}
|
||||
if e := p.Err(); e != nil {
|
||||
log.Fatal(e)
|
||||
logger.Fatal(e)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -468,22 +500,29 @@ func computeNonStarterCounts() {
|
||||
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
|
||||
runes = exp
|
||||
}
|
||||
// We consider runes that combine backwards to be non-starters for the
|
||||
// purpose of Stream-Safe Text Processing.
|
||||
for _, r := range runes {
|
||||
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
||||
if chars[r].ccc == 0 {
|
||||
break
|
||||
}
|
||||
c.nLeadingNonStarters++
|
||||
}
|
||||
for i := len(runes) - 1; i >= 0; i-- {
|
||||
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
|
||||
if chars[runes[i]].ccc == 0 {
|
||||
break
|
||||
}
|
||||
c.nTrailingNonStarters++
|
||||
}
|
||||
if c.nTrailingNonStarters > 3 {
|
||||
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
|
||||
|
||||
// We consider runes that combine backwards to be non-starters for the
|
||||
// purpose of Stream-Safe Text Processing.
|
||||
for _, f := range c.forms {
|
||||
if c.ccc == 0 && f.combinesBackward {
|
||||
if len(c.forms[FCompatibility].expandedDecomp) > 0 {
|
||||
log.Fatalf("%U: CCC==0 modifier with an expansion is not supported.", i)
|
||||
}
|
||||
c.nTrailingNonStarters = 1
|
||||
c.nLeadingNonStarters = 1
|
||||
}
|
||||
}
|
||||
|
||||
if isHangul(rune(i)) {
|
||||
@@ -502,19 +541,19 @@ func computeNonStarterCounts() {
|
||||
}
|
||||
}
|
||||
|
||||
func printBytes(w io.Writer, b []byte, name string) {
|
||||
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
|
||||
fmt.Fprintf(w, "var %s = [...]byte {", name)
|
||||
func printBytes(b []byte, name string) {
|
||||
fmt.Printf("// %s: %d bytes\n", name, len(b))
|
||||
fmt.Printf("var %s = [...]byte {", name)
|
||||
for i, c := range b {
|
||||
switch {
|
||||
case i%64 == 0:
|
||||
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
|
||||
fmt.Printf("\n// Bytes %x - %x\n", i, i+63)
|
||||
case i%8 == 0:
|
||||
fmt.Fprintf(w, "\n")
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
fmt.Fprintf(w, "0x%.2X, ", c)
|
||||
fmt.Printf("0x%.2X, ", c)
|
||||
}
|
||||
fmt.Fprint(w, "\n}\n\n")
|
||||
fmt.Print("\n}\n\n")
|
||||
}
|
||||
|
||||
// See forminfo.go for format.
|
||||
@@ -570,13 +609,13 @@ func (m *decompSet) insert(key int, s string) {
|
||||
m[key][s] = true
|
||||
}
|
||||
|
||||
func printCharInfoTables(w io.Writer) int {
|
||||
func printCharInfoTables() int {
|
||||
mkstr := func(r rune, f *FormInfo) (int, string) {
|
||||
d := f.expandedDecomp
|
||||
s := string([]rune(d))
|
||||
if max := 1 << 6; len(s) >= max {
|
||||
const msg = "%U: too many bytes in decomposition: %d >= %d"
|
||||
log.Fatalf(msg, r, len(s), max)
|
||||
logger.Fatalf(msg, r, len(s), max)
|
||||
}
|
||||
head := uint8(len(s))
|
||||
if f.quickCheck[MComposed] != QCYes {
|
||||
@@ -591,15 +630,14 @@ func printCharInfoTables(w io.Writer) int {
|
||||
tccc := ccc(d[len(d)-1])
|
||||
cc := ccc(r)
|
||||
if cc != 0 && lccc == 0 && tccc == 0 {
|
||||
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
|
||||
logger.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
|
||||
}
|
||||
if tccc < lccc && lccc != 0 {
|
||||
const msg = "%U: lccc (%d) must be <= tcc (%d)"
|
||||
log.Fatalf(msg, r, lccc, tccc)
|
||||
logger.Fatalf(msg, r, lccc, tccc)
|
||||
}
|
||||
index := normalDecomp
|
||||
nTrail := chars[r].nTrailingNonStarters
|
||||
nLead := chars[r].nLeadingNonStarters
|
||||
if tccc > 0 || lccc > 0 || nTrail > 0 {
|
||||
tccc <<= 2
|
||||
tccc |= nTrail
|
||||
@@ -610,16 +648,16 @@ func printCharInfoTables(w io.Writer) int {
|
||||
index = firstCCC
|
||||
}
|
||||
}
|
||||
if lccc > 0 || nLead > 0 {
|
||||
if lccc > 0 {
|
||||
s += string([]byte{lccc})
|
||||
if index == firstCCC {
|
||||
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
||||
logger.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
|
||||
}
|
||||
index = firstLeadingCCC
|
||||
}
|
||||
if cc != lccc {
|
||||
if cc != 0 {
|
||||
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
|
||||
logger.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
|
||||
}
|
||||
index = firstCCCZeroExcept
|
||||
}
|
||||
@@ -641,7 +679,7 @@ func printCharInfoTables(w io.Writer) int {
|
||||
continue
|
||||
}
|
||||
if f.combinesBackward {
|
||||
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
|
||||
logger.Fatalf("%U: combinesBackward and decompose", c.codePoint)
|
||||
}
|
||||
index, s := mkstr(c.codePoint, &f)
|
||||
decompSet.insert(index, s)
|
||||
@@ -652,7 +690,7 @@ func printCharInfoTables(w io.Writer) int {
|
||||
size := 0
|
||||
positionMap := make(map[string]uint16)
|
||||
decompositions.WriteString("\000")
|
||||
fmt.Fprintln(w, "const (")
|
||||
fmt.Println("const (")
|
||||
for i, m := range decompSet {
|
||||
sa := []string{}
|
||||
for s := range m {
|
||||
@@ -665,44 +703,39 @@ func printCharInfoTables(w io.Writer) int {
|
||||
positionMap[s] = uint16(p)
|
||||
}
|
||||
if cname[i] != "" {
|
||||
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
|
||||
fmt.Printf("%s = 0x%X\n", cname[i], decompositions.Len())
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, "maxDecomp = 0x8000")
|
||||
fmt.Fprintln(w, ")")
|
||||
fmt.Println("maxDecomp = 0x8000")
|
||||
fmt.Println(")")
|
||||
b := decompositions.Bytes()
|
||||
printBytes(w, b, "decomps")
|
||||
printBytes(b, "decomps")
|
||||
size += len(b)
|
||||
|
||||
varnames := []string{"nfc", "nfkc"}
|
||||
for i := 0; i < FNumberOfFormTypes; i++ {
|
||||
trie := triegen.NewTrie(varnames[i])
|
||||
|
||||
trie := newNode()
|
||||
for r, c := range chars {
|
||||
f := c.forms[i]
|
||||
d := f.expandedDecomp
|
||||
if len(d) != 0 {
|
||||
_, key := mkstr(c.codePoint, &f)
|
||||
trie.Insert(rune(r), uint64(positionMap[key]))
|
||||
trie.insert(rune(r), positionMap[key])
|
||||
if c.ccc != ccc(d[0]) {
|
||||
// We assume the lead ccc of a decomposition !=0 in this case.
|
||||
if ccc(d[0]) == 0 {
|
||||
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
|
||||
logger.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
|
||||
}
|
||||
}
|
||||
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
|
||||
// Handle cases where it can't be detected that the nLead should be equal
|
||||
// to nTrail.
|
||||
trie.Insert(c.codePoint, uint64(positionMap[nLeadStr]))
|
||||
trie.insert(c.codePoint, positionMap[nLeadStr])
|
||||
} else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 {
|
||||
trie.Insert(c.codePoint, uint64(0x8000|v))
|
||||
trie.insert(c.codePoint, 0x8000|v)
|
||||
}
|
||||
}
|
||||
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
size += sz
|
||||
size += trie.printTables(varnames[i])
|
||||
}
|
||||
return size
|
||||
}
|
||||
@@ -716,9 +749,30 @@ func contains(sa []string, s string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func makeTables() {
|
||||
w := &bytes.Buffer{}
|
||||
// Extract the version number from the URL.
|
||||
func version() string {
|
||||
// From http://www.unicode.org/standard/versions/#Version_Numbering:
|
||||
// for the later Unicode versions, data files are located in
|
||||
// versioned directories.
|
||||
fields := strings.Split(*url, "/")
|
||||
for _, f := range fields {
|
||||
if match, _ := regexp.MatchString(`[0-9]\.[0-9]\.[0-9]`, f); match {
|
||||
return f
|
||||
}
|
||||
}
|
||||
logger.Fatal("unknown version")
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
const fileHeader = `// Generated by running
|
||||
// maketables --tables=%s --url=%s
|
||||
// DO NOT EDIT
|
||||
|
||||
package norm
|
||||
|
||||
`
|
||||
|
||||
func makeTables() {
|
||||
size := 0
|
||||
if *tablelist == "" {
|
||||
return
|
||||
@@ -727,6 +781,7 @@ func makeTables() {
|
||||
if *tablelist == "all" {
|
||||
list = []string{"recomp", "info"}
|
||||
}
|
||||
fmt.Printf(fileHeader, *tablelist, *url)
|
||||
|
||||
// Compute maximum decomposition size.
|
||||
max := 0
|
||||
@@ -736,30 +791,30 @@ func makeTables() {
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(w, "const (")
|
||||
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
|
||||
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
|
||||
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
|
||||
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
|
||||
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
|
||||
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
|
||||
fmt.Fprintln(w, ")\n")
|
||||
fmt.Println("const (")
|
||||
fmt.Println("\t// Version is the Unicode edition from which the tables are derived.")
|
||||
fmt.Printf("\tVersion = %q\n", version())
|
||||
fmt.Println()
|
||||
fmt.Println("\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
|
||||
fmt.Println("\t// may need to write atomically for any Form. Making a destination buffer at")
|
||||
fmt.Println("\t// least this size ensures that Transform can always make progress and that")
|
||||
fmt.Println("\t// the user does not need to grow the buffer on an ErrShortDst.")
|
||||
fmt.Printf("\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
|
||||
fmt.Println(")\n")
|
||||
|
||||
// Print the CCC remap table.
|
||||
size += len(cccMap)
|
||||
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
|
||||
fmt.Printf("var ccc = [%d]uint8{", len(cccMap))
|
||||
for i := 0; i < len(cccMap); i++ {
|
||||
if i%8 == 0 {
|
||||
fmt.Fprintln(w)
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
|
||||
fmt.Printf("%3d, ", cccMap[uint8(i)])
|
||||
}
|
||||
fmt.Fprintln(w, "\n}\n")
|
||||
fmt.Println("\n}\n")
|
||||
|
||||
if contains(list, "info") {
|
||||
size += printCharInfoTables(w)
|
||||
size += printCharInfoTables()
|
||||
}
|
||||
|
||||
if contains(list, "recomp") {
|
||||
@@ -781,21 +836,20 @@ func makeTables() {
|
||||
}
|
||||
sz := nrentries * 8
|
||||
size += sz
|
||||
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
|
||||
fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
|
||||
fmt.Printf("// recompMap: %d bytes (entries only)\n", sz)
|
||||
fmt.Println("var recompMap = map[uint32]rune{")
|
||||
for i, c := range chars {
|
||||
f := c.forms[FCanonical]
|
||||
d := f.decomp
|
||||
if !f.isOneWay && len(d) > 0 {
|
||||
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
|
||||
fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
|
||||
fmt.Printf("0x%.8X: 0x%.4X,\n", key, i)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, "}\n\n")
|
||||
fmt.Printf("}\n\n")
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
|
||||
gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes())
|
||||
fmt.Printf("// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
|
||||
}
|
||||
|
||||
func printChars() {
|
||||
@@ -830,16 +884,10 @@ func verifyComputed() {
|
||||
continue
|
||||
}
|
||||
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
|
||||
// We accept these runes to be treated differently (it only affects
|
||||
// segment breaking in iteration, most likely on improper use), but
|
||||
// We accept these two runes to be treated differently (it only affects
|
||||
// segment breaking in iteration, most likely on inproper use), but
|
||||
// reconsider if more characters are added.
|
||||
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
|
||||
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
|
||||
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
|
||||
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
|
||||
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
|
||||
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
|
||||
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
|
||||
if i != 0xFF9E && i != 0xFF9F {
|
||||
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
|
||||
}
|
||||
}
|
||||
@@ -847,7 +895,7 @@ func verifyComputed() {
|
||||
nfc := c.forms[FCanonical]
|
||||
nfkc := c.forms[FCompatibility]
|
||||
if nfc.combinesBackward != nfkc.combinesBackward {
|
||||
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
|
||||
logger.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -859,7 +907,7 @@ func verifyComputed() {
|
||||
// 0374 ; NFD_QC; N # ...
|
||||
// See http://unicode.org/reports/tr44/ for full explanation
|
||||
func testDerived() {
|
||||
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
|
||||
f := openReader("DerivedNormalizationProps.txt")
|
||||
defer f.Close()
|
||||
p := ucd.New(f)
|
||||
for p.Next() {
|
||||
@@ -892,12 +940,12 @@ func testDerived() {
|
||||
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
|
||||
}
|
||||
if got := c.forms[ftype].quickCheck[mode]; got != qr {
|
||||
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
|
||||
logger.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
|
||||
}
|
||||
c.forms[ftype].verified[mode] = true
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
logger.Fatal(err)
|
||||
}
|
||||
// Any unspecified value must be QCYes. Verify this.
|
||||
for i, c := range chars {
|
||||
@@ -905,14 +953,20 @@ func testDerived() {
|
||||
for k, qr := range fd.quickCheck {
|
||||
if !fd.verified[k] && qr != QCYes {
|
||||
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
|
||||
log.Printf(m, i, j, k, qr, c.name)
|
||||
logger.Printf(m, i, j, k, qr, c.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testHeader = `const (
|
||||
var testHeader = `// Generated by running
|
||||
// maketables --test --url=%s
|
||||
// +build test
|
||||
|
||||
package norm
|
||||
|
||||
const (
|
||||
Yes = iota
|
||||
No
|
||||
Maybe
|
||||
@@ -950,10 +1004,8 @@ func printTestdata() {
|
||||
nTrail uint8
|
||||
f string
|
||||
}
|
||||
|
||||
last := lastInfo{}
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprintf(w, testHeader)
|
||||
fmt.Printf(testHeader, *url)
|
||||
for r, c := range chars {
|
||||
f := c.forms[FCanonical]
|
||||
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
|
||||
@@ -967,10 +1019,9 @@ func printTestdata() {
|
||||
}
|
||||
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
|
||||
if last != current {
|
||||
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
|
||||
fmt.Printf("\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
|
||||
last = current
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, "}")
|
||||
gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes())
|
||||
fmt.Println("}")
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/maketesttables.go (45 lines, generated, vendored, normal file)
@@ -0,0 +1,45 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Generate test data for trie code.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
printTestTables()
|
||||
}
|
||||
|
||||
// We take the smallest, largest and an arbitrary value for each
|
||||
// of the UTF-8 sequence lengths.
|
||||
var testRunes = []rune{
|
||||
0x01, 0x0C, 0x7F, // 1-byte sequences
|
||||
0x80, 0x100, 0x7FF, // 2-byte sequences
|
||||
0x800, 0x999, 0xFFFF, // 3-byte sequences
|
||||
0x10000, 0x10101, 0x10FFFF, // 4-byte sequences
|
||||
0x200, 0x201, 0x202, 0x210, 0x215, // five entries in one sparse block
|
||||
}
|
||||
|
||||
const fileHeader = `// Generated by running
|
||||
// maketesttables
|
||||
// DO NOT EDIT
|
||||
|
||||
package norm
|
||||
|
||||
`
|
||||
|
||||
func printTestTables() {
|
||||
fmt.Print(fileHeader)
|
||||
fmt.Printf("var testRunes = %#v\n\n", testRunes)
|
||||
t := newNode()
|
||||
for i, r := range testRunes {
|
||||
t.insert(r, uint16(i))
|
||||
}
|
||||
t.printTables("testdata")
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/norm_test.go (14 lines, generated, vendored, normal file)
@@ -0,0 +1,14 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPlaceHolder(t *testing.T) {
|
||||
// Does nothing, just allows the Makefile to be canonical
|
||||
// while waiting for the package itself to be written.
|
||||
}
|
||||
@@ -2,18 +2,10 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Note: the file data_test.go that is generated should not be checked in.
|
||||
//go:generate go run maketables.go triegen.go
|
||||
//go:generate go test -tags test
|
||||
|
||||
// Package norm contains types and functions for normalizing Unicode strings.
|
||||
package norm // import "golang.org/x/text/unicode/norm"
|
||||
package norm
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
import "unicode/utf8"
|
||||
|
||||
// A Form denotes a canonical representation of Unicode code points.
|
||||
// The Unicode-defined normalization and equivalence forms are:
|
||||
@@ -268,34 +260,6 @@ func (f Form) QuickSpan(b []byte) int {
|
||||
return n
|
||||
}
|
||||
|
||||
// Span implements transform.SpanningTransformer. It returns a boundary n such
|
||||
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
|
||||
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
|
||||
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
|
||||
if n < len(b) {
|
||||
if !ok {
|
||||
err = transform.ErrEndOfSpan
|
||||
} else {
|
||||
err = transform.ErrShortSrc
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
||||
// It is not guaranteed to return the largest such n.
|
||||
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
|
||||
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
|
||||
if n < len(s) {
|
||||
if !ok {
|
||||
err = transform.ErrEndOfSpan
|
||||
} else {
|
||||
err = transform.ErrShortSrc
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
|
||||
// whether any non-normalized parts were found. If atEOF is false, n will
|
||||
// not point past the last segment if this segment might become
|
||||
@@ -324,6 +288,7 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool)
|
||||
// have an overflow for runes that are starters (e.g. with U+FF9E).
|
||||
switch ss.next(info) {
|
||||
case ssStarter:
|
||||
ss.first(info)
|
||||
lastSegStart = i
|
||||
case ssOverflow:
|
||||
return lastSegStart, false
|
||||
@@ -353,7 +318,7 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool)
|
||||
return lastSegStart, false
|
||||
}
|
||||
|
||||
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
|
||||
// QuickSpanString returns a boundary n such that b[0:n] == f(s[0:n]).
|
||||
// It is not guaranteed to return the largest such n.
|
||||
func (f Form) QuickSpanString(s string) int {
|
||||
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
|
||||
@@ -376,6 +341,7 @@ func (f Form) firstBoundary(src input, nsrc int) int {
|
||||
// We should call ss.first here, but we can't as the first rune is
|
||||
// skipped already. This means FirstBoundary can't really determine
|
||||
// CGJ insertion points correctly. Luckily it doesn't have to.
|
||||
// TODO: consider adding NextBoundary
|
||||
for {
|
||||
info := fd.info(src, i)
|
||||
if info.size == 0 {
|
||||
@@ -400,58 +366,6 @@ func (f Form) FirstBoundaryInString(s string) int {
|
||||
return f.firstBoundary(inputString(s), len(s))
|
||||
}
|
||||
|
||||
// NextBoundary reports the index of the boundary between the first and next
|
||||
// segment in b or -1 if atEOF is false and there are not enough bytes to
|
||||
// determine this boundary.
|
||||
func (f Form) NextBoundary(b []byte, atEOF bool) int {
|
||||
return f.nextBoundary(inputBytes(b), len(b), atEOF)
|
||||
}
|
||||
|
||||
// NextBoundaryInString reports the index of the boundary between the first and
|
||||
// next segment in b or -1 if atEOF is false and there are not enough bytes to
|
||||
// determine this boundary.
|
||||
func (f Form) NextBoundaryInString(s string, atEOF bool) int {
|
||||
return f.nextBoundary(inputString(s), len(s), atEOF)
|
||||
}
|
||||
|
||||
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
|
||||
if nsrc == 0 {
|
||||
if atEOF {
|
||||
return 0
|
||||
}
|
||||
return -1
|
||||
}
|
||||
fd := formTable[f]
|
||||
info := fd.info(src, 0)
|
||||
if info.size == 0 {
|
||||
if atEOF {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
ss := streamSafe(0)
|
||||
ss.first(info)
|
||||
|
||||
for i := int(info.size); i < nsrc; i += int(info.size) {
|
||||
info = fd.info(src, i)
|
||||
if info.size == 0 {
|
||||
if atEOF {
|
||||
return i
|
||||
}
|
||||
return -1
|
||||
}
|
||||
// TODO: Using streamSafe to determine the boundary isn't the same as
|
||||
// using BoundaryBefore. Determine which should be used.
|
||||
if s := ss.next(info); s != ssSuccess {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
|
||||
return -1
|
||||
}
|
||||
return nsrc
|
||||
}
|
||||
|
||||
// LastBoundary returns the position i of the last boundary in b
|
||||
// or -1 if b contains no boundary.
|
||||
func (f Form) LastBoundary(b []byte) int {
|
||||
@@ -506,14 +420,15 @@ func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
|
||||
if info.size == 0 {
|
||||
return 0
|
||||
}
|
||||
if s := rb.ss.next(info); s == ssStarter {
|
||||
// TODO: this could be removed if we don't support merging.
|
||||
if rb.nrune > 0 {
|
||||
if rb.nrune > 0 {
|
||||
if s := rb.ss.next(info); s == ssStarter {
|
||||
goto end
|
||||
} else if s == ssOverflow {
|
||||
rb.insertCGJ()
|
||||
goto end
|
||||
}
|
||||
} else if s == ssOverflow {
|
||||
rb.insertCGJ()
|
||||
goto end
|
||||
} else {
|
||||
rb.ss.first(info)
|
||||
}
|
||||
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
|
||||
return int(err)
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/normalize_test.go (1086 lines, generated, vendored, normal file; diff suppressed because it is too large)
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/normregtest.go (318 lines, generated, vendored, normal file)
@@ -0,0 +1,318 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"code.google.com/p/go.text/unicode/norm"
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
loadTestData()
|
||||
CharacterByCharacterTests()
|
||||
StandardTests()
|
||||
PerformanceTest()
|
||||
if errorCount == 0 {
|
||||
fmt.Println("PASS")
|
||||
}
|
||||
}
|
||||
|
||||
const file = "NormalizationTest.txt"
|
||||
|
||||
var url = flag.String("url",
|
||||
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/"+file,
|
||||
"URL of Unicode database directory")
|
||||
var localFiles = flag.Bool("local",
|
||||
false,
|
||||
"data files have been copied to the current directory; for debugging only")
|
||||
|
||||
var logger = log.New(os.Stderr, "", log.Lshortfile)
|
||||
|
||||
// This regression test runs the test set in NormalizationTest.txt
|
||||
// (taken from http://www.unicode.org/Public/<unicode.Version>/ucd/).
|
||||
//
|
||||
// NormalizationTest.txt has form:
|
||||
// @Part0 # Specific cases
|
||||
// #
|
||||
// 1E0A;1E0A;0044 0307;1E0A;0044 0307; # (Ḋ; Ḋ; D◌̇; Ḋ; D◌̇; ) LATIN CAPITAL LETTER D WITH DOT ABOVE
|
||||
// 1E0C;1E0C;0044 0323;1E0C;0044 0323; # (Ḍ; Ḍ; D◌̣; Ḍ; D◌̣; ) LATIN CAPITAL LETTER D WITH DOT BELOW
|
||||
//
|
||||
// Each test has 5 columns (c1, c2, c3, c4, c5), where
|
||||
// (c1, c2, c3, c4, c5) == (c1, NFC(c1), NFD(c1), NFKC(c1), NFKD(c1))
|
||||
//
|
||||
// CONFORMANCE:
|
||||
// 1. The following invariants must be true for all conformant implementations
|
||||
//
|
||||
// NFC
|
||||
// c2 == NFC(c1) == NFC(c2) == NFC(c3)
|
||||
// c4 == NFC(c4) == NFC(c5)
|
||||
//
|
||||
// NFD
|
||||
// c3 == NFD(c1) == NFD(c2) == NFD(c3)
|
||||
// c5 == NFD(c4) == NFD(c5)
|
||||
//
|
||||
// NFKC
|
||||
// c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
|
||||
//
|
||||
// NFKD
|
||||
// c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)
|
||||
//
|
||||
// 2. For every code point X assigned in this version of Unicode that is not
|
||||
// specifically listed in Part 1, the following invariants must be true
|
||||
// for all conformant implementations:
|
||||
//
|
||||
// X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X)
|
||||
//
|
||||
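// Illustrative sketch: for a single parsed row, the first NFC invariant above
// can be spot-checked with the norm package this program imports, where c1 and
// c2 are hypothetical stand-ins for columns 1 and 2 of that row as decoded by
// loadTestData below:
//
//	if norm.NFC.String(c1) != c2 {
//		// the invariant c2 == NFC(c1) is violated for this row
//	}
//
// doTest and doConformanceTests below run the full matrix of these checks
// through Bytes, String, Iter, Append, and Transform.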
|
||||
// Column types.
|
||||
const (
|
||||
cRaw = iota
|
||||
cNFC
|
||||
cNFD
|
||||
cNFKC
|
||||
cNFKD
|
||||
cMaxColumns
|
||||
)
|
||||
|
||||
// Holds data from NormalizationTest.txt
|
||||
var part []Part
|
||||
|
||||
type Part struct {
|
||||
name string
|
||||
number int
|
||||
tests []Test
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
name string
|
||||
partnr int
|
||||
number int
|
||||
r rune // used for character by character test
|
||||
cols [cMaxColumns]string // Each has 5 entries, see below.
|
||||
}
|
||||
|
||||
func (t Test) Name() string {
|
||||
if t.number < 0 {
|
||||
return part[t.partnr].name
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", part[t.partnr].name, t.number)
|
||||
}
|
||||
|
||||
var partRe = regexp.MustCompile(`@Part(\d) # (.*)$`)
|
||||
var testRe = regexp.MustCompile(`^` + strings.Repeat(`([\dA-F ]+);`, 5) + ` # (.*)$`)
|
||||
|
||||
var counter int
|
||||
|
||||
// Load the data from NormalizationTest.txt
|
||||
func loadTestData() {
|
||||
if *localFiles {
|
||||
pwd, _ := os.Getwd()
|
||||
*url = "file://" + path.Join(pwd, file)
|
||||
}
|
||||
t := &http.Transport{}
|
||||
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
|
||||
c := &http.Client{Transport: t}
|
||||
resp, err := c.Get(*url)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
logger.Fatal("bad GET status for "+file, resp.Status)
|
||||
}
|
||||
f := resp.Body
|
||||
defer f.Close()
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
m := partRe.FindStringSubmatch(line)
|
||||
if m != nil {
|
||||
if len(m) < 3 {
|
||||
logger.Fatal("Failed to parse Part: ", line)
|
||||
}
|
||||
i, err := strconv.Atoi(m[1])
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
name := m[2]
|
||||
part = append(part, Part{name: name[:len(name)-1], number: i})
|
||||
continue
|
||||
}
|
||||
m = testRe.FindStringSubmatch(line)
|
||||
if m == nil || len(m) < 7 {
|
||||
logger.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
|
||||
}
|
||||
test := Test{name: m[6], partnr: len(part) - 1, number: counter}
|
||||
counter++
|
||||
for j := 1; j < len(m)-1; j++ {
|
||||
for _, split := range strings.Split(m[j], " ") {
|
||||
r, err := strconv.ParseUint(split, 16, 64)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
if test.r == 0 {
|
||||
// save for CharacterByCharacterTests
|
||||
test.r = rune(r)
|
||||
}
|
||||
var buf [utf8.UTFMax]byte
|
||||
sz := utf8.EncodeRune(buf[:], rune(r))
|
||||
test.cols[j-1] += string(buf[:sz])
|
||||
}
|
||||
}
|
||||
part := &part[len(part)-1]
|
||||
part.tests = append(part.tests, test)
|
||||
}
|
||||
if scanner.Err() != nil {
|
||||
logger.Fatal(scanner.Err())
|
||||
}
|
||||
}
|
||||
|
||||
var fstr = []string{"NFC", "NFD", "NFKC", "NFKD"}
|
||||
|
||||
var errorCount int
|
||||
|
||||
func cmpResult(t *Test, name string, f norm.Form, gold, test, result string) {
|
||||
if gold != result {
|
||||
errorCount++
|
||||
if errorCount > 20 {
|
||||
return
|
||||
}
|
||||
logger.Printf("%s:%s: %s(%+q)=%+q; want %+q: %s",
|
||||
t.Name(), name, fstr[f], test, result, gold, t.name)
|
||||
}
|
||||
}
|
||||
|
||||
func cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bool) {
|
||||
if result != want {
|
||||
errorCount++
|
||||
if errorCount > 20 {
|
||||
return
|
||||
}
|
||||
logger.Printf("%s:%s: %s(%+q)=%v; want %v", t.Name(), name, fstr[f], test, result, want)
|
||||
}
|
||||
}
|
||||
|
||||
func doTest(t *Test, f norm.Form, gold, test string) {
|
||||
testb := []byte(test)
|
||||
result := f.Bytes(testb)
|
||||
cmpResult(t, "Bytes", f, gold, test, string(result))
|
||||
|
||||
sresult := f.String(test)
|
||||
cmpResult(t, "String", f, gold, test, sresult)
|
||||
|
||||
acc := []byte{}
|
||||
i := norm.Iter{}
|
||||
i.InitString(f, test)
|
||||
for !i.Done() {
|
||||
acc = append(acc, i.Next()...)
|
||||
}
|
||||
cmpResult(t, "Iter.Next", f, gold, test, string(acc))
|
||||
|
||||
buf := make([]byte, 128)
|
||||
acc = nil
|
||||
for p := 0; p < len(testb); {
|
||||
nDst, nSrc, _ := f.Transform(buf, testb[p:], true)
|
||||
acc = append(acc, buf[:nDst]...)
|
||||
p += nSrc
|
||||
}
|
||||
cmpResult(t, "Transform", f, gold, test, string(acc))
|
||||
|
||||
for i := range test {
|
||||
out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
|
||||
cmpResult(t, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
|
||||
}
|
||||
cmpIsNormal(t, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
|
||||
cmpIsNormal(t, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
|
||||
}
|
||||
|
||||
func doConformanceTests(t *Test, partn int) {
|
||||
for i := 0; i <= 2; i++ {
|
||||
doTest(t, norm.NFC, t.cols[1], t.cols[i])
|
||||
doTest(t, norm.NFD, t.cols[2], t.cols[i])
|
||||
doTest(t, norm.NFKC, t.cols[3], t.cols[i])
|
||||
doTest(t, norm.NFKD, t.cols[4], t.cols[i])
|
||||
}
|
||||
for i := 3; i <= 4; i++ {
|
||||
doTest(t, norm.NFC, t.cols[3], t.cols[i])
|
||||
doTest(t, norm.NFD, t.cols[4], t.cols[i])
|
||||
doTest(t, norm.NFKC, t.cols[3], t.cols[i])
|
||||
doTest(t, norm.NFKD, t.cols[4], t.cols[i])
|
||||
}
|
||||
}
|
||||
|
||||
func CharacterByCharacterTests() {
|
||||
tests := part[1].tests
|
||||
var last rune = 0
|
||||
for i := 0; i <= len(tests); i++ { // last one is special case
|
||||
var r rune
|
||||
if i == len(tests) {
|
||||
r = 0x2FA1E // Don't have to go to 0x10FFFF
|
||||
} else {
|
||||
r = tests[i].r
|
||||
}
|
||||
for last++; last < r; last++ {
|
||||
// Check all characters that were not explicitly listed in the test.
|
||||
t := &Test{partnr: 1, number: -1}
|
||||
char := string(last)
|
||||
doTest(t, norm.NFC, char, char)
|
||||
doTest(t, norm.NFD, char, char)
|
||||
doTest(t, norm.NFKC, char, char)
|
||||
doTest(t, norm.NFKD, char, char)
|
||||
}
|
||||
if i < len(tests) {
|
||||
doConformanceTests(&tests[i], 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func StandardTests() {
|
||||
for _, j := range []int{0, 2, 3} {
|
||||
for _, test := range part[j].tests {
|
||||
doConformanceTests(&test, j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PerformanceTest verifies that normalization is O(n). If any of the
|
||||
// code does not properly check for maxCombiningChars, normalization
|
||||
// may exhibit O(n**2) behavior.
|
||||
func PerformanceTest() {
|
||||
runtime.GOMAXPROCS(2)
|
||||
success := make(chan bool, 1)
|
||||
go func() {
|
||||
buf := bytes.Repeat([]byte("\u035D"), 1024*1024)
|
||||
buf = append(buf, "\u035B"...)
|
||||
norm.NFC.Append(nil, buf...)
|
||||
success <- true
|
||||
}()
|
||||
timeout := time.After(1 * time.Second)
|
||||
select {
|
||||
case <-success:
|
||||
// test completed before the timeout
|
||||
case <-timeout:
|
||||
errorCount++
|
||||
logger.Printf(`unexpectedly long time to complete PerformanceTest`)
|
||||
}
|
||||
}
|
||||
@@ -112,6 +112,7 @@ func (r *normReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
panic("should not reach here")
|
||||
}
|
||||
|
||||
// Reader returns a new reader that implements Read
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/readwriter_test.go (56 lines, generated, vendored, normal file)
@@ -0,0 +1,56 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var bufSizes = []int{1, 2, 3, 4, 5, 6, 7, 8, 100, 101, 102, 103, 4000, 4001, 4002, 4003}
|
||||
|
||||
func readFunc(size int) appendFunc {
|
||||
return func(f Form, out []byte, s string) []byte {
|
||||
out = append(out, s...)
|
||||
r := f.Reader(bytes.NewBuffer(out))
|
||||
buf := make([]byte, size)
|
||||
result := []byte{}
|
||||
for n, err := 0, error(nil); err == nil; {
|
||||
n, err = r.Read(buf)
|
||||
result = append(result, buf[:n]...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
func TestReader(t *testing.T) {
|
||||
for _, s := range bufSizes {
|
||||
name := fmt.Sprintf("TestReader%d", s)
|
||||
runNormTests(t, name, readFunc(s))
|
||||
}
|
||||
}
|
||||
|
||||
func writeFunc(size int) appendFunc {
|
||||
return func(f Form, out []byte, s string) []byte {
|
||||
in := append(out, s...)
|
||||
result := new(bytes.Buffer)
|
||||
w := f.Writer(result)
|
||||
buf := make([]byte, size)
|
||||
for n := 0; len(in) > 0; in = in[n:] {
|
||||
n = copy(buf, in)
|
||||
_, _ = w.Write(buf[:n])
|
||||
}
|
||||
w.Close()
|
||||
return result.Bytes()
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
for _, s := range bufSizes {
|
||||
name := fmt.Sprintf("TestWriter%d", s)
|
||||
runNormTests(t, name, writeFunc(s))
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/tables.go (6989 lines, generated, vendored, normal file; diff suppressed because it is too large)
@@ -7,16 +7,13 @@ package norm
|
||||
import (
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/transform"
|
||||
"code.google.com/p/go.text/transform"
|
||||
)
|
||||
|
||||
// Reset implements the Reset method of the transform.Transformer interface.
|
||||
func (Form) Reset() {}
|
||||
|
||||
// Transform implements the Transform method of the transform.Transformer
|
||||
// interface. It may need to write segments of up to MaxSegmentSize at once.
|
||||
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
|
||||
// least of size MaxTransformChunkSize to be guaranteed of progress.
|
||||
// Transform implements the transform.Transformer interface. It may need to
|
||||
// write segments of up to MaxSegmentSize at once. Users should either catch
|
||||
// ErrShortDst and allow dst to grow or have dst be at least of size
|
||||
// MaxTransformChunkSize to be guaranteed of progress.
|
||||
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := 0
|
||||
// Cap the maximum number of src bytes to check.
|
||||
@@ -40,7 +37,7 @@ func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
|
||||
}
|
||||
|
||||
func flushTransform(rb *reorderBuffer) bool {
|
||||
// Write out (must fully fit in dst, or else it is an ErrShortDst).
|
||||
// Write out (must fully fit in dst, or else it is a ErrShortDst).
|
||||
if len(rb.out) < rb.nrune*utf8.UTFMax {
|
||||
return false
|
||||
}
|
||||
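The Transform documentation above warns that a destination smaller than MaxTransformChunkSize may fail with ErrShortDst; a minimal, hedged sketch of the grow-and-retry loop a caller could use, assuming the golang.org/x/text import paths and an illustrative input (the sizes mirror the test cases in transform_test.go below):

package main

import (
	"fmt"

	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	src := []byte("a\u0300") // 'a' plus combining grave; NFC composes it to "à"
	dst := make([]byte, 1)   // deliberately smaller than the composed segment
	for {
		nDst, _, err := norm.NFC.Transform(dst, src, true)
		if err == transform.ErrShortDst {
			dst = make([]byte, 2*len(dst)) // grow the destination and retry
			continue
		}
		fmt.Printf("%+q %v\n", dst[:nDst], err) // prints "\u00e0" <nil> once dst is large enough
		break
	}
}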
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/transform_test.go (101 lines, generated, vendored, normal file)
@@ -0,0 +1,101 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"code.google.com/p/go.text/transform"
|
||||
)
|
||||
|
||||
func TestTransform(t *testing.T) {
|
||||
tests := []struct {
|
||||
f Form
|
||||
in, out string
|
||||
eof bool
|
||||
dstSize int
|
||||
err error
|
||||
}{
|
||||
{NFC, "ab", "ab", true, 2, nil},
|
||||
{NFC, "qx", "qx", true, 2, nil},
|
||||
{NFD, "qx", "qx", true, 2, nil},
|
||||
{NFC, "", "", true, 1, nil},
|
||||
{NFD, "", "", true, 1, nil},
|
||||
{NFC, "", "", false, 1, nil},
|
||||
{NFD, "", "", false, 1, nil},
|
||||
|
||||
// Normalized segment does not fit in destination.
|
||||
{NFD, "ö", "", true, 1, transform.ErrShortDst},
|
||||
{NFD, "ö", "", true, 2, transform.ErrShortDst},
|
||||
|
||||
// As an artifact of the algorithm, only full segments are written.
|
||||
// This is not strictly required, and some bytes could be written.
|
||||
// In practice, for Transform to not block, the destination buffer
|
||||
// should be at least MaxSegmentSize to work anyway and these edge
|
||||
// conditions will be relatively rare.
|
||||
{NFC, "ab", "", true, 1, transform.ErrShortDst},
|
||||
// This is even true for inert runes.
|
||||
{NFC, "qx", "", true, 1, transform.ErrShortDst},
|
||||
{NFC, "a\u0300abc", "\u00e0a", true, 4, transform.ErrShortDst},
|
||||
|
||||
// We cannot write a segment if successive runes could still change the result.
|
||||
{NFD, "ö", "", false, 3, transform.ErrShortSrc},
|
||||
{NFC, "a\u0300", "", false, 4, transform.ErrShortSrc},
|
||||
{NFD, "a\u0300", "", false, 4, transform.ErrShortSrc},
|
||||
{NFC, "ö", "", false, 3, transform.ErrShortSrc},
|
||||
|
||||
{NFC, "a\u0300", "", true, 1, transform.ErrShortDst},
|
||||
// Theoretically could fit, but won't due to simplified checks.
|
||||
{NFC, "a\u0300", "", true, 2, transform.ErrShortDst},
|
||||
{NFC, "a\u0300", "", true, 3, transform.ErrShortDst},
|
||||
{NFC, "a\u0300", "\u00e0", true, 4, nil},
|
||||
|
||||
{NFD, "öa\u0300", "o\u0308", false, 8, transform.ErrShortSrc},
|
||||
{NFD, "öa\u0300ö", "o\u0308a\u0300", true, 8, transform.ErrShortDst},
|
||||
{NFD, "öa\u0300ö", "o\u0308a\u0300", false, 12, transform.ErrShortSrc},
|
||||
|
||||
// Illegal input is copied verbatim.
|
||||
{NFD, "\xbd\xb2=\xbc ", "\xbd\xb2=\xbc ", true, 8, nil},
|
||||
}
|
||||
b := make([]byte, 100)
|
||||
for i, tt := range tests {
|
||||
nDst, _, err := tt.f.Transform(b[:tt.dstSize], []byte(tt.in), tt.eof)
|
||||
out := string(b[:nDst])
|
||||
if out != tt.out || err != tt.err {
|
||||
t.Errorf("%d: was %+q (%v); want %+q (%v)", i, out, err, tt.out, tt.err)
|
||||
}
|
||||
if want := tt.f.String(tt.in)[:nDst]; want != out {
|
||||
t.Errorf("%d: incorrect normalization: was %+q; want %+q", i, out, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var transBufSizes = []int{
|
||||
MaxTransformChunkSize,
|
||||
3 * MaxTransformChunkSize / 2,
|
||||
2 * MaxTransformChunkSize,
|
||||
3 * MaxTransformChunkSize,
|
||||
100 * MaxTransformChunkSize,
|
||||
}
|
||||
|
||||
func doTransNorm(f Form, buf []byte, b []byte) []byte {
|
||||
acc := []byte{}
|
||||
for p := 0; p < len(b); {
|
||||
nd, ns, _ := f.Transform(buf[:], b[p:], true)
|
||||
p += ns
|
||||
acc = append(acc, buf[:nd]...)
|
||||
}
|
||||
return acc
|
||||
}
|
||||
|
||||
func TestTransformNorm(t *testing.T) {
|
||||
for _, sz := range transBufSizes {
|
||||
buf := make([]byte, sz)
|
||||
runNormTests(t, fmt.Sprintf("Transform:%d", sz), func(f Form, out []byte, s string) []byte {
|
||||
return doTransNorm(f, buf, append(out, s...))
|
||||
})
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/trie.go (232 lines, generated, vendored, normal file)
@@ -0,0 +1,232 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
type valueRange struct {
|
||||
value uint16 // header: value:stride
|
||||
lo, hi byte // header: lo:n
|
||||
}
|
||||
|
||||
type trie struct {
|
||||
index []uint8
|
||||
values []uint16
|
||||
sparse []valueRange
|
||||
sparseOffset []uint16
|
||||
cutoff uint8 // indices >= cutoff are sparse
|
||||
}
|
||||
|
||||
// lookupValue determines the type of block n and looks up the value for b.
|
||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
||||
// the value for b is given by r.value + (b - r.lo) * stride.
|
||||
func (t *trie) lookupValue(n uint8, b byte) uint16 {
|
||||
if n < t.cutoff {
|
||||
return t.values[uint16(n)<<6+uint16(b)]
|
||||
}
|
||||
offset := t.sparseOffset[n-t.cutoff]
|
||||
header := t.sparse[offset]
|
||||
lo := offset + 1
|
||||
hi := lo + uint16(header.lo)
|
||||
for lo < hi {
|
||||
m := lo + (hi-lo)/2
|
||||
r := t.sparse[m]
|
||||
if r.lo <= b && b <= r.hi {
|
||||
return r.value + uint16(b-r.lo)*header.value
|
||||
}
|
||||
if b < r.lo {
|
||||
hi = m
|
||||
} else {
|
||||
lo = m + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
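// Worked example (illustrative, following the lookupValue comment above): for
// a two-byte UTF-8 encoding c0 c1, lookup below maps the lead byte through
// t.index[c0] to a block number n and then calls t.lookupValue(n, c1).
// If n < t.cutoff the result is the dense read t.values[uint16(n)<<6+uint16(c1)];
// otherwise the sparse ranges of block n are binary-searched and, for the
// matching range r, the result is r.value + uint16(c1-r.lo)*header.value,
// header.value holding the stride.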
|
||||
const (
|
||||
t1 = 0x00 // 0000 0000
|
||||
tx = 0x80 // 1000 0000
|
||||
t2 = 0xC0 // 1100 0000
|
||||
t3 = 0xE0 // 1110 0000
|
||||
t4 = 0xF0 // 1111 0000
|
||||
t5 = 0xF8 // 1111 1000
|
||||
t6 = 0xFC // 1111 1100
|
||||
te = 0xFE // 1111 1110
|
||||
)
|
||||
|
||||
// lookup returns the trie value for the first UTF-8 encoding in s and
|
||||
// the width in bytes of this encoding. The size will be 0 if s does not
|
||||
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
|
||||
func (t *trie) lookup(s []byte) (v uint16, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < tx:
|
||||
return t.values[c0], 1
|
||||
case c0 < t2:
|
||||
return 0, 1
|
||||
case c0 < t3:
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
return t.lookupValue(i, c1), 2
|
||||
case c0 < t4:
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := uint16(i)<<6 + uint16(c1)
|
||||
i = t.index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
return t.lookupValue(i, c2), 3
|
||||
case c0 < t5:
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := uint16(i)<<6 + uint16(c1)
|
||||
i = t.index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
o = uint16(i)<<6 + uint16(c2)
|
||||
i = t.index[o]
|
||||
c3 := s[3]
|
||||
if c3 < tx || t2 <= c3 {
|
||||
return 0, 3
|
||||
}
|
||||
return t.lookupValue(i, c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// lookupString returns the trie value for the first UTF-8 encoding in s and
|
||||
// the width in bytes of this encoding. The size will be 0 if s does not
|
||||
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
|
||||
func (t *trie) lookupString(s string) (v uint16, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < tx:
|
||||
return t.values[c0], 1
|
||||
case c0 < t2:
|
||||
return 0, 1
|
||||
case c0 < t3:
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
return t.lookupValue(i, c1), 2
|
||||
case c0 < t4:
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := uint16(i)<<6 + uint16(c1)
|
||||
i = t.index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
return t.lookupValue(i, c2), 3
|
||||
case c0 < t5:
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := uint16(i)<<6 + uint16(c1)
|
||||
i = t.index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
o = uint16(i)<<6 + uint16(c2)
|
||||
i = t.index[o]
|
||||
c3 := s[3]
|
||||
if c3 < tx || t2 <= c3 {
|
||||
return 0, 3
|
||||
}
|
||||
return t.lookupValue(i, c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
|
||||
// s must hold a full encoding.
|
||||
func (t *trie) lookupUnsafe(s []byte) uint16 {
|
||||
c0 := s[0]
|
||||
if c0 < tx {
|
||||
return t.values[c0]
|
||||
}
|
||||
if c0 < t2 {
|
||||
return 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
if c0 < t3 {
|
||||
return t.lookupValue(i, s[1])
|
||||
}
|
||||
i = t.index[uint16(i)<<6+uint16(s[1])]
|
||||
if c0 < t4 {
|
||||
return t.lookupValue(i, s[2])
|
||||
}
|
||||
i = t.index[uint16(i)<<6+uint16(s[2])]
|
||||
if c0 < t5 {
|
||||
return t.lookupValue(i, s[3])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
|
||||
// s must hold a full encoding.
|
||||
func (t *trie) lookupStringUnsafe(s string) uint16 {
|
||||
c0 := s[0]
|
||||
if c0 < tx {
|
||||
return t.values[c0]
|
||||
}
|
||||
if c0 < t2 {
|
||||
return 0
|
||||
}
|
||||
i := t.index[c0]
|
||||
if c0 < t3 {
|
||||
return t.lookupValue(i, s[1])
|
||||
}
|
||||
i = t.index[uint16(i)<<6+uint16(s[1])]
|
||||
if c0 < t4 {
|
||||
return t.lookupValue(i, s[2])
|
||||
}
|
||||
i = t.index[uint16(i)<<6+uint16(s[2])]
|
||||
if c0 < t5 {
|
||||
return t.lookupValue(i, s[3])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/trie_test.go (152 lines, generated, vendored, normal file)
@@ -0,0 +1,152 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Test data is located in triedata_test.go; generated by maketesttables.
|
||||
var testdata = testdataTrie
|
||||
|
||||
type rangeTest struct {
|
||||
block uint8
|
||||
lookup byte
|
||||
result uint16
|
||||
table []valueRange
|
||||
offsets []uint16
|
||||
}
|
||||
|
||||
var range1Off = []uint16{0, 2}
|
||||
var range1 = []valueRange{
|
||||
{0, 1, 0},
|
||||
{1, 0x80, 0x80},
|
||||
{0, 2, 0},
|
||||
{1, 0x80, 0x80},
|
||||
{9, 0xff, 0xff},
|
||||
}
|
||||
|
||||
var rangeTests = []rangeTest{
|
||||
{10, 0x80, 1, range1, range1Off},
|
||||
{10, 0x00, 0, range1, range1Off},
|
||||
{11, 0x80, 1, range1, range1Off},
|
||||
{11, 0xff, 9, range1, range1Off},
|
||||
{11, 0x00, 0, range1, range1Off},
|
||||
}
|
||||
|
||||
func TestLookupSparse(t *testing.T) {
|
||||
for i, test := range rangeTests {
|
||||
n := trie{sparse: test.table, sparseOffset: test.offsets, cutoff: 10}
|
||||
v := n.lookupValue(test.block, test.lookup)
|
||||
if v != test.result {
|
||||
t.Errorf("LookupSparse:%d: found %X; want %X", i, v, test.result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test cases for illegal runes.
|
||||
type trietest struct {
|
||||
size int
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
var tests = []trietest{
|
||||
// illegal runes
|
||||
{1, []byte{0x80}},
|
||||
{1, []byte{0xFF}},
|
||||
{1, []byte{t2, tx - 1}},
|
||||
{1, []byte{t2, t2}},
|
||||
{2, []byte{t3, tx, tx - 1}},
|
||||
{2, []byte{t3, tx, t2}},
|
||||
{1, []byte{t3, tx - 1, tx}},
|
||||
{3, []byte{t4, tx, tx, tx - 1}},
|
||||
{3, []byte{t4, tx, tx, t2}},
|
||||
{1, []byte{t4, t2, tx, tx - 1}},
|
||||
{2, []byte{t4, tx, t2, tx - 1}},
|
||||
|
||||
// short runes
|
||||
{0, []byte{t2}},
|
||||
{0, []byte{t3, tx}},
|
||||
{0, []byte{t4, tx, tx}},
|
||||
|
||||
// we only support UTF-8 up to utf8.UTFMax bytes (4 bytes)
|
||||
{1, []byte{t5, tx, tx, tx, tx}},
|
||||
{1, []byte{t6, tx, tx, tx, tx, tx}},
|
||||
}
|
||||
|
||||
func mkUTF8(r rune) ([]byte, int) {
|
||||
var b [utf8.UTFMax]byte
|
||||
sz := utf8.EncodeRune(b[:], r)
|
||||
return b[:sz], sz
|
||||
}
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
for i, tt := range testRunes {
|
||||
b, szg := mkUTF8(tt)
|
||||
v, szt := testdata.lookup(b)
|
||||
if int(v) != i {
|
||||
t.Errorf("lookup(%U): found value %#x, expected %#x", tt, v, i)
|
||||
}
|
||||
if szt != szg {
|
||||
t.Errorf("lookup(%U): found size %d, expected %d", tt, szt, szg)
|
||||
}
|
||||
}
|
||||
for i, tt := range tests {
|
||||
v, sz := testdata.lookup(tt.bytes)
|
||||
if v != 0 {
|
||||
t.Errorf("lookup of illegal rune, case %d: found value %#x, expected 0", i, v)
|
||||
}
|
||||
if sz != tt.size {
|
||||
t.Errorf("lookup of illegal rune, case %d: found size %d, expected %d", i, sz, tt.size)
|
||||
}
|
||||
}
|
||||
// Verify defaults.
|
||||
if v, _ := testdata.lookup([]byte{0xC1, 0x8C}); v != 0 {
|
||||
t.Errorf("lookup of non-existing rune should be 0; found %X", v)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupUnsafe(t *testing.T) {
|
||||
for i, tt := range testRunes {
|
||||
b, _ := mkUTF8(tt)
|
||||
v := testdata.lookupUnsafe(b)
|
||||
if int(v) != i {
|
||||
t.Errorf("lookupUnsafe(%U): found value %#x, expected %#x", i, v, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupString(t *testing.T) {
|
||||
for i, tt := range testRunes {
|
||||
b, szg := mkUTF8(tt)
|
||||
v, szt := testdata.lookupString(string(b))
|
||||
if int(v) != i {
|
||||
t.Errorf("lookup(%U): found value %#x, expected %#x", i, v, i)
|
||||
}
|
||||
if szt != szg {
|
||||
t.Errorf("lookup(%U): found size %d, expected %d", i, szt, szg)
|
||||
}
|
||||
}
|
||||
for i, tt := range tests {
|
||||
v, sz := testdata.lookupString(string(tt.bytes))
|
||||
if int(v) != 0 {
|
||||
t.Errorf("lookup of illegal rune, case %d: found value %#x, expected 0", i, v)
|
||||
}
|
||||
if sz != tt.size {
|
||||
t.Errorf("lookup of illegal rune, case %d: found size %d, expected %d", i, sz, tt.size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupStringUnsafe(t *testing.T) {
|
||||
for i, tt := range testRunes {
|
||||
b, _ := mkUTF8(tt)
|
||||
v := testdata.lookupStringUnsafe(string(b))
|
||||
if int(v) != i {
|
||||
t.Errorf("lookupUnsafe(%U): found value %#x, expected %#x", i, v, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/triedata_test.go (85 lines, generated, vendored, normal file)
@@ -0,0 +1,85 @@
|
||||
// Generated by running
|
||||
// maketesttables
|
||||
// DO NOT EDIT
|
||||
|
||||
package norm
|
||||
|
||||
var testRunes = []int32{1, 12, 127, 128, 256, 2047, 2048, 2457, 65535, 65536, 65793, 1114111, 512, 513, 514, 528, 533}
|
||||
|
||||
// testdataValues: 192 entries, 384 bytes
|
||||
// Block 2 is the null block.
|
||||
var testdataValues = [192]uint16{
|
||||
// Block 0x0, offset 0x0
|
||||
0x000c: 0x0001,
|
||||
// Block 0x1, offset 0x40
|
||||
0x007f: 0x0002,
|
||||
// Block 0x2, offset 0x80
|
||||
}
|
||||
|
||||
// testdataSparseOffset: 10 entries, 20 bytes
|
||||
var testdataSparseOffset = []uint16{0x0, 0x2, 0x4, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14}
|
||||
|
||||
// testdataSparseValues: 22 entries, 88 bytes
|
||||
var testdataSparseValues = [22]valueRange{
|
||||
// Block 0x0, offset 0x1
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0003, lo: 0x80, hi: 0x80},
|
||||
// Block 0x1, offset 0x2
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0004, lo: 0x80, hi: 0x80},
|
||||
// Block 0x2, offset 0x3
|
||||
{value: 0x0001, lo: 0x03},
|
||||
{value: 0x000c, lo: 0x80, hi: 0x82},
|
||||
{value: 0x000f, lo: 0x90, hi: 0x90},
|
||||
{value: 0x0010, lo: 0x95, hi: 0x95},
|
||||
// Block 0x3, offset 0x4
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0005, lo: 0xbf, hi: 0xbf},
|
||||
// Block 0x4, offset 0x5
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0006, lo: 0x80, hi: 0x80},
|
||||
// Block 0x5, offset 0x6
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0007, lo: 0x99, hi: 0x99},
|
||||
// Block 0x6, offset 0x7
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0008, lo: 0xbf, hi: 0xbf},
|
||||
// Block 0x7, offset 0x8
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x0009, lo: 0x80, hi: 0x80},
|
||||
// Block 0x8, offset 0x9
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x000a, lo: 0x81, hi: 0x81},
|
||||
// Block 0x9, offset 0xa
|
||||
{value: 0x0000, lo: 0x01},
|
||||
{value: 0x000b, lo: 0xbf, hi: 0xbf},
|
||||
}
|
||||
|
||||
// testdataLookup: 640 bytes
|
||||
// Block 0 is the null block.
|
||||
var testdataLookup = [640]uint8{
|
||||
// Block 0x0, offset 0x0
|
||||
// Block 0x1, offset 0x40
|
||||
// Block 0x2, offset 0x80
|
||||
// Block 0x3, offset 0xc0
|
||||
0x0c2: 0x01, 0x0c4: 0x02,
|
||||
0x0c8: 0x03,
|
||||
0x0df: 0x04,
|
||||
0x0e0: 0x02,
|
||||
0x0ef: 0x03,
|
||||
0x0f0: 0x05, 0x0f4: 0x07,
|
||||
// Block 0x4, offset 0x100
|
||||
0x120: 0x05, 0x126: 0x06,
|
||||
// Block 0x5, offset 0x140
|
||||
0x17f: 0x07,
|
||||
// Block 0x6, offset 0x180
|
||||
0x180: 0x08, 0x184: 0x09,
|
||||
// Block 0x7, offset 0x1c0
|
||||
0x1d0: 0x04,
|
||||
// Block 0x8, offset 0x200
|
||||
0x23f: 0x0a,
|
||||
// Block 0x9, offset 0x240
|
||||
0x24f: 0x06,
|
||||
}
|
||||
|
||||
var testdataTrie = trie{testdataLookup[:], testdataValues[:], testdataSparseValues[:], testdataSparseOffset[:], 1}
|
||||
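// Illustrative reading of the generated tables above (not part of the
// original file): the single-byte rune 12 (testRunes[1]) is resolved directly
// from testdataValues, where entry 0x000c holds 0x0001, so a lookup of that
// rune yields value 1 with size 1.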
317 Godeps/_workspace/src/code.google.com/p/go.text/unicode/norm/triegen.go (generated, vendored, new file)
@@ -0,0 +1,317 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Trie table generator.
|
||||
// Used by make*tables tools to generate a go file with trie data structures
|
||||
// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte
|
||||
// sequence are used to lookup offsets in the index table to be used for the
|
||||
// next byte. The last byte is used to index into a table with 16-bit values.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"log"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 64
|
||||
blockOffset = 2 // Subtract two blocks to compensate for the 0x80 added to continuation bytes.
|
||||
maxSparseEntries = 16
|
||||
)
|
||||
|
||||
// Intermediate trie structure
|
||||
type trieNode struct {
|
||||
table [256]*trieNode
|
||||
value int
|
||||
b byte
|
||||
leaf bool
|
||||
}
|
||||
|
||||
func newNode() *trieNode {
|
||||
return new(trieNode)
|
||||
}
|
||||
|
||||
func (n trieNode) String() string {
|
||||
s := fmt.Sprint("trieNode{table: { non-nil at index: ")
|
||||
for i, v := range n.table {
|
||||
if v != nil {
|
||||
s += fmt.Sprintf("%d, ", i)
|
||||
}
|
||||
}
|
||||
s += fmt.Sprintf("}, value:%#x, b:%#x leaf:%v}", n.value, n.b, n.leaf)
|
||||
return s
|
||||
}
|
||||
|
||||
func (n trieNode) isInternal() bool {
|
||||
internal := true
|
||||
for i := 0; i < 256; i++ {
|
||||
if nn := n.table[i]; nn != nil {
|
||||
if !internal && !nn.leaf {
|
||||
log.Fatalf("triegen: isInternal: node contains both leaf and non-leaf children (%v)", n)
|
||||
}
|
||||
internal = internal && !nn.leaf
|
||||
}
|
||||
}
|
||||
return internal
|
||||
}
|
||||
|
||||
func (n trieNode) mostFrequentStride() int {
|
||||
counts := make(map[int]int)
|
||||
v := 0
|
||||
for _, t := range n.table[0x80 : 0x80+blockSize] {
|
||||
if t != nil {
|
||||
if stride := t.value - v; v != 0 && stride >= 0 {
|
||||
counts[stride]++
|
||||
}
|
||||
v = t.value
|
||||
} else {
|
||||
v = 0
|
||||
}
|
||||
}
|
||||
var maxs, maxc int
|
||||
for stride, cnt := range counts {
|
||||
if cnt > maxc || (cnt == maxc && stride < maxs) {
|
||||
maxs, maxc = stride, cnt
|
||||
}
|
||||
}
|
||||
return maxs
|
||||
}
|
||||
|
||||
func (n trieNode) countSparseEntries() int {
|
||||
stride := n.mostFrequentStride()
|
||||
var count, v int
|
||||
for _, t := range n.table[0x80 : 0x80+blockSize] {
|
||||
tv := 0
|
||||
if t != nil {
|
||||
tv = t.value
|
||||
}
|
||||
if tv-v != stride {
|
||||
if tv != 0 {
|
||||
count++
|
||||
}
|
||||
}
|
||||
v = tv
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (n *trieNode) insert(r rune, value uint16) {
|
||||
var p [utf8.UTFMax]byte
|
||||
sz := utf8.EncodeRune(p[:], r)
|
||||
|
||||
for i := 0; i < sz; i++ {
|
||||
if n.leaf {
|
||||
log.Fatalf("triegen: insert: node (%#v) should not be a leaf", n)
|
||||
}
|
||||
nn := n.table[p[i]]
|
||||
if nn == nil {
|
||||
nn = newNode()
|
||||
nn.b = p[i]
|
||||
n.table[p[i]] = nn
|
||||
}
|
||||
n = nn
|
||||
}
|
||||
n.value = int(value)
|
||||
n.leaf = true
|
||||
}
|
||||
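// Illustrative usage of the intermediate trie above (a sketch, not part of
// the original file): insert encodes the rune as UTF-8 and creates one child
// node per byte, marking the final node as a leaf carrying the 16-bit value.
//
//	root := newNode()
//	root.insert('A', 0x0001)      // 1-byte rune: one leaf directly under root
//	root.insert('\u00e9', 0x0002) // 2-byte rune: an internal node, then a leaf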
|
||||
type nodeIndex struct {
|
||||
lookupBlocks []*trieNode
|
||||
valueBlocks []*trieNode
|
||||
sparseBlocks []*trieNode
|
||||
sparseOffset []uint16
|
||||
sparseCount int
|
||||
|
||||
lookupBlockIdx map[uint32]int
|
||||
valueBlockIdx map[uint32]int
|
||||
}
|
||||
|
||||
func newIndex() *nodeIndex {
|
||||
index := &nodeIndex{}
|
||||
index.lookupBlocks = make([]*trieNode, 0)
|
||||
index.valueBlocks = make([]*trieNode, 0)
|
||||
index.sparseBlocks = make([]*trieNode, 0)
|
||||
index.sparseOffset = make([]uint16, 1)
|
||||
index.lookupBlockIdx = make(map[uint32]int)
|
||||
index.valueBlockIdx = make(map[uint32]int)
|
||||
return index
|
||||
}
|
||||
|
||||
func computeOffsets(index *nodeIndex, n *trieNode) int {
|
||||
if n.leaf {
|
||||
return n.value
|
||||
}
|
||||
hasher := crc32.New(crc32.MakeTable(crc32.IEEE))
|
||||
// We only index continuation bytes.
|
||||
for i := 0; i < blockSize; i++ {
|
||||
v := 0
|
||||
if nn := n.table[0x80+i]; nn != nil {
|
||||
v = computeOffsets(index, nn)
|
||||
}
|
||||
hasher.Write([]byte{uint8(v >> 8), uint8(v)})
|
||||
}
|
||||
h := hasher.Sum32()
|
||||
if n.isInternal() {
|
||||
v, ok := index.lookupBlockIdx[h]
|
||||
if !ok {
|
||||
v = len(index.lookupBlocks) - blockOffset
|
||||
index.lookupBlocks = append(index.lookupBlocks, n)
|
||||
index.lookupBlockIdx[h] = v
|
||||
}
|
||||
n.value = v
|
||||
} else {
|
||||
v, ok := index.valueBlockIdx[h]
|
||||
if !ok {
|
||||
if c := n.countSparseEntries(); c > maxSparseEntries {
|
||||
v = len(index.valueBlocks) - blockOffset
|
||||
index.valueBlocks = append(index.valueBlocks, n)
|
||||
index.valueBlockIdx[h] = v
|
||||
} else {
|
||||
v = -len(index.sparseOffset)
|
||||
index.sparseBlocks = append(index.sparseBlocks, n)
|
||||
index.sparseOffset = append(index.sparseOffset, uint16(index.sparseCount))
|
||||
index.sparseCount += c + 1
|
||||
index.valueBlockIdx[h] = v
|
||||
}
|
||||
}
|
||||
n.value = v
|
||||
}
|
||||
return n.value
|
||||
}
|
||||
|
||||
func printValueBlock(nr int, n *trieNode, offset int) {
|
||||
boff := nr * blockSize
|
||||
fmt.Printf("\n// Block %#x, offset %#x", nr, boff)
|
||||
var printnewline bool
|
||||
for i := 0; i < blockSize; i++ {
|
||||
if i%6 == 0 {
|
||||
printnewline = true
|
||||
}
|
||||
v := 0
|
||||
if nn := n.table[i+offset]; nn != nil {
|
||||
v = nn.value
|
||||
}
|
||||
if v != 0 {
|
||||
if printnewline {
|
||||
fmt.Printf("\n")
|
||||
printnewline = false
|
||||
}
|
||||
fmt.Printf("%#04x:%#04x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func printSparseBlock(nr int, n *trieNode) {
|
||||
boff := -n.value
|
||||
fmt.Printf("\n// Block %#x, offset %#x", nr, boff)
|
||||
v := 0
|
||||
//stride := f(n)
|
||||
stride := n.mostFrequentStride()
|
||||
c := n.countSparseEntries()
|
||||
fmt.Printf("\n{value:%#04x,lo:%#02x},", stride, uint8(c))
|
||||
for i, nn := range n.table[0x80 : 0x80+blockSize] {
|
||||
nv := 0
|
||||
if nn != nil {
|
||||
nv = nn.value
|
||||
}
|
||||
if nv-v != stride {
|
||||
if v != 0 {
|
||||
fmt.Printf(",hi:%#02x},", 0x80+i-1)
|
||||
}
|
||||
if nv != 0 {
|
||||
fmt.Printf("\n{value:%#04x,lo:%#02x", nv, nn.b)
|
||||
}
|
||||
}
|
||||
v = nv
|
||||
}
|
||||
if v != 0 {
|
||||
fmt.Printf(",hi:%#02x},", 0x80+blockSize-1)
|
||||
}
|
||||
}
|
||||
|
||||
func printLookupBlock(nr int, n *trieNode, offset, cutoff int) {
|
||||
boff := nr * blockSize
|
||||
fmt.Printf("\n// Block %#x, offset %#x", nr, boff)
|
||||
var printnewline bool
|
||||
for i := 0; i < blockSize; i++ {
|
||||
if i%8 == 0 {
|
||||
printnewline = true
|
||||
}
|
||||
v := 0
|
||||
if nn := n.table[i+offset]; nn != nil {
|
||||
v = nn.value
|
||||
}
|
||||
if v != 0 {
|
||||
if v < 0 {
|
||||
v = -v - 1 + cutoff
|
||||
}
|
||||
if printnewline {
|
||||
fmt.Printf("\n")
|
||||
printnewline = false
|
||||
}
|
||||
fmt.Printf("%#03x:%#02x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// printTables returns the size in bytes of the generated tables.
|
||||
func (t *trieNode) printTables(name string) int {
|
||||
index := newIndex()
|
||||
// Values for 7-bit ASCII are stored in the first two blocks, followed by the null block.
|
||||
index.valueBlocks = append(index.valueBlocks, nil, nil, nil)
|
||||
// The first bytes of multi-byte UTF-8 codepoints are indexed in the 4th block.
|
||||
index.lookupBlocks = append(index.lookupBlocks, nil, nil, nil, nil)
|
||||
// Index starter bytes of multi-byte UTF-8.
|
||||
for i := 0xC0; i < 0x100; i++ {
|
||||
if t.table[i] != nil {
|
||||
computeOffsets(index, t.table[i])
|
||||
}
|
||||
}
|
||||
|
||||
nv := len(index.valueBlocks) * blockSize
|
||||
fmt.Printf("// %sValues: %d entries, %d bytes\n", name, nv, nv*2)
|
||||
fmt.Printf("// Block 2 is the null block.\n")
|
||||
fmt.Printf("var %sValues = [%d]uint16 {", name, nv)
|
||||
printValueBlock(0, t, 0)
|
||||
printValueBlock(1, t, 64)
|
||||
printValueBlock(2, newNode(), 0)
|
||||
for i := 3; i < len(index.valueBlocks); i++ {
|
||||
printValueBlock(i, index.valueBlocks[i], 0x80)
|
||||
}
|
||||
fmt.Print("\n}\n\n")
|
||||
|
||||
ls := len(index.sparseBlocks)
|
||||
fmt.Printf("// %sSparseOffset: %d entries, %d bytes\n", name, ls, ls*2)
|
||||
fmt.Printf("var %sSparseOffset = %#v\n\n", name, index.sparseOffset[1:])
|
||||
|
||||
ns := index.sparseCount
|
||||
fmt.Printf("// %sSparseValues: %d entries, %d bytes\n", name, ns, ns*4)
|
||||
fmt.Printf("var %sSparseValues = [%d]valueRange {", name, ns)
|
||||
for i, n := range index.sparseBlocks {
|
||||
printSparseBlock(i, n)
|
||||
}
|
||||
fmt.Print("\n}\n\n")
|
||||
|
||||
cutoff := len(index.valueBlocks) - blockOffset
|
||||
ni := len(index.lookupBlocks) * blockSize
|
||||
fmt.Printf("// %sLookup: %d bytes\n", name, ni)
|
||||
fmt.Printf("// Block 0 is the null block.\n")
|
||||
fmt.Printf("var %sLookup = [%d]uint8 {", name, ni)
|
||||
printLookupBlock(0, newNode(), 0, cutoff)
|
||||
printLookupBlock(1, newNode(), 0, cutoff)
|
||||
printLookupBlock(2, newNode(), 0, cutoff)
|
||||
printLookupBlock(3, t, 0xC0, cutoff)
|
||||
for i := 4; i < len(index.lookupBlocks); i++ {
|
||||
printLookupBlock(i, index.lookupBlocks[i], 0x80, cutoff)
|
||||
}
|
||||
fmt.Print("\n}\n\n")
|
||||
fmt.Printf("var %sTrie = trie{ %sLookup[:], %sValues[:], %sSparseValues[:], %sSparseOffset[:], %d}\n\n",
|
||||
name, name, name, name, name, cutoff)
|
||||
return nv*2 + ns*4 + ni + ls*2
|
||||
}
|
||||
124 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go (generated, vendored, new file)
@@ -0,0 +1,124 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
var ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n == 0 {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
if uint64(int(v)) != v {
|
||||
return 0, 0, errors.New("snappy: decoded block is too large")
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
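// Illustrative example (not in the original source): a block whose decoded
// length is 5 starts with the single varint byte 0x05, so
// decodedLen([]byte{0x05, ...}) returns blockLen = 5, headerLen = 1.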
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
// It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) < dLen {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
|
||||
var d, offset, length int
|
||||
for s < len(src) {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s += 1
|
||||
case x == 60:
|
||||
s += 2
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-2]) | uint(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
|
||||
}
|
||||
length = int(x + 1)
|
||||
if length <= 0 {
|
||||
return nil, errors.New("snappy: unsupported literal length")
|
||||
}
|
||||
if length > len(dst)-d || length > len(src)-s {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length = 4 + int(src[s-2])>>2&0x7
|
||||
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
|
||||
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
if s > len(src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(src[s-2]) | int(src[s-1])<<8
|
||||
|
||||
case tagCopy4:
|
||||
return nil, errors.New("snappy: unsupported COPY_4 tag")
|
||||
}
|
||||
|
||||
end := d + length
|
||||
if offset > d || end > len(dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
for ; d < end; d++ {
|
||||
dst[d] = dst[d-offset]
|
||||
}
|
||||
}
|
||||
if d != dLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
174 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go (generated, vendored, new file)
@@ -0,0 +1,174 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// We limit how far copy back-references can go, the same as the C++ code.
|
||||
const maxOffset = 1 << 15
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
func emitLiteral(dst, lit []byte) int {
|
||||
i, n := 0, uint(len(lit)-1)
|
||||
switch {
|
||||
case n < 60:
|
||||
dst[0] = uint8(n)<<2 | tagLiteral
|
||||
i = 1
|
||||
case n < 1<<8:
|
||||
dst[0] = 60<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
i = 2
|
||||
case n < 1<<16:
|
||||
dst[0] = 61<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
i = 3
|
||||
case n < 1<<24:
|
||||
dst[0] = 62<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[3] = uint8(n >> 16)
|
||||
i = 4
|
||||
case int64(n) < 1<<32:
|
||||
dst[0] = 63<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[3] = uint8(n >> 16)
|
||||
dst[4] = uint8(n >> 24)
|
||||
i = 5
|
||||
default:
|
||||
panic("snappy: source buffer is too long")
|
||||
}
|
||||
if copy(dst[i:], lit) != len(lit) {
|
||||
panic("snappy: destination buffer is too short")
|
||||
}
|
||||
return i + len(lit)
|
||||
}
|
||||
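// Illustrative example (not in the original source): for a 10-byte literal,
// n = 9 < 60, so dst[0] = 9<<2 | tagLiteral = 0x24, the ten literal bytes
// follow, and emitLiteral returns 11.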
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
func emitCopy(dst []byte, offset, length int) int {
|
||||
i := 0
|
||||
for length > 0 {
|
||||
x := length - 4
|
||||
if 0 <= x && x < 1<<3 && offset < 1<<11 {
|
||||
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
|
||||
dst[i+1] = uint8(offset)
|
||||
i += 2
|
||||
break
|
||||
}
|
||||
|
||||
x = length
|
||||
if x > 1<<6 {
|
||||
x = 1 << 6
|
||||
}
|
||||
dst[i+0] = uint8(x-1)<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= x
|
||||
}
|
||||
return i
|
||||
}
|
||||
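// Illustrative example (not in the original source): emitCopy(dst, 10, 5)
// takes the 2-byte tagCopy1 form because length-4 = 1 < 8 and offset 10 < 2048,
// writing dst[0] = 0<<5 | 1<<2 | tagCopy1 = 0x05 and dst[1] = 0x0a, then
// returning 2.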
|
||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
// It is valid to pass a nil dst.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
if n := MaxEncodedLen(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
// Return early if src is short.
|
||||
if len(src) <= 4 {
|
||||
if len(src) != 0 {
|
||||
d += emitLiteral(dst[d:], src)
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
|
||||
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
||||
const maxTableSize = 1 << 14
|
||||
shift, tableSize := uint(32-8), 1<<8
|
||||
for tableSize < maxTableSize && tableSize < len(src) {
|
||||
shift--
|
||||
tableSize *= 2
|
||||
}
|
||||
var table [maxTableSize]int
|
||||
|
||||
// Iterate over the source bytes.
|
||||
var (
|
||||
s int // The iterator position.
|
||||
t int // The last position with the same hash as s.
|
||||
lit int // The start position of any pending literal bytes.
|
||||
)
|
||||
for s+3 < len(src) {
|
||||
// Update the hash table.
|
||||
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
|
||||
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
|
||||
p := &table[(h*0x1e35a7bd)>>shift]
|
||||
// We need to store values in [-1, inf) in the table. To save
|
||||
// some initialization time, (re)use the table's zero value
|
||||
// and shift the values against this zero: add 1 on writes,
|
||||
// subtract 1 on reads.
|
||||
t, *p = *p-1, s+1
|
||||
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
|
||||
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
|
||||
s++
|
||||
continue
|
||||
}
|
||||
// Otherwise, we have a match. First, emit any pending literal bytes.
|
||||
if lit != s {
|
||||
d += emitLiteral(dst[d:], src[lit:s])
|
||||
}
|
||||
// Extend the match to be as long as possible.
|
||||
s0 := s
|
||||
s, t = s+4, t+4
|
||||
for s < len(src) && src[s] == src[t] {
|
||||
s++
|
||||
t++
|
||||
}
|
||||
// Emit the copied bytes.
|
||||
d += emitCopy(dst[d:], s-t, s-s0)
|
||||
lit = s
|
||||
}
|
||||
|
||||
// Emit any final pending literal bytes and return.
|
||||
if lit != len(src) {
|
||||
d += emitLiteral(dst[d:], src[lit:])
|
||||
}
|
||||
return dst[:d], nil
|
||||
}
|
||||
|
||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||
// uncompressed length.
|
||||
func MaxEncodedLen(srcLen int) int {
|
||||
// Compressed data can be defined as:
|
||||
// compressed := item* literal*
|
||||
// item := literal* copy
|
||||
//
|
||||
// The trailing literal sequence has a space blowup of at most 62/60
|
||||
// since a literal of length 60 needs one tag byte + one extra byte
|
||||
// for length information.
|
||||
//
|
||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||
// 4 bytes of data. Because of a special check in the encoding code,
|
||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||
// to at most the 62/60 blowup for representing literals.
|
||||
//
|
||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||
//
|
||||
// This last factor dominates the blowup, so the final estimate is:
|
||||
return 32 + srcLen + srcLen/6
|
||||
}
|
||||
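A quick worked example of the bound above (illustrative arithmetic only, not
from the original source): for srcLen = 60000, MaxEncodedLen returns
32 + 60000 + 60000/6 = 32 + 60000 + 10000 = 70032, so a 70032-byte output
buffer is always large enough for a 60000-byte input.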
38 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package snappy implements the snappy block-based compression format.
|
||||
// It aims for very high speeds and reasonable compression.
|
||||
//
|
||||
// The C++ snappy implementation is at http://code.google.com/p/snappy/
|
||||
package snappy
|
||||
|
||||
/*
|
||||
Each encoded block begins with the varint-encoded length of the decoded data,
|
||||
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
|
||||
first byte of each chunk is broken into its 2 least and 6 most significant bits
|
||||
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
|
||||
Zero means a literal tag. All other values mean a copy tag.
|
||||
|
||||
For literal tags:
|
||||
- If m < 60, the next 1 + m bytes are literal bytes.
|
||||
- Otherwise, let n be the little-endian unsigned integer denoted by the next
|
||||
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
|
||||
|
||||
For copy tags, length bytes are copied from offset bytes ago, in the style of
|
||||
Lempel-Ziv compression algorithms. In particular:
|
||||
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
|
||||
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
|
||||
of the offset. The next byte is bits 0-7 of the offset.
|
||||
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
|
||||
The length is 1 + m. The offset is the little-endian unsigned integer
|
||||
denoted by the next 2 bytes.
|
||||
- For l == 3, this tag is a legacy format that is no longer supported.
|
||||
*/
|
||||
const (
|
||||
tagLiteral = 0x00
|
||||
tagCopy1 = 0x01
|
||||
tagCopy2 = 0x02
|
||||
tagCopy4 = 0x03
|
||||
)
|
||||
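A minimal sketch of the tag split described in the package comment above
(illustrative only; splitTag is not part of the package, but the constants it
refers to are the ones defined above):

func splitTag(first byte) (l, m byte) {
	// l is the 2-bit chunk tag (tagLiteral, tagCopy1, tagCopy2 or tagCopy4);
	// m is the 6-bit value used for literal lengths, copy lengths and offsets.
	return first & 0x03, first >> 2
}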
261 Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go (generated, vendored, new file)
@@ -0,0 +1,261 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
|
||||
|
||||
func roundtrip(b, ebuf, dbuf []byte) error {
|
||||
e, err := Encode(ebuf, b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding error: %v", err)
|
||||
}
|
||||
d, err := Decode(dbuf, e)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(b, d) {
|
||||
return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
if err := roundtrip(nil, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallCopy(t *testing.T) {
|
||||
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
||||
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
|
||||
for i := 0; i < 32; i++ {
|
||||
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
|
||||
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
|
||||
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallRand(t *testing.T) {
|
||||
rand.Seed(27354294)
|
||||
for n := 1; n < 20000; n += 23 {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = uint8(rand.Uint32())
|
||||
}
|
||||
if err := roundtrip(b, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSmallRegular(t *testing.T) {
|
||||
for n := 1; n < 20000; n += 23 {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = uint8(i%10 + 'a')
|
||||
}
|
||||
if err := roundtrip(b, nil, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchDecode(b *testing.B, src []byte) {
|
||||
encoded, err := Encode(nil, src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
// Bandwidth is in amount of uncompressed data.
|
||||
b.SetBytes(int64(len(src)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(src, encoded)
|
||||
}
|
||||
}
|
||||
|
||||
func benchEncode(b *testing.B, src []byte) {
|
||||
// Bandwidth is in amount of uncompressed data.
|
||||
b.SetBytes(int64(len(src)))
|
||||
dst := make([]byte, MaxEncodedLen(len(src)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func readFile(b *testing.B, filename string) []byte {
|
||||
src, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
b.Fatalf("failed reading %s: %s", filename, err)
|
||||
}
|
||||
if len(src) == 0 {
|
||||
b.Fatalf("%s has zero length", filename)
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
// expand returns a slice of length n containing repeated copies of src.
|
||||
func expand(src []byte, n int) []byte {
|
||||
dst := make([]byte, n)
|
||||
for x := dst; len(x) > 0; {
|
||||
i := copy(x, src)
|
||||
x = x[i:]
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func benchWords(b *testing.B, n int, decode bool) {
|
||||
// Note: the file is OS-language dependent so the resulting values are not
|
||||
// directly comparable for non-US-English OS installations.
|
||||
data := expand(readFile(b, "/usr/share/dict/words"), n)
|
||||
if decode {
|
||||
benchDecode(b, data)
|
||||
} else {
|
||||
benchEncode(b, data)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
|
||||
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
|
||||
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
|
||||
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
|
||||
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
|
||||
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
|
||||
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
|
||||
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
|
||||
|
||||
// testFiles' values are copied directly from
|
||||
// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
|
||||
// The label field is unused in snappy-go.
|
||||
var testFiles = []struct {
|
||||
label string
|
||||
filename string
|
||||
}{
|
||||
{"html", "html"},
|
||||
{"urls", "urls.10K"},
|
||||
{"jpg", "house.jpg"},
|
||||
{"pdf", "mapreduce-osdi-1.pdf"},
|
||||
{"html4", "html_x_4"},
|
||||
{"cp", "cp.html"},
|
||||
{"c", "fields.c"},
|
||||
{"lsp", "grammar.lsp"},
|
||||
{"xls", "kennedy.xls"},
|
||||
{"txt1", "alice29.txt"},
|
||||
{"txt2", "asyoulik.txt"},
|
||||
{"txt3", "lcet10.txt"},
|
||||
{"txt4", "plrabn12.txt"},
|
||||
{"bin", "ptt5"},
|
||||
{"sum", "sum"},
|
||||
{"man", "xargs.1"},
|
||||
{"pb", "geo.protodata"},
|
||||
{"gaviota", "kppkn.gtb"},
|
||||
}
|
||||
|
||||
// The test data files are present at this canonical URL.
|
||||
const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
|
||||
|
||||
func downloadTestdata(basename string) (errRet error) {
|
||||
filename := filepath.Join("testdata", basename)
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create %s: %s", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
defer func() {
|
||||
if errRet != nil {
|
||||
os.Remove(filename)
|
||||
}
|
||||
}()
|
||||
resp, err := http.Get(baseURL + basename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write %s: %s", filename, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func benchFile(b *testing.B, n int, decode bool) {
|
||||
filename := filepath.Join("testdata", testFiles[n].filename)
|
||||
if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
|
||||
if !*download {
|
||||
b.Fatal("test data not found; skipping benchmark without the -download flag")
|
||||
}
|
||||
// Download the official snappy C++ implementation reference test data
|
||||
// files for benchmarking.
|
||||
if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
|
||||
b.Fatalf("failed to create testdata: %s", err)
|
||||
}
|
||||
for _, tf := range testFiles {
|
||||
if err := downloadTestdata(tf.filename); err != nil {
|
||||
b.Fatalf("failed to download testdata: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
data := readFile(b, filename)
|
||||
if decode {
|
||||
benchDecode(b, data)
|
||||
} else {
|
||||
benchEncode(b, data)
|
||||
}
|
||||
}
|
||||
|
||||
// Naming convention is kept similar to what snappy's C++ implementation uses.
|
||||
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
|
||||
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
|
||||
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
|
||||
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
|
||||
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
|
||||
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
|
||||
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
|
||||
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
|
||||
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
|
||||
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
|
||||
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
|
||||
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
|
||||
func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
|
||||
func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
|
||||
func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
|
||||
func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
|
||||
func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
|
||||
func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
|
||||
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
|
||||
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
|
||||
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
|
||||
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
|
||||
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
|
||||
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
|
||||
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
|
||||
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
|
||||
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
|
||||
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
|
||||
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
|
||||
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
|
||||
func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
|
||||
func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
|
||||
func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
|
||||
func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
|
||||
func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
|
||||
func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }
|
||||
1 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore (generated, vendored, new file)
@@ -0,0 +1 @@
|
||||
/lz4-example/lz4-example
|
||||
7 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- tip
|
||||
71 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md (generated, vendored, new file)
@@ -0,0 +1,71 @@
|
||||
go-lz4
|
||||
======
|
||||
|
||||
go-lz4 is a port of the LZ4 lossless compression algorithm to Go. The original C code
|
||||
is located at:
|
||||
|
||||
https://code.google.com/p/lz4/
|
||||
|
||||
Status
|
||||
------
|
||||
[Build Status](http://travis-ci.org/bkaradzic/go-lz4)
[GoDoc](https://godoc.org/github.com/bkaradzic/go-lz4)
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
go get github.com/bkaradzic/go-lz4
|
||||
|
||||
import "github.com/bkaradzic/go-lz4"
|
||||
|
||||
The package name is `lz4`
|
||||
|
||||
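A minimal round-trip sketch (error handling omitted for brevity):

```go
package main

import (
	"fmt"

	lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
	data := []byte("hello hello hello hello")

	compressed, _ := lz4.Encode(nil, data)
	decompressed, _ := lz4.Decode(nil, compressed)

	fmt.Println(string(decompressed)) // hello hello hello hello
}
```
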
Notes
|
||||
-----
|
||||
|
||||
* go-lz4 saves a uint32 with the original uncompressed length at the beginning
|
||||
of the encoded buffer. This may get in the way of interoperability with
|
||||
other implementations.
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
Damian Gryski ([@dgryski](https://github.com/dgryski))
|
||||
Dustin Sallings ([@dustin](https://github.com/dustin))
|
||||
|
||||
Contact
|
||||
-------
|
||||
|
||||
[@bkaradzic](https://twitter.com/bkaradzic)
|
||||
http://www.stuckingeometry.com
|
||||
|
||||
Project page
|
||||
https://github.com/bkaradzic/go-lz4
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
63 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
|
||||
|
||||
func roundtrip(t *testing.T, input []byte) {
|
||||
|
||||
dst, err := Encode(nil, input)
|
||||
if err != nil {
|
||||
t.Errorf("got error during compression: %s", err)
|
||||
}
|
||||
|
||||
output, err := Decode(nil, dst)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("got error during decompress: %s", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, input) {
|
||||
t.Errorf("roundtrip failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
roundtrip(t, nil)
|
||||
}
|
||||
|
||||
func TestLengths(t *testing.T) {
|
||||
|
||||
for i := 0; i < 1024; i++ {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
|
||||
for i := 1024; i < 4096; i += 23 {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWords(t *testing.T) {
|
||||
roundtrip(t, testfile)
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Encode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(nil, testfile)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Decode(b *testing.B) {
|
||||
|
||||
var compressed, _ = Encode(nil, testfile)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(nil, compressed)
|
||||
}
|
||||
}
|
||||
@@ -1,199 +1,194 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) < 4 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
uncompressedLen := binary.LittleEndian.Uint32(src)
|
||||
|
||||
if uncompressedLen == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if uncompressedLen > MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if dst == nil || len(dst) < int(uncompressedLen) {
|
||||
dst = make([]byte, uncompressedLen)
|
||||
}
|
||||
|
||||
d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return d.dst, d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return d.dst, nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
|
||||
if literal < 4 {
|
||||
if int(d.dpos+4) > len(d.dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if d.dpos+length > uncompressedLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
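// Illustrative example (not in the original source) of the length encoding
// getLen decodes: each 0xFF byte adds 255 and the first non-0xFF byte ends
// the sequence, so the byte sequence 0xFF, 0xFF, 0x03 decodes to
// 255 + 255 + 3 = 513.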
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) < 4 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
uncompressedLen := binary.LittleEndian.Uint32(src)
|
||||
|
||||
if uncompressedLen == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if uncompressedLen > MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if dst == nil || len(dst) < int(uncompressedLen) {
|
||||
dst = make([]byte, uncompressedLen)
|
||||
}
|
||||
|
||||
d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return d.dst, d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return d.dst, nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
if literal < 4 {
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if d.dpos+length > uncompressedLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
||||
13052 Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt (generated, vendored, new file)
File diff suppressed because it is too large.
@@ -1,190 +1,188 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 17
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
// MaxInputSize is the largest buffer that can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of a lz4 block, given its uncompressed length
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16 + 4
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned array may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) >= MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}
|
||||
|
||||
binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
e.dpos = 4
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return e.dst[:e.dpos], nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import "encoding/binary"
|
||||
import "errors"
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 17
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
// MaxInputSize is the largest buffer that can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of an lz4 block, given its uncompressed length
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16 + 4
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned array may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) >= MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}
|
||||
|
||||
binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
e.dpos = 4
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return e.dst[:e.dpos], nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
||||
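A usage note, not part of the vendored file above: a minimal sketch of calling this API, using only the identifiers defined in the package (Encode, CompressBound, ErrTooLarge, MaxInputSize); the import path is assumed from the copyright holder and the caller code is illustrative only. Encode grows the destination to CompressBound(len(src)) when the supplied buffer is too small, so callers can reuse a buffer across calls.

package main

import (
	"fmt"

	lz4 "github.com/bkaradzic/go-lz4" // assumed import path for the vendored package
)

func main() {
	data := []byte("hello, hello, hello, hello")
	// Reuse a destination buffer sized with CompressBound; Encode allocates
	// a buffer of this size itself if the one provided is too small.
	buf := make([]byte, lz4.CompressBound(len(data)))
	out, err := lz4.Encode(buf, data)
	if err != nil {
		// err is ErrTooLarge when len(data) >= MaxInputSize.
		fmt.Println(err)
		return
	}
	// out may be a sub-slice of buf; copy it if buf will be reused.
	fmt.Printf("compressed %d -> %d bytes\n", len(data), len(out))
}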
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
coverage.out
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,19 @@
language: go
go:
  - tip

install:
  - export PATH=$PATH:$HOME/gopath/bin
  - go get code.google.com/p/go.tools/cmd/cover
  - go get github.com/mattn/goveralls

script:
  - ./generate.sh
  - go test -coverprofile=coverage.out

after_success:
  - goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"

env:
  global:
    secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
@@ -0,0 +1,12 @@
xdr
===

[](https://travis-ci.org/calmh/xdr)
[](https://coveralls.io/r/calmh/xdr?branch=master)
[](http://godoc.org/github.com/calmh/xdr)
[](http://opensource.org/licenses/MIT)

This is an XDR encoding/decoding library. It uses code generation and
not reflection. It supports the IPDR bastardized XDR format when built
with `-tags ipdr`.

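For orientation, a minimal sketch of how this code-generation approach is used, based on the generate.sh and bench_test.go / bench_xdr_test.go files shown below; the struct and file names here are hypothetical:

// message.go (hypothetical input; a "// max:N" comment bounds a field's size)
package example

type Message struct {
	ID   uint64
	Name string // max:64
	Data []byte
}

// The generator is invoked the same way generate.sh runs it for the test files:
//
//	go run cmd/genxdr/main.go -- message.go > message_xdr.go
//
// which emits MarshalXDR, UnmarshalXDR, EncodeXDR, DecodeXDR and AppendXDR
// methods for Message, analogous to those in bench_xdr_test.go.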
113
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
type XDRBenchStruct struct {
|
||||
I1 uint64
|
||||
I2 uint32
|
||||
I3 uint16
|
||||
I4 uint8
|
||||
Bs0 []byte // max:128
|
||||
Bs1 []byte
|
||||
S0 string // max:128
|
||||
S1 string
|
||||
}
|
||||
|
||||
var res []byte // not to be optimized away
|
||||
var s = XDRBenchStruct{
|
||||
I1: 42,
|
||||
I2: 43,
|
||||
I3: 44,
|
||||
I4: 45,
|
||||
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
|
||||
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
S0: "Hello World! String one.",
|
||||
S1: "Hello World! String two.",
|
||||
}
|
||||
var e = s.MarshalXDR()
|
||||
|
||||
func BenchmarkThisMarshal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
res = s.MarshalXDR()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.UnmarshalXDR(e)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.EncodeXDR(ioutil.Discard)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncoder(b *testing.B) {
|
||||
w := xdr.NewWriter(ioutil.Discard)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.encodeXDR(w)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type repeatReader struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (r *repeatReader) Read(bs []byte) (n int, err error) {
|
||||
if len(bs) > len(r.data) {
|
||||
err = io.EOF
|
||||
}
|
||||
n = copy(bs, r.data)
|
||||
r.data = r.data[n:]
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *repeatReader) Reset(bs []byte) {
|
||||
r.data = bs
|
||||
}
|
||||
|
||||
func BenchmarkThisDecode(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.DecodeXDR(rr)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisDecoder(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
r := xdr.NewReader(rr)
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.decodeXDR(r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
183
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
XDRBenchStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I1 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| I2 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | I3 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct XDRBenchStruct {
|
||||
unsigned hyper I1;
|
||||
unsigned int I2;
|
||||
unsigned int I3;
|
||||
uint8 I4;
|
||||
opaque Bs0<128>;
|
||||
opaque Bs1<>;
|
||||
string S0<128>;
|
||||
string S1<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(o.I1)
|
||||
xw.WriteUint32(o.I2)
|
||||
xw.WriteUint16(o.I3)
|
||||
xw.WriteUint8(o.I4)
|
||||
if len(o.Bs0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
xw.WriteBytes(o.Bs0)
|
||||
xw.WriteBytes(o.Bs1)
|
||||
if len(o.S0) > 128 {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
xw.WriteString(o.S0)
|
||||
xw.WriteString(o.S1)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I1 = xr.ReadUint64()
|
||||
o.I2 = xr.ReadUint32()
|
||||
o.I3 = xr.ReadUint16()
|
||||
o.I4 = xr.ReadUint8()
|
||||
o.Bs0 = xr.ReadBytesMax(128)
|
||||
o.Bs1 = xr.ReadBytes()
|
||||
o.S0 = xr.ReadStringMax(128)
|
||||
o.S1 = xr.ReadString()
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
repeatReader Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of data |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ data (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct repeatReader {
|
||||
opaque data<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o repeatReader) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.data)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *repeatReader) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
|
||||
o.data = xr.ReadBytes()
|
||||
return xr.Error()
|
||||
}
|
||||
434
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
Normal file
@@ -0,0 +1,434 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type fieldInfo struct {
|
||||
Name string
|
||||
IsBasic bool // handled by one of the native Read/WriteUint64 etc functions
|
||||
IsSlice bool // field is a slice of FieldType
|
||||
FieldType string // original type of field, i.e. "int"
|
||||
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
|
||||
Convert string // what to convert to when encoding, i.e. "uint64"
|
||||
Max int // max size for slices and strings
|
||||
}
|
||||
|
||||
type structInfo struct {
|
||||
Name string
|
||||
Fields []fieldInfo
|
||||
}
|
||||
|
||||
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package {{.Package}}
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
`))
|
||||
|
||||
var encodeTpl = template.Must(template.New("encoder").Parse(`
|
||||
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if len(o.{{$fieldInfo.Name}}) > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xw.Tot(), xw.Error()
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{end}}
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
|
||||
{{end}}
|
||||
{{else}}
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
|
||||
return xdr.ErrElementSizeExceeded
|
||||
}
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xr.Error()
|
||||
}`))
|
||||
|
||||
var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
|
||||
|
||||
type typeSet struct {
|
||||
Type string
|
||||
Encoder string
|
||||
}
|
||||
|
||||
var xdrEncoders = map[string]typeSet{
|
||||
"int8": typeSet{"uint8", "Uint8"},
|
||||
"uint8": typeSet{"", "Uint8"},
|
||||
"int16": typeSet{"uint16", "Uint16"},
|
||||
"uint16": typeSet{"", "Uint16"},
|
||||
"int32": typeSet{"uint32", "Uint32"},
|
||||
"uint32": typeSet{"", "Uint32"},
|
||||
"int64": typeSet{"uint64", "Uint64"},
|
||||
"uint64": typeSet{"", "Uint64"},
|
||||
"int": typeSet{"uint64", "Uint64"},
|
||||
"string": typeSet{"", "String"},
|
||||
"[]byte": typeSet{"", "Bytes"},
|
||||
"bool": typeSet{"", "Bool"},
|
||||
}
|
||||
|
||||
func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
var fs []fieldInfo
|
||||
|
||||
for _, sf := range t.Fields.List {
|
||||
if len(sf.Names) == 0 {
|
||||
// We don't handle anonymous fields
|
||||
continue
|
||||
}
|
||||
|
||||
fn := sf.Names[0].Name
|
||||
var max = 0
|
||||
if sf.Comment != nil {
|
||||
c := sf.Comment.List[0].Text
|
||||
if m := maxRe.FindStringSubmatch(c); m != nil {
|
||||
max, _ = strconv.Atoi(m[1])
|
||||
}
|
||||
if strings.Contains(c, "noencode") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var f fieldInfo
|
||||
switch ft := sf.Type.(type) {
|
||||
case *ast.Ident:
|
||||
tn := ft.Name
|
||||
if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.ArrayType:
|
||||
if ft.Len != nil {
|
||||
// We don't handle arrays
|
||||
continue
|
||||
}
|
||||
|
||||
tn := ft.Elt.(*ast.Ident).Name
|
||||
if enc, ok := xdrEncoders["[]"+tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
}
|
||||
|
||||
return fs
|
||||
}
|
||||
|
||||
func generateCode(s structInfo) {
|
||||
name := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
|
||||
bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
|
||||
|
||||
bs, err = format.Source(bs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(string(bs))
|
||||
}
|
||||
|
||||
func uncamelize(s string) string {
|
||||
return regexp.MustCompile("[a-z][A-Z]").ReplaceAllStringFunc(s, func(camel string) string {
|
||||
return camel[:1] + " " + camel[1:]
|
||||
})
|
||||
}
|
||||
|
||||
func generateDiagram(s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Println(sn + " Structure:")
|
||||
fmt.Println()
|
||||
fmt.Println(" 0 1 2 3")
|
||||
fmt.Println(" 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
|
||||
line := "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"
|
||||
fmt.Println(line)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
sl := f.IsSlice
|
||||
name := uncamelize(f.Name)
|
||||
|
||||
if sl {
|
||||
fmt.Printf("| %s |\n", center("Number of "+name, 61))
|
||||
fmt.Println(line)
|
||||
}
|
||||
switch tn {
|
||||
case "bool":
|
||||
fmt.Printf("| %s |V|\n", center(name+" (V=0 or 1)", 59))
|
||||
fmt.Println(line)
|
||||
case "uint16":
|
||||
fmt.Printf("| %s | %s |\n", center("0x0000", 29), center(name, 29))
|
||||
fmt.Println(line)
|
||||
case "uint32":
|
||||
fmt.Printf("| %s |\n", center(name, 61))
|
||||
fmt.Println(line)
|
||||
case "int64", "uint64":
|
||||
fmt.Printf("| %-61s |\n", "")
|
||||
fmt.Printf("+ %s +\n", center(name+" (64 bits)", 61))
|
||||
fmt.Printf("| %-61s |\n", "")
|
||||
fmt.Println(line)
|
||||
case "string", "byte": // XXX We assume slice of byte!
|
||||
fmt.Printf("| %s |\n", center("Length of "+name, 61))
|
||||
fmt.Println(line)
|
||||
fmt.Printf("/ %61s /\n", "")
|
||||
fmt.Printf("\\ %s \\\n", center(name+" (variable length)", 61))
|
||||
fmt.Printf("/ %61s /\n", "")
|
||||
fmt.Println(line)
|
||||
default:
|
||||
if sl {
|
||||
tn = "Zero or more " + tn + " Structures"
|
||||
fmt.Printf("/ %s /\n", center("", 61))
|
||||
fmt.Printf("\\ %s \\\n", center(tn, 61))
|
||||
fmt.Printf("/ %s /\n", center("", 61))
|
||||
} else {
|
||||
fmt.Printf("| %s |\n", center(tn, 61))
|
||||
}
|
||||
fmt.Println(line)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func generateXdr(s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Printf("struct %s {\n", sn)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
fn := f.Name
|
||||
suf := ""
|
||||
l := ""
|
||||
if f.Max > 0 {
|
||||
l = strconv.Itoa(f.Max)
|
||||
}
|
||||
if f.IsSlice {
|
||||
suf = "<" + l + ">"
|
||||
}
|
||||
|
||||
switch tn {
|
||||
case "uint16", "uint32":
|
||||
fmt.Printf("\tunsigned int %s%s;\n", fn, suf)
|
||||
case "int64":
|
||||
fmt.Printf("\thyper %s%s;\n", fn, suf)
|
||||
case "uint64":
|
||||
fmt.Printf("\tunsigned hyper %s%s;\n", fn, suf)
|
||||
case "string":
|
||||
fmt.Printf("\tstring %s<%s>;\n", fn, l)
|
||||
case "byte":
|
||||
fmt.Printf("\topaque %s<%s>;\n", fn, l)
|
||||
default:
|
||||
fmt.Printf("\t%s %s%s;\n", tn, fn, suf)
|
||||
}
|
||||
}
|
||||
fmt.Println("}")
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func center(s string, w int) string {
|
||||
w -= len(s)
|
||||
l := w / 2
|
||||
r := l
|
||||
if l+r < w {
|
||||
r++
|
||||
}
|
||||
return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
|
||||
}
|
||||
|
||||
func inspector(structs *[]structInfo) func(ast.Node) bool {
|
||||
return func(n ast.Node) bool {
|
||||
switch n := n.(type) {
|
||||
case *ast.TypeSpec:
|
||||
switch t := n.Type.(type) {
|
||||
case *ast.StructType:
|
||||
name := n.Name.Name
|
||||
fs := handleStruct(t)
|
||||
*structs = append(*structs, structInfo{name, fs})
|
||||
}
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
fname := flag.Arg(0)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var structs []structInfo
|
||||
i := inspector(&structs)
|
||||
ast.Inspect(f, i)
|
||||
|
||||
headerTpl.Execute(os.Stdout, map[string]string{"Package": f.Name.Name})
|
||||
for _, s := range structs {
|
||||
fmt.Printf("\n/*\n\n")
|
||||
generateDiagram(s)
|
||||
generateXdr(s)
|
||||
fmt.Printf("*/\n")
|
||||
generateCode(s)
|
||||
}
|
||||
}
|
||||
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = len(os.Getenv("XDRTRACE")) > 0
|
||||
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
|
||||
)
|
||||
|
||||
const maxDebugBytes = 32
|
||||
@@ -1,5 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package xdr implements an XDR (RFC 4506) marshaller/unmarshaller.
|
||||
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
|
||||
package xdr
|
||||
75
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
// Contains all supported types
|
||||
type TestStruct struct {
|
||||
I int
|
||||
I8 int8
|
||||
UI8 uint8
|
||||
I16 int16
|
||||
UI16 uint16
|
||||
I32 int32
|
||||
UI32 uint32
|
||||
I64 int64
|
||||
UI64 uint64
|
||||
BS []byte
|
||||
S string
|
||||
C Opaque
|
||||
}
|
||||
|
||||
type Opaque [32]byte
|
||||
|
||||
func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
|
||||
return w.WriteRaw(u[:])
|
||||
}
|
||||
|
||||
func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
|
||||
return r.ReadRaw(u[:])
|
||||
}
|
||||
|
||||
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
var u Opaque
|
||||
for i := range u[:] {
|
||||
u[i] = byte(rand.Int())
|
||||
}
|
||||
return reflect.ValueOf(u)
|
||||
}
|
||||
|
||||
func TestEncDec(t *testing.T) {
|
||||
fn := func(t0 TestStruct) bool {
|
||||
bs := t0.MarshalXDR()
|
||||
var t1 TestStruct
|
||||
err := t1.UnmarshalXDR(bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
|
||||
if t0.I != t1.I ||
|
||||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
|
||||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
|
||||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
|
||||
bytes.Compare(t0.BS, t1.BS) != 0 ||
|
||||
t0.S != t1.S || t0.C != t1.C {
|
||||
t.Logf("%#v", t0)
|
||||
t.Logf("%#v", t1)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
136
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
TestStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| uint8 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | UI16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| int32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| UI32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ UI64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of BS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ BS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Opaque |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct TestStruct {
|
||||
int I;
|
||||
int8 I8;
|
||||
uint8 UI8;
|
||||
int16 I16;
|
||||
unsigned int UI16;
|
||||
int32 I32;
|
||||
unsigned int UI32;
|
||||
hyper I64;
|
||||
unsigned hyper UI64;
|
||||
opaque BS<>;
|
||||
string S<>;
|
||||
Opaque C;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
}
|
||||
|
||||
func (o TestStruct) MarshalXDR() []byte {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) []byte {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
o.encodeXDR(xw)
|
||||
return []byte(aw)
|
||||
}
|
||||
|
||||
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(uint64(o.I))
|
||||
xw.WriteUint8(uint8(o.I8))
|
||||
xw.WriteUint8(o.UI8)
|
||||
xw.WriteUint16(uint16(o.I16))
|
||||
xw.WriteUint16(o.UI16)
|
||||
xw.WriteUint32(uint32(o.I32))
|
||||
xw.WriteUint32(o.UI32)
|
||||
xw.WriteUint64(uint64(o.I64))
|
||||
xw.WriteUint64(o.UI64)
|
||||
xw.WriteBytes(o.BS)
|
||||
xw.WriteString(o.S)
|
||||
_, err := o.C.encodeXDR(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *TestStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
|
||||
o.I = int(xr.ReadUint64())
|
||||
o.I8 = int8(xr.ReadUint8())
|
||||
o.UI8 = xr.ReadUint8()
|
||||
o.I16 = int16(xr.ReadUint16())
|
||||
o.UI16 = xr.ReadUint16()
|
||||
o.I32 = int32(xr.ReadUint32())
|
||||
o.UI32 = xr.ReadUint32()
|
||||
o.I64 = int64(xr.ReadUint64())
|
||||
o.UI64 = xr.ReadUint64()
|
||||
o.BS = xr.ReadBytes()
|
||||
o.S = xr.ReadString()
|
||||
(&o.C).decodeXDR(xr)
|
||||
return xr.Error()
|
||||
}
|
||||
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
@@ -0,0 +1,4 @@
#!/bin/sh

go run cmd/genxdr/main.go -- bench_test.go > bench_xdr_test.go
go run cmd/genxdr/main.go -- encdec_test.go > encdec_xdr_test.go
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build ipdr

package xdr

func pad(l int) int {
	return 0
}
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func pad(l int) int {
	d := l % 4
	if d == 0 {
		return 0
	}
	return 4 - d
}
164
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var ErrElementSizeExceeded = errors.New("element size exceeded")
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reader) ReadRaw(bs []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, bs)
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadString() string {
|
||||
return r.ReadStringMax(0)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadStringMax(max int) string {
|
||||
buf := r.ReadBytesMaxInto(max, nil)
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh := reflect.StringHeader{
|
||||
Data: bh.Data,
|
||||
Len: bh.Len,
|
||||
}
|
||||
return *((*string)(unsafe.Pointer(&sh)))
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytes() []byte {
|
||||
return r.ReadBytesInto(nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMax(max int) []byte {
|
||||
return r.ReadBytesMaxInto(max, nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesInto(dst []byte) []byte {
|
||||
return r.ReadBytesMaxInto(0, dst)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
l := int(r.ReadUint32())
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
if max > 0 && l > max {
|
||||
r.err = ErrElementSizeExceeded
|
||||
return nil
|
||||
}
|
||||
|
||||
if fullLen := l + pad(l); fullLen > len(dst) {
|
||||
dst = make([]byte, fullLen)
|
||||
} else {
|
||||
dst = dst[:fullLen]
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, dst)
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if debug {
|
||||
if n > maxDebugBytes {
|
||||
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("rd bytes (%d): %x", len(dst), dst)
|
||||
}
|
||||
}
|
||||
return dst[:l]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBool() bool {
|
||||
return r.ReadUint8() != 0
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint32() uint32 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:4])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint32: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint32=%d (0x%08x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint64() uint64 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:8])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint64: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
|
||||
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint64=%d (0x%016x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
type XDRError struct {
|
||||
op string
|
||||
err error
|
||||
}
|
||||
|
||||
func (e XDRError) Error() string {
|
||||
return "xdr " + e.op + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (r *Reader) Error() error {
|
||||
if r.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"read", r.err}
|
||||
}
|
||||
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
import "io"
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:1])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint8: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
|
||||
}
|
||||
return r.b[0]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:2])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint16: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint16(r.b[1]) | uint16(r.b[0])<<8
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint16=%d (0x%04x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
15
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (r *Reader) ReadUint8() uint8 {
	return uint8(r.ReadUint32())
}

func (r *Reader) ReadUint16() uint16 {
	return uint16(r.ReadUint32())
}
44
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build refl
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
refl "github.com/davecgh/go-xdr/xdr"
|
||||
)
|
||||
|
||||
func TestCompareMarshals(t *testing.T) {
|
||||
e0 := s.MarshalXDR()
|
||||
e1, err := refl.Marshal(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Compare(e0, e1) != 0 {
|
||||
t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflMarshal(b *testing.B) {
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
res, err = refl.Marshal(s)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := refl.Unmarshal(e, &t)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
146
Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var padBytes = []byte{0, 0, 0}
|
||||
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
tot int
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
type AppendWriter []byte
|
||||
|
||||
func (w *AppendWriter) Write(bs []byte) (int, error) {
|
||||
*w = append(*w, bs...)
|
||||
return len(bs), nil
|
||||
}
|
||||
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteRaw(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
return n, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
|
||||
bh := reflect.SliceHeader{
|
||||
Data: sh.Data,
|
||||
Len: sh.Len,
|
||||
Cap: sh.Len,
|
||||
}
|
||||
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
w.WriteUint32(uint32(len(bs)))
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
if len(bs) > maxDebugBytes {
|
||||
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("wr bytes (%d): %x", len(bs), bs)
|
||||
}
|
||||
}
|
||||
|
||||
var l, n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
l += n
|
||||
|
||||
if p := pad(len(bs)); w.err == nil && p > 0 {
|
||||
n, w.err = w.w.Write(padBytes[:p])
|
||||
l += n
|
||||
}
|
||||
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBool(v bool) (int, error) {
|
||||
if v {
|
||||
return w.WriteUint8(1)
|
||||
} else {
|
||||
return w.WriteUint8(0)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint32(v uint32) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint32=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 24)
|
||||
w.b[1] = byte(v >> 16)
|
||||
w.b[2] = byte(v >> 8)
|
||||
w.b[3] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:4])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint64(v uint64) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint64=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 56)
|
||||
w.b[1] = byte(v >> 48)
|
||||
w.b[2] = byte(v >> 40)
|
||||
w.b[3] = byte(v >> 32)
|
||||
w.b[4] = byte(v >> 24)
|
||||
w.b[5] = byte(v >> 16)
|
||||
w.b[6] = byte(v >> 8)
|
||||
w.b[7] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:8])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) Tot() int {
|
||||
return w.tot
|
||||
}
|
||||
|
||||
func (w *Writer) Error() error {
|
||||
if w.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"write", w.err}
|
||||
}
|
||||
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:1])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 8)
|
||||
w.b[1] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:2])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (w *Writer) WriteUint8(v uint8) (int, error) {
	return w.WriteUint32(uint32(v))
}

func (w *Writer) WriteUint16(v uint16) (int, error) {
	return w.WriteUint32(uint32(v))
}
92
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
func TestBytesNil(t *testing.T) {
|
||||
fn := func(bs []byte) bool {
|
||||
var b = new(bytes.Buffer)
|
||||
var w = NewWriter(b)
|
||||
var r = NewReader(b)
|
||||
w.WriteBytes(bs)
|
||||
w.WriteBytes(bs)
|
||||
r.ReadBytes()
|
||||
res := r.ReadBytes()
|
||||
return bytes.Compare(bs, res) == 0
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytesGiven(t *testing.T) {
|
||||
fn := func(bs []byte) bool {
|
||||
var b = new(bytes.Buffer)
|
||||
var w = NewWriter(b)
|
||||
var r = NewReader(b)
|
||||
w.WriteBytes(bs)
|
||||
w.WriteBytes(bs)
|
||||
res := make([]byte, 12)
|
||||
res = r.ReadBytesInto(res)
|
||||
res = r.ReadBytesInto(res)
|
||||
return bytes.Compare(bs, res) == 0
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBytesMaxInto(t *testing.T) {
|
||||
var max = 64
|
||||
for tot := 32; tot < 128; tot++ {
|
||||
for diff := -32; diff <= 32; diff++ {
|
||||
var b = new(bytes.Buffer)
|
||||
var r = NewReader(b)
|
||||
var w = NewWriter(b)
|
||||
|
||||
var toWrite = make([]byte, tot)
|
||||
w.WriteBytes(toWrite)
|
||||
|
||||
var buf = make([]byte, tot+diff)
|
||||
var bs = r.ReadBytesMaxInto(max, buf)
|
||||
|
||||
if tot <= max {
|
||||
if read := len(bs); read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadStringMax(t *testing.T) {
|
||||
for tot := 42; tot < 72; tot++ {
|
||||
for max := 0; max < 128; max++ {
|
||||
var b = new(bytes.Buffer)
|
||||
var r = NewReader(b)
|
||||
var w = NewWriter(b)
|
||||
|
||||
var toWrite = make([]byte, tot)
|
||||
w.WriteBytes(toWrite)
|
||||
|
||||
var str = r.ReadStringMax(max)
|
||||
var read = len(str)
|
||||
|
||||
if max == 0 || tot <= max {
|
||||
if read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
|
||||
}
|
||||
} else if r.err != ErrElementSizeExceeded {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
185
Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
This software is licensed under the LGPLv3, included below.
|
||||
|
||||
As a special exception to the GNU Lesser General Public License version 3
|
||||
("LGPL3"), the copyright holders of this Library give you permission to
|
||||
convey to a third party a Combined Work that links statically or dynamically
|
||||
to this Library without providing any Minimal Corresponding Source or
|
||||
Minimal Application Code as set out in 4d or providing the installation
|
||||
information set out in section 4e, provided that you comply with the other
|
||||
provisions of LGPL3 and provided that you meet, for the Application the
|
||||
terms and conditions of the license(s) which apply to the Application.
|
||||
|
||||
Except as stated in this special exception, the provisions of LGPL3 will
|
||||
continue to comply in full to this Library. If you modify this Library, you
|
||||
may apply this exception to your version of this Library, but you are not
|
||||
obliged to do so. If you do not wish to do so, delete this exception
|
||||
statement from your version. This exception does not (and cannot) modify any
|
||||
license terms which apply to the Application, with which you must still
|
||||
comply.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

      0) Convey the Minimal Corresponding Source under the terms of this
      License, and the Corresponding Application Code in a form
      suitable for, and under terms that permit, the user to
      recombine or relink the Application with a modified version of
      the Linked Version to produce a modified Combined Work, in the
      manner specified by section 6 of the GNU GPL for conveying
      Corresponding Source.

      1) Use a suitable shared library mechanism for linking with the
      Library.  A suitable mechanism is one that (a) uses at run time
      a copy of the Library already present on the user's computer
      system, and (b) will operate properly with a modified version
      of the Library that is interface-compatible with the Linked
      Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version.  (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code.  If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
Godeps/_workspace/src/github.com/juju/ratelimit/README.md (generated, vendored, new file, 117 lines)
@@ -0,0 +1,117 @@
# ratelimit
--
    import "github.com/juju/ratelimit"

The ratelimit package provides an efficient token bucket implementation. See
http://en.wikipedia.org/wiki/Token_bucket.

## Usage

#### func Reader

```go
func Reader(r io.Reader, bucket *Bucket) io.Reader
```
Reader returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.

#### func Writer

```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
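As a quick orientation (this example is editorial and not part of the upstream README), the sketch below throttles an `io.Copy` by wrapping one side of the copy; the 256 KiB/s figure and the use of `os.Stdin`/`os.Stdout` are illustrative assumptions:

```go
package main

import (
	"io"
	"os"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per byte: roughly 256 KiB/s with up to one second of burst.
	bucket := ratelimit.NewBucketWithRate(256*1024, 256*1024)

	// Wrapping either side is enough; here the destination is limited.
	limited := ratelimit.Writer(os.Stdout, bucket)
	io.Copy(limited, os.Stdin)
}
```

Wrapping the source with `ratelimit.Reader` instead gives the same effective throughput.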
#### type Bucket

```go
type Bucket struct {
}
```

Bucket represents a token bucket that fills at a predetermined rate. Methods on
Bucket may be called concurrently.

#### func NewBucket

```go
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
```
NewBucket returns a new token bucket that fills at the rate of one token every
fillInterval, up to the given maximum capacity. Both arguments must be positive.
The bucket is initially full.

#### func NewBucketWithQuantum

```go
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
```
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
the quantum size - quantum tokens are added every fillInterval.

#### func NewBucketWithRate

```go
func NewBucketWithRate(rate float64, capacity int64) *Bucket
```
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
rate tokens per second up to the given maximum capacity. Because of limited
clock resolution, at high rates, the actual rate may be up to 1% different from
the specified rate.
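The three constructors differ only in how the fill rate is expressed. A small editorial sketch (the concrete numbers are arbitrary) showing them side by side together with `Rate()`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	byRate := ratelimit.NewBucketWithRate(100, 200)                    // ~100 tokens/s, burst up to 200
	byTick := ratelimit.NewBucket(100*time.Millisecond, 10)            // one token every 100ms
	byQuantum := ratelimit.NewBucketWithQuantum(time.Second, 1000, 50) // 50 tokens added once per second

	fmt.Println(byRate.Rate())    // close to 100 (within the 1% margin)
	fmt.Println(byTick.Rate())    // 10
	fmt.Println(byQuantum.Rate()) // 50
}
```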
#### func (*Bucket) Rate

```go
func (tb *Bucket) Rate() float64
```
Rate returns the fill rate of the bucket, in tokens per second.

#### func (*Bucket) Take

```go
func (tb *Bucket) Take(count int64) time.Duration
```
Take takes count tokens from the bucket without blocking. It returns the time
that the caller should wait until the tokens are actually available.

Note that the request is irrevocable - there is no way to return tokens to
the bucket once this method commits us to taking them.

#### func (*Bucket) TakeAvailable

```go
func (tb *Bucket) TakeAvailable(count int64) int64
```
TakeAvailable takes up to count immediately available tokens from the bucket. It
returns the number of tokens removed, or zero if there are no available tokens.
It does not block.

#### func (*Bucket) TakeMaxDuration

```go
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
```
TakeMaxDuration is like Take, except that it will only take tokens from the
bucket if the wait time for the tokens is no greater than maxWait.

If it would take longer than maxWait for the tokens to become available, it does
nothing and reports false, otherwise it returns the time that the caller should
wait until the tokens are actually available, and reports true.

#### func (*Bucket) Wait

```go
func (tb *Bucket) Wait(count int64)
```
Wait takes count tokens from the bucket, waiting until they are available.

#### func (*Bucket) WaitMaxDuration

```go
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
```
WaitMaxDuration is like Wait except that it will only take tokens from the
bucket if it needs to wait for no greater than maxWait. It reports whether any
tokens have been removed from the bucket. If no tokens have been removed, it
returns immediately.
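To make the blocking and non-blocking variants concrete, an editorial sketch (values chosen only for illustration) of how Take, TakeAvailable and the Wait helpers behave on a freshly filled bucket:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token every 250ms, capacity 10, initially full.
	tb := ratelimit.NewBucket(250*time.Millisecond, 10)

	fmt.Println(tb.TakeAvailable(4)) // 4: taken immediately, never blocks
	fmt.Println(tb.Take(8))          // ~500ms: only 6 tokens were left, 2 must still be earned

	// WaitMaxDuration takes nothing if it would have to wait longer than the limit.
	if !tb.WaitMaxDuration(100, time.Second) {
		fmt.Println("would need to wait more than a second; nothing was taken")
	}

	tb.Wait(1) // blocks until one more token is available
}
```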
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go (generated, vendored, new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
// The ratelimit package provides an efficient token bucket implementation.
|
||||
// See http://en.wikipedia.org/wiki/Token_bucket.
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Bucket represents a token bucket that fills at a predetermined rate.
|
||||
// Methods on Bucket may be called concurrently.
|
||||
type Bucket struct {
|
||||
startTime time.Time
|
||||
capacity int64
|
||||
quantum int64
|
||||
fillInterval time.Duration
|
||||
|
||||
// The mutex guards the fields following it.
|
||||
mu sync.Mutex
|
||||
|
||||
// avail holds the number of available tokens
|
||||
// in the bucket, as of availTick ticks from startTime.
|
||||
// It will be negative when there are consumers
|
||||
// waiting for tokens.
|
||||
avail int64
|
||||
availTick int64
|
||||
}
|
||||
|
||||
// NewBucket returns a new token bucket that fills at the
|
||||
// rate of one token every fillInterval, up to the given
|
||||
// maximum capacity. Both arguments must be
|
||||
// positive. The bucket is initially full.
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
|
||||
return NewBucketWithQuantum(fillInterval, capacity, 1)
|
||||
}
|
||||
|
||||
// rateMargin specifies the allowed variance of actual
// rate from specified rate. 1% seems reasonable.
|
||||
const rateMargin = 0.01
|
||||
|
||||
// NewBucketWithRate returns a token bucket that fills the bucket
|
||||
// at the rate of rate tokens per second up to the given
|
||||
// maximum capacity. Because of limited clock resolution,
|
||||
// at high rates, the actual rate may be up to 1% different from the
|
||||
// specified rate.
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
|
||||
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
|
||||
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
|
||||
if fillInterval <= 0 {
|
||||
continue
|
||||
}
|
||||
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
|
||||
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
|
||||
return tb
|
||||
}
|
||||
}
|
||||
panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
|
||||
}
|
||||
|
||||
// nextQuantum returns the next quantum to try after q.
|
||||
// We grow the quantum exponentially, but slowly, so we
|
||||
// get a good fit in the lower numbers.
|
||||
func nextQuantum(q int64) int64 {
|
||||
q1 := q * 11 / 10
|
||||
if q1 == q {
|
||||
q1++
|
||||
}
|
||||
return q1
|
||||
}
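// Editorial note, not part of the upstream source: starting from 1 the
// quantum sequence produced by nextQuantum grows by roughly 10% per step
// (1, 2, 3, ..., 10, 11, 12, ..., 20, 22, 24, 26, ...), so NewBucketWithRate
// usually finds a quantum/fillInterval pair within the 1% rateMargin after a
// modest number of iterations, long before the 1<<50 cut-off is reached.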
|
||||
|
||||
// NewBucketWithQuantum is similar to NewBucket, but allows
|
||||
// the specification of the quantum size - quantum tokens
|
||||
// are added every fillInterval.
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
|
||||
if fillInterval <= 0 {
|
||||
panic("token bucket fill interval is not > 0")
|
||||
}
|
||||
if capacity <= 0 {
|
||||
panic("token bucket capacity is not > 0")
|
||||
}
|
||||
if quantum <= 0 {
|
||||
panic("token bucket quantum is not > 0")
|
||||
}
|
||||
return &Bucket{
|
||||
startTime: time.Now(),
|
||||
capacity: capacity,
|
||||
quantum: quantum,
|
||||
avail: capacity,
|
||||
fillInterval: fillInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Wait takes count tokens from the bucket, waiting until they are
|
||||
// available.
|
||||
func (tb *Bucket) Wait(count int64) {
|
||||
if d := tb.Take(count); d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitMaxDuration is like Wait except that it will
|
||||
// only take tokens from the bucket if it needs to wait
|
||||
// for no greater than maxWait. It reports whether
|
||||
// any tokens have been removed from the bucket.
// If no tokens have been removed, it returns immediately.
|
||||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
|
||||
d, ok := tb.TakeMaxDuration(count, maxWait)
|
||||
if d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
const infinityDuration time.Duration = 0x7fffffffffffffff
|
||||
|
||||
// Take takes count tokens from the bucket without blocking. It returns
|
||||
// the time that the caller should wait until the tokens are actually
|
||||
// available.
|
||||
//
|
||||
// Note that the request is irrevocable - there is no way to return
// tokens to the bucket once this method commits us to taking them.
|
||||
func (tb *Bucket) Take(count int64) time.Duration {
|
||||
d, _ := tb.take(time.Now(), count, infinityDuration)
|
||||
return d
|
||||
}
|
||||
|
||||
// TakeMaxDuration is like Take, except that
|
||||
// it will only take tokens from the bucket if the wait
|
||||
// time for the tokens is no greater than maxWait.
|
||||
//
|
||||
// If it would take longer than maxWait for the tokens
|
||||
// to become available, it does nothing and reports false,
|
||||
// otherwise it returns the time that the caller should
|
||||
// wait until the tokens are actually available, and reports
|
||||
// true.
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
return tb.take(time.Now(), count, maxWait)
|
||||
}
|
||||
|
||||
// TakeAvailable takes up to count immediately available tokens from the
|
||||
// bucket. It returns the number of tokens removed, or zero if there are
|
||||
// no available tokens. It does not block.
|
||||
func (tb *Bucket) TakeAvailable(count int64) int64 {
|
||||
return tb.takeAvailable(time.Now(), count)
|
||||
}
|
||||
|
||||
// takeAvailable is the internal version of TakeAvailable - it takes the
|
||||
// current time as an argument to enable easy testing.
|
||||
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
|
||||
if count <= 0 {
|
||||
return 0
|
||||
}
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
|
||||
tb.adjust(now)
|
||||
if tb.avail <= 0 {
|
||||
return 0
|
||||
}
|
||||
if count > tb.avail {
|
||||
count = tb.avail
|
||||
}
|
||||
tb.avail -= count
|
||||
return count
|
||||
}
|
||||
|
||||
// Rate returns the fill rate of the bucket, in tokens per second.
|
||||
func (tb *Bucket) Rate() float64 {
|
||||
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
|
||||
}
|
||||
|
||||
// take is the internal version of Take - it takes the current time as
|
||||
// an argument to enable easy testing.
|
||||
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
if count <= 0 {
|
||||
return 0, true
|
||||
}
|
||||
tb.mu.Lock()
|
||||
defer tb.mu.Unlock()
|
||||
|
||||
currentTick := tb.adjust(now)
|
||||
avail := tb.avail - count
|
||||
if avail >= 0 {
|
||||
tb.avail = avail
|
||||
return 0, true
|
||||
}
|
||||
// Round up the missing tokens to the nearest multiple
|
||||
// of quantum - the tokens won't be available until
|
||||
// that tick.
|
||||
endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
|
||||
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
|
||||
waitTime := endTime.Sub(now)
|
||||
if waitTime > maxWait {
|
||||
return 0, false
|
||||
}
|
||||
tb.avail = avail
|
||||
return waitTime, true
|
||||
}
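// Editorial note, not part of the upstream source: as a worked example, with
// fillInterval = 250ms, quantum = 1 and an already-empty bucket at tick 0,
// take(now, 2, maxWait) computes avail = -2, endTick = 0 + (2+1-1)/1 = 2 and
// therefore a wait of 2 * 250ms = 500ms, matching the "concurrent requests"
// case in the table-driven tests in ratelimit_test.go.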
|
||||
|
||||
// adjust adjusts the current bucket capacity based on the current time.
|
||||
// It returns the current tick.
|
||||
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
|
||||
currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
|
||||
|
||||
if tb.avail >= tb.capacity {
|
||||
return
|
||||
}
|
||||
tb.avail += (currentTick - tb.availTick) * tb.quantum
|
||||
if tb.avail > tb.capacity {
|
||||
tb.avail = tb.capacity
|
||||
}
|
||||
tb.availTick = currentTick
|
||||
return
|
||||
}
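// Editorial note, not part of the upstream source: for example, a bucket with
// capacity 10 and quantum 1 that last refilled at availTick 0 with avail = 3
// gains (4-0)*1 = 4 tokens when adjust is called at tick 4, leaving avail = 7;
// the result would have been clamped to 10 had the sum exceeded the capacity.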
|
||||
|
||||
func abs(f float64) float64 {
|
||||
if f < 0 {
|
||||
return -f
|
||||
}
|
||||
return f
|
||||
}
|
||||
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit_test.go (generated, vendored, new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
gc "launchpad.net/gocheck"
|
||||
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPackage(t *testing.T) {
|
||||
gc.TestingT(t)
|
||||
}
|
||||
|
||||
type rateLimitSuite struct{}
|
||||
|
||||
var _ = gc.Suite(rateLimitSuite{})
|
||||
|
||||
type takeReq struct {
|
||||
time time.Duration
|
||||
count int64
|
||||
expectWait time.Duration
|
||||
}
|
||||
|
||||
var takeTests = []struct {
|
||||
about string
|
||||
fillInterval time.Duration
|
||||
capacity int64
|
||||
reqs []takeReq
|
||||
}{{
|
||||
about: "serial requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 0,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expectWait: 250 * time.Millisecond,
|
||||
}, {
|
||||
time: 250 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 250 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "concurrent requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expectWait: 500 * time.Millisecond,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expectWait: 1000 * time.Millisecond,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expectWait: 1250 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "more than capacity",
|
||||
fillInterval: 1 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 20 * time.Millisecond,
|
||||
count: 15,
|
||||
expectWait: 5 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "sub-quantum time",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 7 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 3 * time.Millisecond,
|
||||
}, {
|
||||
time: 8 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 12 * time.Millisecond,
|
||||
}},
|
||||
}, {
|
||||
about: "within capacity",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 5,
|
||||
reqs: []takeReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 5,
|
||||
expectWait: 0,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 1,
|
||||
expectWait: 10 * time.Millisecond,
|
||||
}, {
|
||||
time: 80 * time.Millisecond,
|
||||
count: 2,
|
||||
expectWait: 10 * time.Millisecond,
|
||||
}},
|
||||
}}
|
||||
|
||||
func (rateLimitSuite) TestTake(c *gc.C) {
|
||||
for i, test := range takeTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
if d != req.expectWait {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestTakeMaxDuration(c *gc.C) {
|
||||
for i, test := range takeTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
if req.expectWait > 0 {
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait-1)
|
||||
c.Assert(ok, gc.Equals, false)
|
||||
c.Assert(d, gc.Equals, time.Duration(0))
|
||||
}
|
||||
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
if d != req.expectWait {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type takeAvailableReq struct {
|
||||
time time.Duration
|
||||
count int64
|
||||
expect int64
|
||||
}
|
||||
|
||||
var takeAvailableTests = []struct {
|
||||
about string
|
||||
fillInterval time.Duration
|
||||
capacity int64
|
||||
reqs []takeAvailableReq
|
||||
}{{
|
||||
about: "serial requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 0,
|
||||
expect: 0,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 10,
|
||||
expect: 10,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expect: 0,
|
||||
}, {
|
||||
time: 250 * time.Millisecond,
|
||||
count: 1,
|
||||
expect: 1,
|
||||
}},
|
||||
}, {
|
||||
about: "concurrent requests",
|
||||
fillInterval: 250 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 2,
|
||||
expect: 2,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 3,
|
||||
}, {
|
||||
time: 0,
|
||||
count: 1,
|
||||
expect: 0,
|
||||
}},
|
||||
}, {
|
||||
about: "more than capacity",
|
||||
fillInterval: 1 * time.Millisecond,
|
||||
capacity: 10,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 10,
|
||||
expect: 10,
|
||||
}, {
|
||||
time: 20 * time.Millisecond,
|
||||
count: 15,
|
||||
expect: 10,
|
||||
}},
|
||||
}, {
|
||||
about: "within capacity",
|
||||
fillInterval: 10 * time.Millisecond,
|
||||
capacity: 5,
|
||||
reqs: []takeAvailableReq{{
|
||||
time: 0,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 60 * time.Millisecond,
|
||||
count: 5,
|
||||
expect: 5,
|
||||
}, {
|
||||
time: 70 * time.Millisecond,
|
||||
count: 1,
|
||||
expect: 1,
|
||||
}},
|
||||
}}
|
||||
|
||||
func (rateLimitSuite) TestTakeAvailable(c *gc.C) {
|
||||
for i, test := range takeAvailableTests {
|
||||
tb := NewBucket(test.fillInterval, test.capacity)
|
||||
for j, req := range test.reqs {
|
||||
d := tb.takeAvailable(tb.startTime.Add(req.time), req.count)
|
||||
if d != req.expect {
|
||||
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expect)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestPanics(c *gc.C) {
|
||||
c.Assert(func() { NewBucket(0, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
|
||||
c.Assert(func() { NewBucket(-2, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
|
||||
c.Assert(func() { NewBucket(1, 0) }, gc.PanicMatches, "token bucket capacity is not > 0")
|
||||
c.Assert(func() { NewBucket(1, -2) }, gc.PanicMatches, "token bucket capacity is not > 0")
|
||||
}
|
||||
|
||||
func isCloseTo(x, y, tolerance float64) bool {
|
||||
return abs(x-y)/y < tolerance
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestRate(c *gc.C) {
|
||||
tb := NewBucket(1, 1)
|
||||
if !isCloseTo(tb.Rate(), 1e9, 0.00001) {
|
||||
c.Fatalf("got %v want 1e9", tb.Rate())
|
||||
}
|
||||
tb = NewBucket(2*time.Second, 1)
|
||||
if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
|
||||
c.Fatalf("got %v want 0.5", tb.Rate())
|
||||
}
|
||||
tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
|
||||
if !isCloseTo(tb.Rate(), 50, 0.00001) {
|
||||
c.Fatalf("got %v want 50", tb.Rate())
|
||||
}
|
||||
}
|
||||
|
||||
func checkRate(c *gc.C, rate float64) {
|
||||
tb := NewBucketWithRate(rate, 1<<62)
|
||||
if !isCloseTo(tb.Rate(), rate, rateMargin) {
|
||||
c.Fatalf("got %g want %v", tb.Rate(), rate)
|
||||
}
|
||||
d, ok := tb.take(tb.startTime, 1<<62, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
c.Assert(d, gc.Equals, time.Duration(0))
|
||||
|
||||
// Check that the actual rate is as expected by
|
||||
// asking for a not-quite multiple of the bucket's
|
||||
// quantum and checking that the wait time
|
||||
// correct.
|
||||
d, ok = tb.take(tb.startTime, tb.quantum*2-tb.quantum/2, infinityDuration)
|
||||
c.Assert(ok, gc.Equals, true)
|
||||
expectTime := 1e9 * float64(tb.quantum) * 2 / rate
|
||||
if !isCloseTo(float64(d), expectTime, rateMargin) {
|
||||
c.Fatalf("rate %g: got %g want %v", rate, float64(d), expectTime)
|
||||
}
|
||||
}
|
||||
|
||||
func (rateLimitSuite) TestNewWithRate(c *gc.C) {
|
||||
for rate := float64(1); rate < 1e6; rate += 7 {
|
||||
checkRate(c, rate)
|
||||
}
|
||||
for _, rate := range []float64{
|
||||
1024 * 1024 * 1024,
|
||||
1e-5,
|
||||
0.9e-5,
|
||||
0.5,
|
||||
0.9,
|
||||
0.9e8,
|
||||
3e12,
|
||||
4e18,
|
||||
} {
|
||||
checkRate(c, rate)
|
||||
checkRate(c, rate/3)
|
||||
checkRate(c, rate*1.3)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWait(b *testing.B) {
|
||||
tb := NewBucket(1, 16*1024)
|
||||
for i := b.N - 1; i >= 0; i-- {
|
||||
tb.Wait(1)
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/juju/ratelimit/reader.go (generated, vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
package ratelimit
|
||||
|
||||
import "io"
|
||||
|
||||
type reader struct {
|
||||
r io.Reader
|
||||
bucket *Bucket
|
||||
}
|
||||
|
||||
// Reader returns a reader that is rate limited by
|
||||
// the given token bucket. Each token in the bucket
|
||||
// represents one byte.
|
||||
func Reader(r io.Reader, bucket *Bucket) io.Reader {
|
||||
return &reader{
|
||||
r: r,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) Read(buf []byte) (int, error) {
|
||||
n, err := r.r.Read(buf)
|
||||
if n <= 0 {
|
||||
return n, err
|
||||
}
|
||||
r.bucket.Wait(int64(n))
|
||||
return n, err
|
||||
}
|
||||
|
||||
type writer struct {
|
||||
w io.Writer
|
||||
bucket *Bucket
|
||||
}
|
||||
|
||||
// Writer returns a writer that is rate limited by
|
||||
// the given token bucket. Each token in the bucket
|
||||
// represents one byte.
|
||||
func Writer(w io.Writer, bucket *Bucket) io.Writer {
|
||||
return &writer{
|
||||
w: w,
|
||||
bucket: bucket,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *writer) Write(buf []byte) (int, error) {
|
||||
w.bucket.Wait(int64(len(buf)))
|
||||
return w.w.Write(buf)
|
||||
}
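// Editorial note, not part of the upstream source: the two wrappers above are
// deliberately asymmetric - the reader charges the bucket for the bytes it has
// just read from the underlying Reader, while the writer charges for the whole
// buffer before forwarding it, so a slow bucket delays the data in both cases.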
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go (generated, vendored, new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
var (
|
||||
errBatchTooShort = errors.New("leveldb: batch is too short")
|
||||
errBatchBadRecord = errors.New("leveldb: bad record in batch")
|
||||
)
|
||||
|
||||
const kBatchHdrLen = 8 + 4
|
||||
|
||||
type batchReplay interface {
|
||||
put(key, value []byte, seq uint64)
|
||||
delete(key []byte, seq uint64)
|
||||
}
|
||||
|
||||
// Batch is a write batch.
|
||||
type Batch struct {
|
||||
buf []byte
|
||||
rLen, bLen int
|
||||
seq uint64
|
||||
sync bool
|
||||
}
|
||||
|
||||
func (b *Batch) grow(n int) {
|
||||
off := len(b.buf)
|
||||
if off == 0 {
|
||||
// include headers
|
||||
off = kBatchHdrLen
|
||||
n += off
|
||||
}
|
||||
if cap(b.buf)-off >= n {
|
||||
return
|
||||
}
|
||||
buf := make([]byte, 2*cap(b.buf)+n)
|
||||
copy(buf, b.buf)
|
||||
b.buf = buf[:off]
|
||||
}
|
||||
|
||||
func (b *Batch) appendRec(t vType, key, value []byte) {
|
||||
n := 1 + binary.MaxVarintLen32 + len(key)
|
||||
if t == tVal {
|
||||
n += binary.MaxVarintLen32 + len(value)
|
||||
}
|
||||
b.grow(n)
|
||||
off := len(b.buf)
|
||||
buf := b.buf[:off+n]
|
||||
buf[off] = byte(t)
|
||||
off += 1
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(key)))
|
||||
copy(buf[off:], key)
|
||||
off += len(key)
|
||||
if t == tVal {
|
||||
off += binary.PutUvarint(buf[off:], uint64(len(value)))
|
||||
copy(buf[off:], value)
|
||||
off += len(value)
|
||||
}
|
||||
b.buf = buf[:off]
|
||||
b.rLen++
|
||||
// Include 8-byte ikey header
|
||||
b.bLen += len(key) + len(value) + 8
|
||||
}
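// Editorial note, not part of the upstream source: the encoded batch therefore
// consists of an 8-byte little-endian sequence number and a 4-byte little-endian
// record count (kBatchHdrLen = 12), followed by one record per operation: a
// single type byte (tVal or tDel), a uvarint key length plus the key bytes and,
// for tVal records only, a uvarint value length plus the value bytes. decode and
// decodeRec below walk exactly this layout.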
|
||||
|
||||
// Put appends 'put operation' of the given key/value pair to the batch.
|
||||
// It is safe to modify the contents of the argument after Put returns.
|
||||
func (b *Batch) Put(key, value []byte) {
|
||||
b.appendRec(tVal, key, value)
|
||||
}
|
||||
|
||||
// Delete appends 'delete operation' of the given key to the batch.
|
||||
// It is safe to modify the contents of the argument after Delete returns.
|
||||
func (b *Batch) Delete(key []byte) {
|
||||
b.appendRec(tDel, key, nil)
|
||||
}
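// Editorial note, not part of the upstream source: a typical caller builds a
// batch and hands it to DB.Write, which applies all records atomically:
//
//	b := new(leveldb.Batch)
//	b.Put([]byte("k1"), []byte("v1"))
//	b.Delete([]byte("k2"))
//	err := db.Write(b, nil) // db is an opened *leveldb.DB; nil uses default write options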
|
||||
|
||||
// Reset resets the batch.
|
||||
func (b *Batch) Reset() {
|
||||
b.buf = nil
|
||||
b.seq = 0
|
||||
b.rLen = 0
|
||||
b.bLen = 0
|
||||
b.sync = false
|
||||
}
|
||||
|
||||
func (b *Batch) init(sync bool) {
|
||||
b.sync = sync
|
||||
}
|
||||
|
||||
func (b *Batch) put(key, value []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Put(key, value)
|
||||
}
|
||||
|
||||
func (b *Batch) delete(key []byte, seq uint64) {
|
||||
if b.rLen == 0 {
|
||||
b.seq = seq
|
||||
}
|
||||
b.Delete(key)
|
||||
}
|
||||
|
||||
func (b *Batch) append(p *Batch) {
|
||||
if p.rLen > 0 {
|
||||
b.grow(len(p.buf) - kBatchHdrLen)
|
||||
b.buf = append(b.buf, p.buf[kBatchHdrLen:]...)
|
||||
b.rLen += p.rLen
|
||||
}
|
||||
if p.sync {
|
||||
b.sync = true
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Batch) len() int {
|
||||
return b.rLen
|
||||
}
|
||||
|
||||
func (b *Batch) size() int {
|
||||
return b.bLen
|
||||
}
|
||||
|
||||
func (b *Batch) encode() []byte {
|
||||
b.grow(0)
|
||||
binary.LittleEndian.PutUint64(b.buf, b.seq)
|
||||
binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen))
|
||||
|
||||
return b.buf
|
||||
}
|
||||
|
||||
func (b *Batch) decode(buf []byte) error {
|
||||
if len(buf) < kBatchHdrLen {
|
||||
return errBatchTooShort
|
||||
}
|
||||
|
||||
b.seq = binary.LittleEndian.Uint64(buf)
|
||||
b.rLen = int(binary.LittleEndian.Uint32(buf[8:]))
|
||||
// No need to be precise at this point, it won't be used anyway
|
||||
b.bLen = len(buf) - kBatchHdrLen
|
||||
b.buf = buf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
|
||||
off := kBatchHdrLen
|
||||
for i := 0; i < b.rLen; i++ {
|
||||
if off >= len(b.buf) {
|
||||
return errors.New("leveldb: invalid batch record length")
|
||||
}
|
||||
|
||||
t := vType(b.buf[off])
|
||||
if t > tVal {
|
||||
return errors.New("leveldb: invalid batch record type in batch")
|
||||
}
|
||||
off += 1
|
||||
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
}
|
||||
key := b.buf[off : off+int(x)]
|
||||
off += int(x)
|
||||
|
||||
var value []byte
|
||||
if t == tVal {
|
||||
x, n := binary.Uvarint(b.buf[off:])
|
||||
off += n
|
||||
if n <= 0 || off+int(x) > len(b.buf) {
|
||||
return errBatchBadRecord
|
||||
}
|
||||
value = b.buf[off : off+int(x)]
|
||||
off += int(x)
|
||||
}
|
||||
|
||||
f(i, t, key, value)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) replay(to batchReplay) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
switch t {
|
||||
case tVal:
|
||||
to.put(key, value, b.seq+uint64(i))
|
||||
case tDel:
|
||||
to.delete(key, b.seq+uint64(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) memReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
to.Put(ikey, value)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Batch) revertMemReplay(to *memdb.DB) error {
|
||||
return b.decodeRec(func(i int, t vType, key, value []byte) {
|
||||
ikey := newIKey(key, b.seq+uint64(i), t)
|
||||
to.Delete(ikey)
|
||||
})
|
||||
}
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go (generated, vendored, new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/comparer"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
)
|
||||
|
||||
type tbRec struct {
|
||||
t vType
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
type testBatch struct {
|
||||
rec []*tbRec
|
||||
}
|
||||
|
||||
func (p *testBatch) put(key, value []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tVal, key, value})
|
||||
}
|
||||
|
||||
func (p *testBatch) delete(key []byte, seq uint64) {
|
||||
p.rec = append(p.rec, &tbRec{tDel, key, nil})
|
||||
}
|
||||
|
||||
func compareBatch(t *testing.T, b1, b2 *Batch) {
|
||||
if b1.seq != b2.seq {
|
||||
t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
|
||||
}
|
||||
if b1.len() != b2.len() {
|
||||
t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len())
|
||||
}
|
||||
p1, p2 := new(testBatch), new(testBatch)
|
||||
err := b1.replay(p1)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 1: ", err)
|
||||
}
|
||||
err = b2.replay(p2)
|
||||
if err != nil {
|
||||
t.Fatal("error when replaying batch 2: ", err)
|
||||
}
|
||||
for i := range p1.rec {
|
||||
r1, r2 := p1.rec[i], p2.rec[i]
|
||||
if r1.t != r2.t {
|
||||
t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t)
|
||||
}
|
||||
if !bytes.Equal(r1.key, r2.key) {
|
||||
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
|
||||
}
|
||||
if r1.t == tVal {
|
||||
if !bytes.Equal(r1.value, r2.value) {
|
||||
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatch_EncodeDecode(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("k"), []byte(""))
|
||||
b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
|
||||
b1.Delete([]byte("key10000"))
|
||||
b1.Delete([]byte("k"))
|
||||
buf := b1.encode()
|
||||
b2 := new(Batch)
|
||||
err := b2.decode(buf)
|
||||
if err != nil {
|
||||
t.Error("error when decoding batch: ", err)
|
||||
}
|
||||
compareBatch(t, b1, b2)
|
||||
}
|
||||
|
||||
func TestBatch_Append(t *testing.T) {
|
||||
b1 := new(Batch)
|
||||
b1.seq = 10009
|
||||
b1.Put([]byte("key1"), []byte("value1"))
|
||||
b1.Put([]byte("key2"), []byte("value2"))
|
||||
b1.Delete([]byte("key1"))
|
||||
b1.Put([]byte("foo"), []byte("foovalue"))
|
||||
b1.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a := new(Batch)
|
||||
b2a.seq = 10009
|
||||
b2a.Put([]byte("key1"), []byte("value1"))
|
||||
b2a.Put([]byte("key2"), []byte("value2"))
|
||||
b2a.Delete([]byte("key1"))
|
||||
b2b := new(Batch)
|
||||
b2b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b2b.Put([]byte("bar"), []byte("barvalue"))
|
||||
b2a.append(b2b)
|
||||
compareBatch(t, b1, b2a)
|
||||
}
|
||||
|
||||
func TestBatch_Size(t *testing.T) {
|
||||
b := new(Batch)
|
||||
for i := 0; i < 2; i++ {
|
||||
b.Put([]byte("key1"), []byte("value1"))
|
||||
b.Put([]byte("key2"), []byte("value2"))
|
||||
b.Delete([]byte("key1"))
|
||||
b.Put([]byte("foo"), []byte("foovalue"))
|
||||
b.Put([]byte("bar"), []byte("barvalue"))
|
||||
mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
|
||||
b.memReplay(mem)
|
||||
if b.size() != mem.Size() {
|
||||
t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
|
||||
}
|
||||
b.Reset()
|
||||
}
|
||||
}
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go (generated, vendored, new file, 464 lines)
@@ -0,0 +1,464 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
func randomString(r *rand.Rand, n int) []byte {
|
||||
b := new(bytes.Buffer)
|
||||
for i := 0; i < n; i++ {
|
||||
b.WriteByte(' ' + byte(r.Intn(95)))
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
|
||||
nn := int(float32(n) * frac)
|
||||
rb := randomString(r, nn)
|
||||
b := make([]byte, 0, n+nn)
|
||||
for len(b) < n {
|
||||
b = append(b, rb...)
|
||||
}
|
||||
return b[:n]
|
||||
}
|
||||
|
||||
type valueGen struct {
|
||||
src []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func newValueGen(frac float32) *valueGen {
|
||||
v := new(valueGen)
|
||||
r := rand.New(rand.NewSource(301))
|
||||
v.src = make([]byte, 0, 1048576+100)
|
||||
for len(v.src) < 1048576 {
|
||||
v.src = append(v.src, compressibleStr(r, frac, 100)...)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *valueGen) get(n int) []byte {
|
||||
if v.pos+n > len(v.src) {
|
||||
v.pos = 0
|
||||
}
|
||||
v.pos += n
|
||||
return v.src[v.pos-n : v.pos]
|
||||
}
|
||||
|
||||
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
|
||||
|
||||
type dbBench struct {
|
||||
b *testing.B
|
||||
stor storage.Storage
|
||||
db *DB
|
||||
|
||||
o *opt.Options
|
||||
ro *opt.ReadOptions
|
||||
wo *opt.WriteOptions
|
||||
|
||||
keys, values [][]byte
|
||||
}
|
||||
|
||||
func openDBBench(b *testing.B, noCompress bool) *dbBench {
|
||||
_, err := os.Stat(benchDB)
|
||||
if err == nil {
|
||||
err = os.RemoveAll(benchDB)
|
||||
if err != nil {
|
||||
b.Fatal("cannot remove old db: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
p := &dbBench{
|
||||
b: b,
|
||||
o: &opt.Options{},
|
||||
ro: &opt.ReadOptions{},
|
||||
wo: &opt.WriteOptions{},
|
||||
}
|
||||
p.stor, err = storage.OpenFile(benchDB)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open stor: ", err)
|
||||
}
|
||||
if noCompress {
|
||||
p.o.Compression = opt.NoCompression
|
||||
}
|
||||
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
b.Fatal("cannot open db: ", err)
|
||||
}
|
||||
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *dbBench) reopen() {
|
||||
p.db.Close()
|
||||
var err error
|
||||
p.db, err = Open(p.stor, p.o)
|
||||
if err != nil {
|
||||
p.b.Fatal("Reopen: got error: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) populate(n int) {
|
||||
p.keys, p.values = make([][]byte, n), make([][]byte, n)
|
||||
v := newValueGen(0.5)
|
||||
for i := range p.keys {
|
||||
p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) randomize() {
|
||||
m := len(p.keys)
|
||||
times := m * 2
|
||||
r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
|
||||
for n := 0; n < times; n++ {
|
||||
i, j := r1.Int()%m, r2.Int()%m
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
|
||||
p.values[i], p.values[j] = p.values[j], p.values[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) writes(perBatch int) {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
n := len(p.keys)
|
||||
m := n / perBatch
|
||||
if n%perBatch > 0 {
|
||||
m++
|
||||
}
|
||||
batches := make([]Batch, m)
|
||||
j := 0
|
||||
for i := range batches {
|
||||
first := true
|
||||
for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
|
||||
first = false
|
||||
batches[i].Put(p.keys[j], p.values[j])
|
||||
}
|
||||
}
|
||||
runtime.GC()
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range batches {
|
||||
err := db.Write(&(batches[i]), p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) gc() {
|
||||
p.keys, p.values = nil, nil
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
func (p *dbBench) puts() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
b.StartTimer()
|
||||
for i := range p.keys {
|
||||
err := db.Put(p.keys[i], p.values[i], p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("put failed: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
}
|
||||
|
||||
func (p *dbBench) fill() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
perBatch := 10000
|
||||
batch := new(Batch)
|
||||
for i, n := 0, len(p.keys); i < n; {
|
||||
first := true
|
||||
for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
|
||||
first = false
|
||||
batch.Put(p.keys[i], p.values[i])
|
||||
}
|
||||
err := db.Write(batch, p.wo)
|
||||
if err != nil {
|
||||
b.Fatal("write failed: ", err)
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *dbBench) gets() {
|
||||
b := p.b
|
||||
db := p.db
|
||||
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
_, err := db.Get(p.keys[i], p.ro)
|
||||
if err != nil {
|
||||
b.Error("got error: ", err)
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) seeks() {
|
||||
b := p.b
|
||||
|
||||
iter := p.newIter()
|
||||
defer iter.Release()
|
||||
b.ResetTimer()
|
||||
for i := range p.keys {
|
||||
if !iter.Seek(p.keys[i]) {
|
||||
b.Error("value not found for: ", string(p.keys[i]))
|
||||
}
|
||||
}
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func (p *dbBench) newIter() iterator.Iterator {
|
||||
iter := p.db.NewIterator(nil, p.ro)
|
||||
err := iter.Error()
|
||||
if err != nil {
|
||||
p.b.Fatal("cannot create iterator: ", err)
|
||||
}
|
||||
return iter
|
||||
}
|
||||
|
||||
func (p *dbBench) close() {
|
||||
if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
|
||||
p.b.Log("Block pool stats: ", bp)
|
||||
}
|
||||
p.db.Close()
|
||||
p.stor.Close()
|
||||
os.RemoveAll(benchDB)
|
||||
p.db = nil
|
||||
p.keys = nil
|
||||
p.values = nil
|
||||
runtime.GC()
|
||||
runtime.GOMAXPROCS(1)
|
||||
}
|
||||
|
||||
func BenchmarkDBWrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatch(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.writes(1000)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBWriteRandomSync(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.wo.Sync = true
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwrite(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBOverwriteRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.writes(1)
|
||||
p.randomize()
|
||||
p.writes(1)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBPut(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.puts()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBRead(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadGC(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadUncompressed(b *testing.B) {
|
||||
p := openDBBench(b, true)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
for iter.Next() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverse(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBReadReverseTable(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.reopen()
|
||||
p.gc()
|
||||
|
||||
iter := p.newIter()
|
||||
b.ResetTimer()
|
||||
iter.Last()
|
||||
for iter.Prev() {
|
||||
}
|
||||
iter.Release()
|
||||
b.StopTimer()
|
||||
b.SetBytes(116)
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeek(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBSeekRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.seeks()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGet(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
|
||||
func BenchmarkDBGetRandom(b *testing.B) {
|
||||
p := openDBBench(b, false)
|
||||
p.populate(b.N)
|
||||
p.fill()
|
||||
p.randomize()
|
||||
p.gets()
|
||||
p.close()
|
||||
}
|
||||
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go (generated, vendored, new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package cache provides the interface and implementations of cache algorithms.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// SetFunc is the function that will be called by Namespace.Get to create
// a cache object. If charge is less than one, the cache object will
// not be registered to the cache tree; if value is nil, the cache object
// will not be created.
|
||||
type SetFunc func() (charge int, value interface{})
|
||||
|
||||
// DelFin is the function that will be called as the result of a delete operation.
// exist == true indicates that the object exists, and pending == true indicates
// that the deletion has already happened but is not yet done (waiting for all
// handles to be released). exist == false means the object doesn't exist.
|
||||
type DelFin func(exist, pending bool)
|
||||
|
||||
// PurgeFin is the function that will be called as the result of a purge operation.
|
||||
type PurgeFin func(ns, key uint64)
|
||||
|
||||
// Cache is a cache tree. A cache instance must be goroutine-safe.
|
||||
type Cache interface {
|
||||
// SetCapacity sets cache tree capacity.
|
||||
SetCapacity(capacity int)
|
||||
|
||||
// Capacity returns cache tree capacity.
|
||||
Capacity() int
|
||||
|
||||
// Used returns used cache tree capacity.
|
||||
Used() int
|
||||
|
||||
// Size returns entire alive cache objects size.
|
||||
Size() int
|
||||
|
||||
// NumObjects returns number of alive objects.
|
||||
NumObjects() int
|
||||
|
||||
// GetNamespace gets cache namespace with the given id.
|
||||
// GetNamespace never returns nil.
|
||||
GetNamespace(id uint64) Namespace
|
||||
|
||||
// PurgeNamespace purges cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Purge.
|
||||
PurgeNamespace(id uint64, fin PurgeFin)
|
||||
|
||||
// ZapNamespace detaches cache namespace with the given id from this cache tree.
|
||||
// Also read Namespace.Zap.
|
||||
ZapNamespace(id uint64)
|
||||
|
||||
// Purge purges all cache namespaces from this cache tree.
// This behaves the same as calling the Namespace.Purge method on every cache namespace.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches all cache namespaces from this cache tree.
// This behaves the same as calling the Namespace.Zap method on every cache namespace.
|
||||
Zap()
|
||||
}
|
||||
|
||||
// Namespace is a cache namespace. A namespace instance must be goroutine-safe.
|
||||
type Namespace interface {
|
||||
// Get gets cache object with the given key.
|
||||
// If the cache object is not found and setf is not nil, Get will atomically create
// the cache object by calling setf. Otherwise Get returns nil.
|
||||
//
|
||||
// The returned cache handle should be released after use by calling Release
|
||||
// method.
|
||||
Get(key uint64, setf SetFunc) Handle
|
||||
|
||||
// Delete removes cache object with the given key from cache tree.
|
||||
// A deleted cache object will be released as soon as all of its handles have
|
||||
// been released.
|
||||
// Delete only happens once; subsequent deletes will consider the cache object
// nonexistent, even if the cache object isn't released yet.
|
||||
//
|
||||
// If not nil, fin will be called if the cache object doesn't exist or when
// it is finally released.
|
||||
//
|
||||
// Delete returns true if such a cache object exists and has never been deleted.
|
||||
Delete(key uint64, fin DelFin) bool
|
||||
|
||||
// Purge removes all cache objects within this namespace from cache tree.
|
||||
// This is the same as doing delete on all cache objects.
|
||||
//
|
||||
// If not nil, fin will be called on each cache object when it is finally
// released.
|
||||
Purge(fin PurgeFin)
|
||||
|
||||
// Zap detaches namespace from cache tree and release all its cache objects.
|
||||
// A zapped namespace can never be filled again.
|
||||
// Calling Get on zapped namespace will always return nil.
|
||||
Zap()
|
||||
}
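// Editorial note, not part of the upstream source: a typical lookup through a
// namespace looks like
//
//	ns := c.GetNamespace(1)
//	h := ns.Get(key, func() (int, interface{}) { return len(blob), blob })
//	if h != nil {
//		use(h.Value())
//		h.Release()
//	}
//
// where c, key, blob and use are placeholders; the SetFunc supplies both the
// charge (size) and the value, and the handle keeps the object alive until
// Release is called.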
|
||||
|
||||
// Handle is a cache handle.
|
||||
type Handle interface {
|
||||
// Release releases this cache handle. This method can be safely called multiple
|
||||
// times.
|
||||
Release()
|
||||
|
||||
// Value returns value of this cache handle.
|
||||
// Value returns nil after this cache handle has been released.
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
const (
|
||||
DelNotExist = iota
|
||||
DelExist
|
||||
DelPendig
|
||||
)
|
||||
|
||||
// Namespace state.
|
||||
type nsState int
|
||||
|
||||
const (
|
||||
nsEffective nsState = iota
|
||||
nsZapped
|
||||
)
|
||||
|
||||
// Node state.
|
||||
type nodeState int
|
||||
|
||||
const (
|
||||
nodeZero nodeState = iota
|
||||
nodeEffective
|
||||
nodeEvicted
|
||||
nodeDeleted
|
||||
)
|
||||
|
||||
// Fake handle.
|
||||
type fakeHandle struct {
|
||||
value interface{}
|
||||
fin func()
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Value() interface{} {
|
||||
if atomic.LoadUint32(&h.once) == 0 {
|
||||
return h.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *fakeHandle) Release() {
|
||||
if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
|
||||
return
|
||||
}
|
||||
if h.fin != nil {
|
||||
h.fin()
|
||||
h.fin = nil
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.