Mirror of https://github.com/syncthing/syncthing.git, synced 2026-01-04 11:59:12 -05:00

Compare commits (20 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c294d5f087 | |
| | 10ead2e61f | |
| | 960b40fa89 | |
| | afad329e99 | |
| | 4025284fba | |
| | a595e814dd | |
| | 963d8121d9 | |
| | 03019988b1 | |
| | 97115afa32 | |
| | c9f5bae177 | |
| | 2bd11ca4e3 | |
| | a5de1acb46 | |
| | 5581751e9d | |
| | 055ae92273 | |
| | dea7c77055 | |
| | 765dda6ad7 | |
| | 28702a1c9d | |
| | 40d1226612 | |
| | effe8ce8a9 | |
| | 4c3ba24826 | |
.gitattributes (vendored, 8 lines changed)

@@ -1,8 +0,0 @@
# Text files use LF line endings in this repository
* text=auto

# Except the dependencies, which we leave alone
vendor/** -text=auto

# Diffs on these files are meaningless
*.svg -diff
.gitignore (vendored, 15 lines changed)

@@ -1,19 +1,18 @@
/syncthing
/stdiscosrv
./syncthing
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
*.sublime*
.idea/
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
!gui/scripts/syncthing
.DS_Store
syncthing.md5
syncthing.exe.md5
RELEASE
deb
lib/auto/gui.files.go
snapcraft.yaml
AUTHORS (168 lines changed)

@@ -1,122 +1,50 @@
# This is the official list of Syncthing authors for copyright purposes.
# The format is:
#
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The NICKS list is auto generated from this file.

Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (ProactiveServices) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com> <ProactiveServices@users.noreply.github.com>
Adel Qalieh (adelq) <aqalieh95@gmail.com> <adelq@users.noreply.github.com>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrey D (scienmind) <scintertech@cryptolab.net>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Shepherd (benshep) <bjashepherd@gmail.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (Moter8) <moter8@gmail.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
Darshil Chanpura (dtchanpura) <dtchanpura@gmail.com> <dcprime314@gmail.com>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Heiko Zuerker (Smiley73) <heiko@zuerker.org>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
Jaya Chithra (jayachithra) <s.k.jayachithra@gmail.com>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
Jose Manuel Delicado (jmdaweb) <jmdaweb@hotmail.com> <jmdaweb@users.noreply.github.com>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Kevin White, Jr. (kwhite17) <kevinwhite1710@gmail.com>
Kurt Fitzner (Kudalufi) <kurt@va1der.ca> <kurt.fitzner@gmail.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Leo Arias (elopio) <yo@elopio.net>
Liu Siyuan (liusy182) <liusy182@gmail.com> <liusy182@hotmail.com>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
Mark Pulford (mpx) <mark@kyne.com.au>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Niels Peter Roest (Niller303) <nielsproest@hotmail.com> <seje.niels@hotmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Robert Carosi (nov1n) <robert@carosi.nl>
Roman Zaynetdinov (zaynetro) <romanznet@gmail.com>
Ross Smith II (rasa) <ross@smithii.com>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Sacheendra Talluri (sacheendra) <sacheendra.t@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Simon Frei (imsodin) <freisim93@gmail.com>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Suhas Gundimeda (snugghash) <suhas.gundimeda@gmail.com> <snugghash@gmail.com>
Taylor Khan (nelsonkhan) <nelsonkhan@gmail.com>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tobias Tom (tobiastom) <t.tom@succont.de>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Unrud (Unrud) <unrud@openaliasbox.org> <Unrud@users.noreply.github.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
William A. Kennington III (wkennington) <william@wkennington.com>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de> <wulf@weich-kr.de>
Xavier O. (damajor) <damajor@gmail.com>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Curthoys <ben@bencurthoys.com>
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Brendan Long <self@brendanlong.com>
Caleb Callaway <enlightened.despot@gmail.com>
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Chris Joel <chris@scriptolo.gy>
Colin Kennedy <moshen.colin@gmail.com>
Daniel Martí <mvdan@mvdan.cc>
Dennis Wilson <dw@risu.io>
Dominik Heidler <dominik@heidler.eu>
Emil Hessman <emil@hessman.se>
Federico Castagnini <federico.castagnini@gmail.com>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Gilli Sigurdsson <gilli@vx.is>
Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Johan Vromans <jvromans@squirrel.nl>
Karol Różycki <rozycki.karol@gmail.com>
Kamada Ken'ichi <kamada@nanohz.org>
Lode Hoste <zillode@zillode.be>
Marcin Dziadus <dziadus.marcin@gmail.com>
Marc Laporte <marc@marclaporte.com>
Marc Pujol <kilburn@la3.org>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg <peter@speartail.com>
Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Stefan Tatschner <stefan@sevenbyte.org>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>
Tomas Cerveny <kozec@kozec.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Vil Brekin <vilbrekin@gmail.com>
CONTRIBUTING.md (115 lines changed)

@@ -32,32 +32,109 @@ latest info on Transifex.
## Contributing Code

Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](https://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://github.com/syncthing/syncthing/wiki/FAQ#why-are-you-being-so-hard-on-my-pull-request);
it's all in the name of quality. :) Following the points below will make this
a smoother process.

## Contributing Documentation
Individuals making significant and valuable contributions are given
commit-access to the project. If you make a significant contribution and
are not considered for commit-access, please contact any of the
Syncthing core team members.

Updates to the [documentation site](https://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.

### Authorship

All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit;

    $ git config --global user.name "Jane Doe"
    $ git config --global user.email janedoe@example.com

You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.

### Core Team

The Syncthing core team currently consists of the following members;

- Jakob Borg (@calmh)
- Audrius Butkevicius (@AudriusButkevicius)

## Coding Style

- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
  as much as makes sense.

- All text files use Unix line endings.

- Each commit should be `go fmt` clean.

- The commit message subject should be a single short sentence
  describing the change, starting with a capital letter.

- Commits that resolve an existing issue must include the issue number
  as `(fixes #123)` at the end of the commit message subject.

- Imports are grouped per `goimports` standard; that is, standard
  library first, then third party libraries after a blank line (see the
  sketch just after this list).

- A contribution solving a single issue or introducing a single new
  feature should probably be a single commit based on the current
  `master` branch. You may be asked to "rebase" or "squash" your pull
  request to make sure this is the case, especially if there have been
  amendments during review.
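A minimal illustration of that `goimports` grouping, using one of the
project's own dependencies as the third party import:

    import (
        "fmt"
        "os"

        "github.com/calmh/logger"
    )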
## Licensing

All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:
All contributions are made under the same MPLv2 license as the rest of
the project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.

- Certain commands (under cmd/...) may have a separate license, indicated by
  the presence of a LICENSE file in the corresponding directory.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.

- The documentation (man/...) is licensed under the Creative Commons
  Attribution 4.0 International License.
## Building

- Projects under vendor/... are copyright by and licensed from their
  respective original authors. Contributions should be made to the original
  project, not here.
[See the documentation](https://github.com/syncthing/syncthing/wiki/Building)
on how to get started with a build environment.

Regardless of the license in effect, you retain the copyright to your
contribution.
## Branches

- `master` is the main branch containing good code that will end up in
  the next release. You should base your work on it. It won't ever be
  rebased or force-pushed to.

- `vx.y` branches exist to make patch releases on otherwise obsolete
  minor releases. Should only contain fixes cherry picked from master.
  Don't base any work on them.

- Other branches are probably topic branches and may be subject to
  rebasing. Don't base any work on them unless you specifically know
  otherwise.

## Tags

All releases are tagged semver style as `vx.y.z`. Release tags are
signed by GPG key BCE524C7.

## Tests

Yes please!

## Documentation

[Over here!](https://github.com/syncthing/syncthing/wiki)

## License

MPLv2
GOALS.md (83 lines changed)

@@ -1,83 +0,0 @@
# The Syncthing Goals

Syncthing is a **continuous file synchronization program**. It synchronizes
files between two or more computers. We strive to fulfill the goals below.
The goals are listed in order of importance, the most important one being
the first.

> "Syncing files" here is precise. It means we specifically exclude things
> that are not files - calendar items, instant messages, and so on. If those
> are in fact stored as files on disk, they can of course be synced as
> files.

Syncthing should be:

### 1. Safe From Data Loss

Protecting the user's data is paramount. We take every reasonable precaution
to avoid corrupting the user's files.

> This is the overriding goal, without which synchronizing files becomes
> pointless. This means that we do not make unsafe trade offs for the sake
> of performance or, in some cases, even usability.

### 2. Secure Against Attackers

Again, protecting the user's data is paramount. Regardless of our other
goals we must never allow the user's data to be susceptible to eavesdropping
or modification by unauthorized parties.

> This should be understood in context. It is not necessarily reasonable to
> expect Syncthing to be resistant against well equipped state level
> attackers. We will however do our best. Note also that this is different
> from anonymity which is not, currently, a goal.

### 3. Easy to Use

Syncthing should be approachable, understandable and inclusive.

> Complex concepts and maths form the base of Syncthing's functionality.
> This should nonetheless be abstracted or hidden to a degree where
> Syncthing is usable by the general public.

### 4. Automatic

User interaction should be required only when absolutely necessary.

> Specifically this means that changes to files are picked up without
> prompting, conflicts are resolved without prompting and connections are
> maintained without prompting. We only prompt the user when it is required
> to fulfill one of the (overriding) Secure, Safe or Easy goals.

### 5. Universally Available

Syncthing should run on every common computer. We are mindful that the
latest technology is not always available to any given individual.

> Computers include desktops, laptops, servers, virtual machines, small
> general purpose computers such as Raspberry Pis and, *where possible*,
> tablets and phones. NAS appliances, toasters, cars, firearms, thermostats
> and so on may include computing capabilities but it is not our goal for
> Syncthing to run smoothly on these devices.

### 6. For Individuals

Syncthing is primarily about empowering the individual user with safe,
secure and easy to use file synchronization.

> We acknowledge that it's also useful in an enterprise setting and include
> functionality to support that. If this is in conflict with the
> requirements of the individual, those will however take priority.

### 7. Everything Else

There are many things we care about that don't make it on to the list. It is
fine to optimize for these values as well, as long as they are not in
conflict with the stated goals above.

> For example, performance is a thing we care about. We just don't care more
> about it than safety, security, etc. Maintainability of the code base and
> providing entertainment value for the maintainers are also things that
> matter. It is understood that there are aspects of Syncthing that are
> suboptimal or even in opposition with the goals above. However, we
> continuously strive to align Syncthing more and more with these goals.
Godeps/Godeps.json (generated, new file, 73 lines)

@@ -0,0 +1,73 @@
{
	"ImportPath": "github.com/syncthing/syncthing",
	"GoVersion": "go1.4",
	"Packages": [
		"./cmd/..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/bkaradzic/go-lz4",
			"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
		},
		{
			"ImportPath": "github.com/calmh/logger",
			"Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
		},
		{
			"ImportPath": "github.com/calmh/luhn",
			"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
		},
		{
			"ImportPath": "github.com/calmh/xdr",
			"Rev": "ff948d7666c5e0fd18d398f6278881724d36a90b"
		},
		{
			"ImportPath": "github.com/juju/ratelimit",
			"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
		},
		{
			"ImportPath": "github.com/kardianos/osext",
			"Rev": "91292666f7e40f03185cdd1da7d85633c973eca7"
		},
		{
			"ImportPath": "github.com/syncthing/protocol",
			"Rev": "1a4398cc55c8fe82a964097eaf59f2475b020a49"
		},
		{
			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
			"Rev": "e3f32eb300aa1e514fe8ba58d008da90a062273d"
		},
		{
			"ImportPath": "github.com/syndtr/gosnappy/snappy",
			"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
		},
		{
			"ImportPath": "github.com/vitrun/qart/coding",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		},
		{
			"ImportPath": "github.com/vitrun/qart/gf256",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		},
		{
			"ImportPath": "github.com/vitrun/qart/qr",
			"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
		},
		{
			"ImportPath": "golang.org/x/crypto/bcrypt",
			"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
		},
		{
			"ImportPath": "golang.org/x/crypto/blowfish",
			"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
		},
		{
			"ImportPath": "golang.org/x/text/transform",
			"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
		},
		{
			"ImportPath": "golang.org/x/text/unicode/norm",
			"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
		}
	]
}
Godeps/Readme (generated, new file, 5 lines)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
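For context (not part of the diff): godep regenerates this tree from the
packages listed in Godeps.json, presumably with something like the
following, assuming godep is installed:

    $ godep save ./cmd/...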
Godeps/_workspace/.gitignore (generated, vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
/pkg
/bin
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
/lz4-example/lz4-example
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml (generated, vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
language: go

go:
  - 1.1
  - 1.2
  - 1.3
  - tip
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md (generated, vendored, new file, 71 lines)

@@ -0,0 +1,71 @@
go-lz4
======

go-lz4 is a port of the LZ4 lossless compression algorithm to Go. The original C code
is located at:

https://code.google.com/p/lz4/

Status
------
[![Build Status](https://secure.travis-ci.org/bkaradzic/go-lz4.png)](http://travis-ci.org/bkaradzic/go-lz4)
[![GoDoc](https://godoc.org/github.com/bkaradzic/go-lz4?status.png)](https://godoc.org/github.com/bkaradzic/go-lz4)

Usage
-----

    go get github.com/bkaradzic/go-lz4

    import "github.com/bkaradzic/go-lz4"

The package name is `lz4`.

Notes
-----

* go-lz4 saves a uint32 with the original uncompressed length at the beginning
  of the encoded buffer. This may get in the way of interoperability with
  other implementations.

Contributors
------------

Damian Gryski ([@dgryski](https://github.com/dgryski))
Dustin Sallings ([@dustin](https://github.com/dustin))

Contact
-------

[@bkaradzic](https://twitter.com/bkaradzic)
http://www.stuckingeometry.com

Project page
https://github.com/bkaradzic/go-lz4

License
-------

Copyright 2011-2012 Branimir Karadzic. All rights reserved.
Copyright 2013 Damian Gryski. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
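A short usage sketch of the `Encode`/`Decode` API exercised by the tests
below; passing `nil` as the destination lets the package allocate the buffer:

    package main

    import (
        "fmt"

        lz4 "github.com/bkaradzic/go-lz4"
    )

    func main() {
        data := []byte("hello hello hello hello")

        // Encode prepends the uncompressed length as a uint32 (see Notes above).
        compressed, err := lz4.Encode(nil, data)
        if err != nil {
            panic(err)
        }

        // Decode uses that length to size the output buffer.
        decompressed, err := lz4.Decode(nil, compressed)
        if err != nil {
            panic(err)
        }

        fmt.Println(string(decompressed))
    }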
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go (generated, vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
package lz4

import (
	"bytes"
	"io/ioutil"
	"testing"
)

var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")

func roundtrip(t *testing.T, input []byte) {
	dst, err := Encode(nil, input)
	if err != nil {
		t.Errorf("got error during compression: %s", err)
	}

	output, err := Decode(nil, dst)

	if err != nil {
		t.Errorf("got error during decompress: %s", err)
	}

	if !bytes.Equal(output, input) {
		t.Errorf("roundtrip failed")
	}
}

func TestEmpty(t *testing.T) {
	roundtrip(t, nil)
}

func TestLengths(t *testing.T) {
	for i := 0; i < 1024; i++ {
		roundtrip(t, testfile[:i])
	}

	for i := 1024; i < 4096; i += 23 {
		roundtrip(t, testfile[:i])
	}
}

func TestWords(t *testing.T) {
	roundtrip(t, testfile)
}

func BenchmarkLZ4Encode(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Encode(nil, testfile)
	}
}

func BenchmarkLZ4Decode(b *testing.B) {
	var compressed, _ = Encode(nil, testfile)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		Decode(nil, compressed)
	}
}
@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
		length += ln
	}

	if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
	if int(d.spos+length) > len(d.src) {
		return nil, ErrCorrupt
	}

@@ -179,12 +179,7 @@ func Decode(dst, src []byte) ([]byte, error) {
	}

	literal := d.dpos - d.ref

	if literal < 4 {
		if int(d.dpos+4) > len(d.dst) {
			return nil, ErrCorrupt
		}

		d.cp(4, decr[literal])
	} else {
		length += 4
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt (generated, vendored, new file, 13052 lines)

File diff suppressed because it is too large.
@@ -25,10 +25,8 @@

package lz4

import (
	"encoding/binary"
	"errors"
)
import "encoding/binary"
import "errors"

const (
	minMatch = 4
lib/logger/LICENSE → Godeps/_workspace/src/github.com/calmh/logger/LICENSE (generated, vendored, 0 lines changed)
Godeps/_workspace/src/github.com/calmh/logger/README.md (generated, vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
logger
======

A small wrapper around `log` to provide log levels.

Documentation
-------------

http://godoc.org/github.com/calmh/logger

License
-------

MIT
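A minimal usage sketch, based on the API in logger.go below:

    package main

    import "github.com/calmh/logger"

    func main() {
        l := logger.New()

        // Register a callback for messages logged at the WARNING level.
        l.AddHandler(logger.LevelWarn, func(lvl logger.LogLevel, msg string) {
            // Forward msg to a GUI, event log, etc.
        })

        l.Infoln("starting up")
        l.Warnf("%d retries left", 3)
    }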
Godeps/_workspace/src/github.com/calmh/logger/logger.go (generated, vendored, new file, 160 lines)

@@ -0,0 +1,160 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// Package logger implements a standardized logger with callback functionality
package logger

import (
	"fmt"
	"log"
	"os"
	"strings"
	"sync"
)

type LogLevel int

const (
	LevelDebug LogLevel = iota
	LevelInfo
	LevelOK
	LevelWarn
	LevelFatal
	NumLevels
)

// A MessageHandler is called with the log level and message text.
type MessageHandler func(l LogLevel, msg string)

type Logger struct {
	logger   *log.Logger
	handlers [NumLevels][]MessageHandler
	mut      sync.Mutex
}

// The default logger logs to standard output with a time prefix.
var DefaultLogger = New()

func New() *Logger {
	return &Logger{
		logger: log.New(os.Stdout, "", log.Ltime),
	}
}

// AddHandler registers a new MessageHandler to receive messages with the
// specified log level or above.
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
	l.mut.Lock()
	defer l.mut.Unlock()
	l.handlers[level] = append(l.handlers[level], h)
}

// See log.SetFlags
func (l *Logger) SetFlags(flag int) {
	l.logger.SetFlags(flag)
}

// See log.SetPrefix
func (l *Logger) SetPrefix(prefix string) {
	l.logger.SetPrefix(prefix)
}

func (l *Logger) callHandlers(level LogLevel, s string) {
	for _, h := range l.handlers[level] {
		h(level, strings.TrimSpace(s))
	}
}

// Debugln logs a line with a DEBUG prefix.
func (l *Logger) Debugln(vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintln(vals...)
	l.logger.Output(2, "DEBUG: "+s)
	l.callHandlers(LevelDebug, s)
}

// Debugf logs a formatted line with a DEBUG prefix.
func (l *Logger) Debugf(format string, vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintf(format, vals...)
	l.logger.Output(2, "DEBUG: "+s)
	l.callHandlers(LevelDebug, s)
}

// Infoln logs a line with an INFO prefix.
func (l *Logger) Infoln(vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintln(vals...)
	l.logger.Output(2, "INFO: "+s)
	l.callHandlers(LevelInfo, s)
}

// Infof logs a formatted line with an INFO prefix.
func (l *Logger) Infof(format string, vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintf(format, vals...)
	l.logger.Output(2, "INFO: "+s)
	l.callHandlers(LevelInfo, s)
}

// Okln logs a line with an OK prefix.
func (l *Logger) Okln(vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintln(vals...)
	l.logger.Output(2, "OK: "+s)
	l.callHandlers(LevelOK, s)
}

// Okf logs a formatted line with an OK prefix.
func (l *Logger) Okf(format string, vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintf(format, vals...)
	l.logger.Output(2, "OK: "+s)
	l.callHandlers(LevelOK, s)
}

// Warnln logs a line with a WARNING prefix.
func (l *Logger) Warnln(vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintln(vals...)
	l.logger.Output(2, "WARNING: "+s)
	l.callHandlers(LevelWarn, s)
}

// Warnf logs a formatted line with a WARNING prefix.
func (l *Logger) Warnf(format string, vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintf(format, vals...)
	l.logger.Output(2, "WARNING: "+s)
	l.callHandlers(LevelWarn, s)
}

// Fatalln logs a line with a FATAL prefix and exits the process with exit
// code 1.
func (l *Logger) Fatalln(vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintln(vals...)
	l.logger.Output(2, "FATAL: "+s)
	l.callHandlers(LevelFatal, s)
	os.Exit(1)
}

// Fatalf logs a formatted line with a FATAL prefix and exits the process with
// exit code 1.
func (l *Logger) Fatalf(format string, vals ...interface{}) {
	l.mut.Lock()
	defer l.mut.Unlock()
	s := fmt.Sprintf(format, vals...)
	l.logger.Output(2, "FATAL: "+s)
	l.callHandlers(LevelFatal, s)
	os.Exit(1)
}
Godeps/_workspace/src/github.com/calmh/logger/logger_test.go (generated, vendored, new file, 58 lines)

@@ -0,0 +1,58 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package logger

import (
	"strings"
	"testing"
)

func TestAPI(t *testing.T) {
	l := New()
	l.SetFlags(0)
	l.SetPrefix("testing")

	debug := 0
	l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
	info := 0
	l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
	warn := 0
	l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
	ok := 0
	l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))

	l.Debugf("test %d", 0)
	l.Debugln("test", 0)
	l.Infof("test %d", 1)
	l.Infoln("test", 1)
	l.Warnf("test %d", 2)
	l.Warnln("test", 2)
	l.Okf("test %d", 3)
	l.Okln("test", 3)

	if debug != 2 {
		t.Errorf("Debug handler called %d != 2 times", debug)
	}
	if info != 2 {
		t.Errorf("Info handler called %d != 2 times", info)
	}
	if warn != 2 {
		t.Errorf("Warn handler called %d != 2 times", warn)
	}
	if ok != 2 {
		t.Errorf("Ok handler called %d != 2 times", ok)
	}
}

func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
	return func(l LogLevel, msg string) {
		*counter++
		if l != expectl {
			t.Errorf("Incorrect message level %d != %d", l, expectl)
		}
		if !strings.HasSuffix(msg, expectmsg) {
			t.Errorf("%q does not end with %q", msg, expectmsg)
		}
	}
}
cmd/stcli/LICENSE → Godeps/_workspace/src/github.com/calmh/luhn/LICENSE (generated, vendored, 2 lines changed)

@@ -1,4 +1,4 @@
Copyright (C) 2014 Audrius Butkevičius
Copyright (C) 2014 Jakob Borg

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
Godeps/_workspace/src/github.com/calmh/luhn/luhn.go (generated, vendored, new file, 70 lines)

@@ -0,0 +1,70 @@
// Copyright (C) 2014 Jakob Borg

// Package luhn generates and validates Luhn mod N check digits.
package luhn

import (
	"fmt"
	"strings"
)

// An alphabet is a string of N characters, representing the digits of a given
// base N.
type Alphabet string

var (
	Base32 Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
)

// Generate returns a check digit for the string s, which should be composed
// of characters from the Alphabet a.
func (a Alphabet) Generate(s string) (rune, error) {
	if err := a.check(); err != nil {
		return 0, err
	}

	factor := 1
	sum := 0
	n := len(a)

	for i := range s {
		codepoint := strings.IndexByte(string(a), s[i])
		if codepoint == -1 {
			return 0, fmt.Errorf("Digit %q not valid in alphabet %q", s[i], a)
		}
		addend := factor * codepoint
		if factor == 2 {
			factor = 1
		} else {
			factor = 2
		}
		addend = (addend / n) + (addend % n)
		sum += addend
	}
	remainder := sum % n
	checkCodepoint := (n - remainder) % n
	return rune(a[checkCodepoint]), nil
}

// Validate returns true if the last character of the string s is correct, for
// a string s composed of characters in the alphabet a.
func (a Alphabet) Validate(s string) bool {
	t := s[:len(s)-1]
	c, err := a.Generate(t)
	if err != nil {
		return false
	}
	return rune(s[len(s)-1]) == c
}

// check returns an error if the given alphabet does not consist of unique characters
func (a Alphabet) check() error {
	cm := make(map[byte]bool, len(a))
	for i := range a {
		if cm[a[i]] {
			return fmt.Errorf("Digit %q non-unique in alphabet %q", a[i], a)
		}
		cm[a[i]] = true
	}
	return nil
}
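A brief usage sketch of the package above; "7992739871" is the classic base
10 Luhn example, whose check digit is 3:

    package main

    import (
        "fmt"

        "github.com/calmh/luhn"
    )

    func main() {
        a := luhn.Alphabet("0123456789")

        c, err := a.Generate("7992739871")
        if err != nil {
            panic(err)
        }
        fmt.Printf("check digit: %c\n", c) // 3

        // Validate treats the last character as the check digit.
        fmt.Println(a.Validate("79927398713")) // true
    }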
lib/protocol/luhn_test.go → Godeps/_workspace/src/github.com/calmh/luhn/luhn_test.go (generated, vendored, 33 lines changed)

@@ -1,15 +1,17 @@
// Copyright (C) 2014 The Protocol Authors.
// Copyright (C) 2014 Jakob Borg

package protocol
package luhn_test

import (
	"testing"

	"github.com/calmh/luhn"
)

func TestGenerate(t *testing.T) {
	// Base 6 Luhn
	a := luhnAlphabet("abcdef")
	c, err := a.generate("abcdef")
	a := luhn.Alphabet("abcdef")
	c, err := a.Generate("abcdef")
	if err != nil {
		t.Fatal(err)
	}

@@ -18,8 +20,8 @@ func TestGenerate(t *testing.T) {
	}

	// Base 10 Luhn
	a = luhnAlphabet("0123456789")
	c, err = a.generate("7992739871")
	a = luhn.Alphabet("0123456789")
	c, err = a.Generate("7992739871")
	if err != nil {
		t.Fatal(err)
	}

@@ -29,8 +31,17 @@
	}

func TestInvalidString(t *testing.T) {
	a := luhnAlphabet("ABC")
	_, err := a.generate("7992739871")
	a := luhn.Alphabet("ABC")
	_, err := a.Generate("7992739871")
	t.Log(err)
	if err == nil {
		t.Error("Unexpected nil error")
	}
}

func TestBadAlphabet(t *testing.T) {
	a := luhn.Alphabet("01234566789")
	_, err := a.Generate("7992739871")
	t.Log(err)
	if err == nil {
		t.Error("Unexpected nil error")

@@ -38,11 +49,11 @@ func TestInvalidString(t *testing.T) {
	}

func TestValidate(t *testing.T) {
	a := luhnAlphabet("abcdef")
	if !a.luhnValidate("abcdefe") {
	a := luhn.Alphabet("abcdef")
	if !a.Validate("abcdefe") {
		t.Errorf("Incorrect validation response for abcdefe")
	}
	if a.luhnValidate("abcdefd") {
	if a.Validate("abcdefd") {
		t.Errorf("Incorrect validation response for abcdefd")
	}
}
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
coverage.out
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml (generated, vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
language: go
go:
  - tip

install:
  - export PATH=$PATH:$HOME/gopath/bin
  - go get code.google.com/p/go.tools/cmd/cover
  - go get github.com/mattn/goveralls

script:
  - ./generate.sh
  - go test -coverprofile=coverage.out

after_success:
  - goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"

env:
  global:
    secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
Godeps/_workspace/src/github.com/calmh/xdr/README.md (generated, vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
xdr
===

[![Build Status](https://img.shields.io/travis/calmh/xdr.svg?style=flat)](https://travis-ci.org/calmh/xdr)
[![Coverage Status](https://img.shields.io/coveralls/calmh/xdr.svg?style=flat)](https://coveralls.io/r/calmh/xdr?branch=master)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/calmh/xdr)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT)

This is an XDR encoding/decoding library. It uses code generation and
not reflection. It supports the IPDR bastardized XDR format when built
with `-tags ipdr`.
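A hedged sketch of what that code generation consumes: a plain Go struct
whose fields may carry the `// max:N` or `// noencode` comment conventions
parsed by cmd/genxdr (included further below). Running genxdr over such a
file emits EncodeXDR/DecodeXDR methods like those in bench_xdr_test.go.
The struct and field names here are hypothetical:

    // Hypothetical genxdr input; names are illustrative only.
    type Message struct {
        ID     uint64
        Name   string // max:64 (generated code enforces this limit)
        Data   []byte
        Secret string // noencode (skipped by the generator)
    }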
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go (generated, vendored, new file, 117 lines)

@@ -0,0 +1,117 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr_test

import (
	"io"
	"io/ioutil"
	"testing"

	"github.com/calmh/xdr"
)

type XDRBenchStruct struct {
	I1  uint64
	I2  uint32
	I3  uint16
	I4  uint8
	Bs0 []byte // max:128
	Bs1 []byte
	S0  string // max:128
	S1  string
}

var res []byte // not to be optimized away
var s = XDRBenchStruct{
	I1:  42,
	I2:  43,
	I3:  44,
	I4:  45,
	Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
	Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
	S0:  "Hello World! String one.",
	S1:  "Hello World! String two.",
}
var e []byte

func init() {
	e, _ = s.MarshalXDR()
}

func BenchmarkThisMarshal(b *testing.B) {
	for i := 0; i < b.N; i++ {
		res, _ = s.MarshalXDR()
	}
}

func BenchmarkThisUnmarshal(b *testing.B) {
	var t XDRBenchStruct
	for i := 0; i < b.N; i++ {
		err := t.UnmarshalXDR(e)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkThisEncode(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, err := s.EncodeXDR(ioutil.Discard)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkThisEncoder(b *testing.B) {
	w := xdr.NewWriter(ioutil.Discard)
	for i := 0; i < b.N; i++ {
		_, err := s.encodeXDR(w)
		if err != nil {
			b.Fatal(err)
		}
	}
}

type repeatReader struct {
	data []byte
}

func (r *repeatReader) Read(bs []byte) (n int, err error) {
	if len(bs) > len(r.data) {
		err = io.EOF
	}
	n = copy(bs, r.data)
	r.data = r.data[n:]
	return n, err
}

func (r *repeatReader) Reset(bs []byte) {
	r.data = bs
}

func BenchmarkThisDecode(b *testing.B) {
	rr := &repeatReader{e}
	var t XDRBenchStruct
	for i := 0; i < b.N; i++ {
		err := t.DecodeXDR(rr)
		if err != nil {
			b.Fatal(err)
		}
		rr.Reset(e)
	}
}

func BenchmarkThisDecoder(b *testing.B) {
	rr := &repeatReader{e}
	r := xdr.NewReader(rr)
	var t XDRBenchStruct
	for i := 0; i < b.N; i++ {
		err := t.decodeXDR(r)
		if err != nil {
			b.Fatal(err)
		}
		rr.Reset(e)
	}
}
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go (generated, vendored, new file, 199 lines)

@@ -0,0 +1,199 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************

package xdr_test

import (
	"bytes"
	"io"

	"github.com/calmh/xdr"
)

/*

XDRBenchStruct Structure:

 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                                                               |
+                         I1 (64 bits)                          +
|                                                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                              I2                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|            0x0000             |              I3               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             uint8                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of Bs0                         |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     Bs0 (variable length)                     \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of Bs1                         |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     Bs1 (variable length)                     \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of S0                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     S0 (variable length)                      \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of S1                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     S1 (variable length)                      \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


struct XDRBenchStruct {
	unsigned hyper I1;
	unsigned int I2;
	unsigned int I3;
	uint8 I4;
	opaque Bs0<128>;
	opaque Bs1<>;
	string S0<128>;
	string S1<>;
}

*/

func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
	return o.AppendXDR(make([]byte, 0, 128))
}

func (o XDRBenchStruct) MustMarshalXDR() []byte {
	bs, err := o.MarshalXDR()
	if err != nil {
		panic(err)
	}
	return bs
}

func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
	var aw = xdr.AppendWriter(bs)
	var xw = xdr.NewWriter(&aw)
	_, err := o.encodeXDR(xw)
	return []byte(aw), err
}

func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteUint64(o.I1)
	xw.WriteUint32(o.I2)
	xw.WriteUint16(o.I3)
	xw.WriteUint8(o.I4)
	if l := len(o.Bs0); l > 128 {
		return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
	}
	xw.WriteBytes(o.Bs0)
	xw.WriteBytes(o.Bs1)
	if l := len(o.S0); l > 128 {
		return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
	}
	xw.WriteString(o.S0)
	xw.WriteString(o.S1)
	return xw.Tot(), xw.Error()
}

func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
	var br = bytes.NewReader(bs)
	var xr = xdr.NewReader(br)
	return o.decodeXDR(xr)
}

func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
	o.I1 = xr.ReadUint64()
	o.I2 = xr.ReadUint32()
	o.I3 = xr.ReadUint16()
	o.I4 = xr.ReadUint8()
	o.Bs0 = xr.ReadBytesMax(128)
	o.Bs1 = xr.ReadBytes()
	o.S0 = xr.ReadStringMax(128)
	o.S1 = xr.ReadString()
	return xr.Error()
}

/*

repeatReader Structure:

 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of data                        |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     data (variable length)                    \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


struct repeatReader {
	opaque data<>;
}

*/

func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}

func (o repeatReader) MarshalXDR() ([]byte, error) {
	return o.AppendXDR(make([]byte, 0, 128))
}

func (o repeatReader) MustMarshalXDR() []byte {
	bs, err := o.MarshalXDR()
	if err != nil {
		panic(err)
	}
	return bs
}

func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
	var aw = xdr.AppendWriter(bs)
	var xw = xdr.NewWriter(&aw)
	_, err := o.encodeXDR(xw)
	return []byte(aw), err
}

func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
	xw.WriteBytes(o.data)
	return xw.Tot(), xw.Error()
}

func (o *repeatReader) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}

func (o *repeatReader) UnmarshalXDR(bs []byte) error {
	var br = bytes.NewReader(bs)
	var xr = xdr.NewReader(br)
	return o.decodeXDR(xr)
}

func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
	o.data = xr.ReadBytes()
	return xr.Error()
}
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go (generated, vendored, new file, 456 lines)

@@ -0,0 +1,456 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"flag"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"io"
	"log"
	"os"
	"regexp"
	"strconv"
	"strings"
	"text/template"
)

type fieldInfo struct {
	Name      string
	IsBasic   bool   // handled by one of the native Read/WriteUint64 etc functions
	IsSlice   bool   // field is a slice of FieldType
	FieldType string // original type of field, i.e. "int"
	Encoder   string // the encoder name, i.e. "Uint64" for Read/WriteUint64
	Convert   string // what to convert to when encoding, i.e. "uint64"
	Max       int    // max size for slices and strings
}

type structInfo struct {
	Name   string
	Fields []fieldInfo
}

var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************

package {{.Package}}

import (
	"bytes"
	"io"

	"github.com/calmh/xdr"
)
`))

var encodeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.encodeXDR(xw)
}//+n

func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
	return o.AppendXDR(make([]byte, 0, 128))
}//+n

func (o {{.TypeName}}) MustMarshalXDR() []byte {
	bs, err := o.MarshalXDR()
	if err != nil {
		panic(err)
	}
	return bs
}//+n

func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
	var aw = xdr.AppendWriter(bs)
	var xw = xdr.NewWriter(&aw)
	_, err := o.encodeXDR(xw)
	return []byte(aw), err
}//+n

func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
	xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
	if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
		return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
	}
{{end}}
	xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
{{else}}
	_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
	if err != nil {
		return xw.Tot(), err
	}
{{end}}
{{else}}
{{if ge $fieldInfo.Max 1}}
	if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
		return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
	}
{{end}}
	xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
	for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
		xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
{{else if $fieldInfo.IsBasic}}
		xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
{{else}}
		_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
		if err != nil {
			return xw.Tot(), err
		}
{{end}}
	}
{{end}}
{{end}}
	return xw.Tot(), xw.Error()
}//+n

func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.decodeXDR(xr)
}//+n

func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
	var br = bytes.NewReader(bs)
	var xr = xdr.NewReader(br)
	return o.decodeXDR(xr)
}//+n

func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
	o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
	o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
{{else}}
	o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
{{end}}
{{else}}
	(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
{{end}}
{{else}}
	_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
{{if ge $fieldInfo.Max 1}}
	if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
		return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
	}
{{end}}
	o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
	for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
		o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
		o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
{{else}}
		(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
{{end}}
	}
{{end}}
{{end}}
	return xr.Error()
}`))

var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)

type typeSet struct {
	Type    string
	Encoder string
}

var xdrEncoders = map[string]typeSet{
	"int8":   typeSet{"uint8", "Uint8"},
	"uint8":  typeSet{"", "Uint8"},
	"int16":  typeSet{"uint16", "Uint16"},
	"uint16": typeSet{"", "Uint16"},
	"int32":  typeSet{"uint32", "Uint32"},
	"uint32": typeSet{"", "Uint32"},
	"int64":  typeSet{"uint64", "Uint64"},
	"uint64": typeSet{"", "Uint64"},
	"int":    typeSet{"uint64", "Uint64"},
	"string": typeSet{"", "String"},
	"[]byte": typeSet{"", "Bytes"},
	"bool":   typeSet{"", "Bool"},
}

func handleStruct(t *ast.StructType) []fieldInfo {
	var fs []fieldInfo

	for _, sf := range t.Fields.List {
		if len(sf.Names) == 0 {
			// We don't handle anonymous fields
			continue
		}

		fn := sf.Names[0].Name
		var max = 0
		if sf.Comment != nil {
			c := sf.Comment.List[0].Text
			if m := maxRe.FindStringSubmatch(c); m != nil {
				max, _ = strconv.Atoi(m[1])
			}
			if strings.Contains(c, "noencode") {
				continue
			}
		}

		var f fieldInfo
		switch ft := sf.Type.(type) {
		case *ast.Ident:
			tn := ft.Name
			if enc, ok := xdrEncoders[tn]; ok {
				f = fieldInfo{
					Name:      fn,
					IsBasic:   true,
					FieldType: tn,
					Encoder:   enc.Encoder,
					Convert:   enc.Type,
					Max:       max,
				}
			} else {
				f = fieldInfo{
					Name:      fn,
					IsBasic:   false,
					FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.ArrayType:
|
||||
if ft.Len != nil {
|
||||
// We don't handle arrays
|
||||
continue
|
||||
}
|
||||
|
||||
tn := ft.Elt.(*ast.Ident).Name
|
||||
if enc, ok := xdrEncoders["[]"+tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
}
|
||||
|
||||
return fs
|
||||
}
|
||||
|
||||
func generateCode(output io.Writer, s structInfo) {
|
||||
name := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
|
||||
bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
|
||||
|
||||
bs, err = format.Source(bs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Fprintln(output, string(bs))
|
||||
}
|
||||
|
||||
func uncamelize(s string) string {
|
||||
return regexp.MustCompile("[a-z][A-Z]").ReplaceAllStringFunc(s, func(camel string) string {
|
||||
return camel[:1] + " " + camel[1:]
|
||||
})
|
||||
}
|
||||
|
||||
func generateDiagram(output io.Writer, s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Fprintln(output, sn+" Structure:")
|
||||
fmt.Fprintln(output)
|
||||
fmt.Fprintln(output, " 0 1 2 3")
|
||||
fmt.Fprintln(output, " 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
|
||||
line := "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"
|
||||
fmt.Fprintln(output, line)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
sl := f.IsSlice
|
||||
name := uncamelize(f.Name)
|
||||
|
||||
if sl {
|
||||
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
}
|
||||
switch tn {
|
||||
case "bool":
|
||||
fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int16", "uint16":
|
||||
fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int32", "uint32":
|
||||
fmt.Fprintf(output, "| %s |\n", center(name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int64", "uint64":
|
||||
fmt.Fprintf(output, "| %-61s |\n", "")
|
||||
fmt.Fprintf(output, "+ %s +\n", center(name+" (64 bits)", 61))
|
||||
fmt.Fprintf(output, "| %-61s |\n", "")
|
||||
fmt.Fprintln(output, line)
|
||||
case "string", "byte": // XXX We assume slice of byte!
|
||||
fmt.Fprintf(output, "| %s |\n", center("Length of "+name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
fmt.Fprintf(output, "/ %61s /\n", "")
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(name+" (variable length)", 61))
|
||||
fmt.Fprintf(output, "/ %61s /\n", "")
|
||||
fmt.Fprintln(output, line)
|
||||
default:
|
||||
if sl {
|
||||
tn = "Zero or more " + tn + " Structures"
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
} else {
|
||||
fmt.Fprintf(output, "| %s |\n", center(tn, 61))
|
||||
}
|
||||
fmt.Fprintln(output, line)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(output)
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
|
||||
func generateXdr(output io.Writer, s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Fprintf(output, "struct %s {\n", sn)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
fn := f.Name
|
||||
suf := ""
|
||||
l := ""
|
||||
if f.Max > 0 {
|
||||
l = strconv.Itoa(f.Max)
|
||||
}
|
||||
if f.IsSlice {
|
||||
suf = "<" + l + ">"
|
||||
}
|
||||
|
||||
switch tn {
|
||||
case "int16", "int32":
|
||||
fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
|
||||
case "uint16", "uint32":
|
||||
fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
|
||||
case "int64":
|
||||
fmt.Fprintf(output, "\thyper %s%s;\n", fn, suf)
|
||||
case "uint64":
|
||||
fmt.Fprintf(output, "\tunsigned hyper %s%s;\n", fn, suf)
|
||||
case "string":
|
||||
fmt.Fprintf(output, "\tstring %s<%s>;\n", fn, l)
|
||||
case "byte":
|
||||
fmt.Fprintf(output, "\topaque %s<%s>;\n", fn, l)
|
||||
default:
|
||||
fmt.Fprintf(output, "\t%s %s%s;\n", tn, fn, suf)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(output, "}")
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
|
||||
func center(s string, w int) string {
|
||||
w -= len(s)
|
||||
l := w / 2
|
||||
r := l
|
||||
if l+r < w {
|
||||
r++
|
||||
}
|
||||
return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
|
||||
}
|
||||
|
||||
func inspector(structs *[]structInfo) func(ast.Node) bool {
|
||||
return func(n ast.Node) bool {
|
||||
switch n := n.(type) {
|
||||
case *ast.TypeSpec:
|
||||
switch t := n.Type.(type) {
|
||||
case *ast.StructType:
|
||||
name := n.Name.Name
|
||||
fs := handleStruct(t)
|
||||
*structs = append(*structs, structInfo{name, fs})
|
||||
}
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
outputFile := flag.String("o", "", "Output file, blank for stdout")
|
||||
flag.Parse()
|
||||
fname := flag.Arg(0)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var structs []structInfo
|
||||
i := inspector(&structs)
|
||||
ast.Inspect(f, i)
|
||||
|
||||
var output io.Writer = os.Stdout
|
||||
if *outputFile != "" {
|
||||
fd, err := os.Create(*outputFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
output = fd
|
||||
}
|
||||
|
||||
headerTpl.Execute(output, map[string]string{"Package": f.Name.Name})
|
||||
for _, s := range structs {
|
||||
fmt.Fprintf(output, "\n/*\n\n")
|
||||
generateDiagram(output, s)
|
||||
generateXdr(output, s)
|
||||
fmt.Fprintf(output, "*/\n")
|
||||
generateCode(output, s)
|
||||
}
|
||||
}
|
||||
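For context, a minimal sketch of the kind of input genxdr consumes (the type and field names here are hypothetical, not from the diff): the parser above only inspects struct fields with trailing comments, where a comment matching maxRe (`// max:N`) bounds a string or slice length, and a comment containing `noencode` excludes the field from the generated codec.

```go
package protocol

// Announce is a hypothetical input struct for genxdr.
type Announce struct {
    Magic  uint32
    NodeID []byte   // max:32
    Peers  []string // max:16
    Seen   int64    // noencode - skipped by the generator
}
```

Running something like `go run cmd/genxdr/main.go -o announce_xdr.go announce.go` (file names hypothetical; see the generate.sh later in this diff for the real invocations) emits the EncodeXDR/DecodeXDR methods, an ASCII layout diagram, and an XDR struct definition for each such type.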
Godeps/_workspace/src/github.com/calmh/xdr/debug.go (generated, vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
    "log"
    "os"
)

var (
    debug = len(os.Getenv("XDRTRACE")) > 0
    dl    = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
)

const maxDebugBytes = 32
@@ -1,5 +1,5 @@
 // Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
 // is governed by an MIT-style license that can be found in the LICENSE file.

-// Package xdr implements an XDR (RFC 4506) marshaller/unmarshaller.
+// Package xdr implements an XDR (RFC 4506) encoder/decoder.
 package xdr
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go (generated, vendored, new file, 79 lines)
@@ -0,0 +1,79 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr_test

import (
    "bytes"
    "math/rand"
    "reflect"
    "testing"
    "testing/quick"

    "github.com/calmh/xdr"
)

// Contains all supported types
type TestStruct struct {
    I    int
    I8   int8
    UI8  uint8
    I16  int16
    UI16 uint16
    I32  int32
    UI32 uint32
    I64  int64
    UI64 uint64
    BS   []byte // max:1024
    S    string // max:1024
    C    Opaque
    SS   []string // max:1024
}

type Opaque [32]byte

func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
    return w.WriteRaw(u[:])
}

func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
    return r.ReadRaw(u[:])
}

func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
    var u Opaque
    for i := range u[:] {
        u[i] = byte(rand.Int())
    }
    return reflect.ValueOf(u)
}

func TestEncDec(t *testing.T) {
    fn := func(t0 TestStruct) bool {
        bs, err := t0.MarshalXDR()
        if err != nil {
            t.Fatal(err)
        }
        var t1 TestStruct
        err = t1.UnmarshalXDR(bs)
        if err != nil {
            t.Fatal(err)
        }

        // Not comparing with DeepEqual since we'll unmarshal nil slices as empty
        if t0.I != t1.I ||
            t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
            t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
            t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
            bytes.Compare(t0.BS, t1.BS) != 0 ||
            t0.S != t1.S || t0.C != t1.C {
            t.Logf("%#v", t0)
            t.Logf("%#v", t1)
            return false
        }
        return true
    }
    if err := quick.Check(fn, nil); err != nil {
        t.Error(err)
    }
}
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go (generated, vendored, new file, 174 lines)
@@ -0,0 +1,174 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************

package xdr_test

import (
    "bytes"
    "io"

    "github.com/calmh/xdr"
)

/*

TestStruct Structure:

 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                              int                              |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             int8                              |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             uint8                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             int16                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|            0x0000             |             UI16              |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             int32                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             UI32                              |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                                                               |
+                         I64 (64 bits)                         +
|                                                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                                                               |
+                        UI64 (64 bits)                         +
|                                                               |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of BS                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     BS (variable length)                      \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                          Length of S                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                      S (variable length)                      \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                            Opaque                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Number of SS                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                         Length of SS                          |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/                                                               /
\                     SS (variable length)                      \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


struct TestStruct {
	int I;
	int8 I8;
	uint8 UI8;
	int16 I16;
	unsigned int UI16;
	int32 I32;
	unsigned int UI32;
	hyper I64;
	unsigned hyper UI64;
	opaque BS<1024>;
	string S<1024>;
	Opaque C;
	string SS<1024>;
}

*/

func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
    var xw = xdr.NewWriter(w)
    return o.encodeXDR(xw)
}

func (o TestStruct) MarshalXDR() ([]byte, error) {
    return o.AppendXDR(make([]byte, 0, 128))
}

func (o TestStruct) MustMarshalXDR() []byte {
    bs, err := o.MarshalXDR()
    if err != nil {
        panic(err)
    }
    return bs
}

func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
    var aw = xdr.AppendWriter(bs)
    var xw = xdr.NewWriter(&aw)
    _, err := o.encodeXDR(xw)
    return []byte(aw), err
}

func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
    xw.WriteUint64(uint64(o.I))
    xw.WriteUint8(uint8(o.I8))
    xw.WriteUint8(o.UI8)
    xw.WriteUint16(uint16(o.I16))
    xw.WriteUint16(o.UI16)
    xw.WriteUint32(uint32(o.I32))
    xw.WriteUint32(o.UI32)
    xw.WriteUint64(uint64(o.I64))
    xw.WriteUint64(o.UI64)
    if l := len(o.BS); l > 1024 {
        return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
    }
    xw.WriteBytes(o.BS)
    if l := len(o.S); l > 1024 {
        return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
    }
    xw.WriteString(o.S)
    _, err := o.C.encodeXDR(xw)
    if err != nil {
        return xw.Tot(), err
    }
    if l := len(o.SS); l > 1024 {
        return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
    }
    xw.WriteUint32(uint32(len(o.SS)))
    for i := range o.SS {
        xw.WriteString(o.SS[i])
    }
    return xw.Tot(), xw.Error()
}

func (o *TestStruct) DecodeXDR(r io.Reader) error {
    xr := xdr.NewReader(r)
    return o.decodeXDR(xr)
}

func (o *TestStruct) UnmarshalXDR(bs []byte) error {
    var br = bytes.NewReader(bs)
    var xr = xdr.NewReader(br)
    return o.decodeXDR(xr)
}

func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
    o.I = int(xr.ReadUint64())
    o.I8 = int8(xr.ReadUint8())
    o.UI8 = xr.ReadUint8()
    o.I16 = int16(xr.ReadUint16())
    o.UI16 = xr.ReadUint16()
    o.I32 = int32(xr.ReadUint32())
    o.UI32 = xr.ReadUint32()
    o.I64 = int64(xr.ReadUint64())
    o.UI64 = xr.ReadUint64()
    o.BS = xr.ReadBytesMax(1024)
    o.S = xr.ReadStringMax(1024)
    (&o.C).decodeXDR(xr)
    _SSSize := int(xr.ReadUint32())
    if _SSSize > 1024 {
        return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
    }
    o.SS = make([]string, _SSSize)
    for i := range o.SS {
        o.SS[i] = xr.ReadString()
    }
    return xr.Error()
}
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh (generated, vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh

go run cmd/genxdr/main.go -- bench_test.go > bench_xdr_test.go
go run cmd/genxdr/main.go -- encdec_test.go > encdec_xdr_test.go
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build ipdr

package xdr

func pad(l int) int {
    return 0
}
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func pad(l int) int {
    d := l % 4
    if d == 0 {
        return 0
    }
    return 4 - d
}
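To make the padding rule concrete: under the default (non-ipdr) build, XDR aligns every field to four bytes, so pad(l) equals (4 - l%4) % 4. A hypothetical test, not part of the vendored sources, that a reader could drop into the package to check this:

```go
package xdr

import "testing"

// TestPad is illustrative only: it spells out the alignment arithmetic
// implemented by pad in pad_xdr.go.
func TestPad(t *testing.T) {
    cases := map[int]int{0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: 3, 8: 0}
    for l, want := range cases {
        if got := pad(l); got != want {
            t.Errorf("pad(%d) = %d, want %d", l, got, want)
        }
    }
}
```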
Godeps/_workspace/src/github.com/calmh/xdr/reader.go (generated, vendored, new file, 170 lines)
@@ -0,0 +1,170 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package xdr

import (
    "fmt"
    "io"
    "reflect"
    "unsafe"
)

type Reader struct {
    r   io.Reader
    err error
    b   [8]byte
}

func NewReader(r io.Reader) *Reader {
    return &Reader{
        r: r,
    }
}

func (r *Reader) ReadRaw(bs []byte) (int, error) {
    if r.err != nil {
        return 0, r.err
    }

    var n int
    n, r.err = io.ReadFull(r.r, bs)
    return n, r.err
}

func (r *Reader) ReadString() string {
    return r.ReadStringMax(0)
}

func (r *Reader) ReadStringMax(max int) string {
    buf := r.ReadBytesMaxInto(max, nil)
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
    sh := reflect.StringHeader{
        Data: bh.Data,
        Len:  bh.Len,
    }
    return *((*string)(unsafe.Pointer(&sh)))
}

func (r *Reader) ReadBytes() []byte {
    return r.ReadBytesInto(nil)
}

func (r *Reader) ReadBytesMax(max int) []byte {
    return r.ReadBytesMaxInto(max, nil)
}

func (r *Reader) ReadBytesInto(dst []byte) []byte {
    return r.ReadBytesMaxInto(0, dst)
}

func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
    if r.err != nil {
        return nil
    }

    l := int(r.ReadUint32())
    if r.err != nil {
        return nil
    }
    if max > 0 && l > max {
        r.err = ElementSizeExceeded("bytes field", l, max)
        return nil
    }

    if fullLen := l + pad(l); fullLen > len(dst) {
        dst = make([]byte, fullLen)
    } else {
        dst = dst[:fullLen]
    }

    var n int
    n, r.err = io.ReadFull(r.r, dst)
    if r.err != nil {
        if debug {
            dl.Printf("rd bytes (%d): %v", len(dst), r.err)
        }
        return nil
    }

    if debug {
        if n > maxDebugBytes {
            dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
        } else {
            dl.Printf("rd bytes (%d): %x", len(dst), dst)
        }
    }
    return dst[:l]
}

func (r *Reader) ReadBool() bool {
    return r.ReadUint8() != 0
}

func (r *Reader) ReadUint32() uint32 {
    if r.err != nil {
        return 0
    }

    _, r.err = io.ReadFull(r.r, r.b[:4])
    if r.err != nil {
        if debug {
            dl.Printf("rd uint32: %v", r.err)
        }
        return 0
    }

    v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24

    if debug {
        dl.Printf("rd uint32=%d (0x%08x)", v, v)
    }
    return v
}

func (r *Reader) ReadUint64() uint64 {
    if r.err != nil {
        return 0
    }

    _, r.err = io.ReadFull(r.r, r.b[:8])
    if r.err != nil {
        if debug {
            dl.Printf("rd uint64: %v", r.err)
        }
        return 0
    }

    v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
        uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56

    if debug {
        dl.Printf("rd uint64=%d (0x%016x)", v, v)
    }
    return v
}

type XDRError struct {
    op  string
    err error
}

func (e XDRError) Error() string {
    return "xdr " + e.op + ": " + e.err.Error()
}

func (e XDRError) IsEOF() bool {
    return e.err == io.EOF
}

func (r *Reader) Error() error {
    if r.err == nil {
        return nil
    }
    return XDRError{"read", r.err}
}

func ElementSizeExceeded(field string, size, limit int) error {
    return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
}
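A design point worth noting about the Reader above: the first failure is latched in r.err, and every later Read* call short-circuits to a zero value, so a decoder can read a whole structure straight through and check Error() once at the end. A minimal sketch of that calling pattern (the record layout and names are hypothetical):

```go
package example

import (
    "fmt"
    "io"

    "github.com/calmh/xdr"
)

// decodeRecord reads a hypothetical (id, name, size) record. No per-call
// error checks are needed because Reader latches the first error.
func decodeRecord(rd io.Reader) error {
    r := xdr.NewReader(rd)
    id := r.ReadBytesMax(32)    // nil once an earlier read has failed
    name := r.ReadStringMax(64) // "" on error
    size := r.ReadUint64()      // 0 on error
    if err := r.Error(); err != nil {
        return err // the first failure, wrapped as XDRError{"read", ...}
    }
    fmt.Printf("id=%x name=%q size=%d\n", id, name, size)
    return nil
}
```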
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build ipdr

package xdr

import "io"

func (r *Reader) ReadUint8() uint8 {
    if r.err != nil {
        return 0
    }

    _, r.err = io.ReadFull(r.r, r.b[:1])
    if r.err != nil {
        if debug {
            dl.Printf("rd uint8: %v", r.err)
        }
        return 0
    }

    if debug {
        dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
    }
    return r.b[0]
}

func (r *Reader) ReadUint16() uint16 {
    if r.err != nil {
        return 0
    }

    _, r.err = io.ReadFull(r.r, r.b[:2])
    if r.err != nil {
        if debug {
            dl.Printf("rd uint16: %v", r.err)
        }
        return 0
    }

    v := uint16(r.b[1]) | uint16(r.b[0])<<8

    if debug {
        dl.Printf("rd uint16=%d (0x%04x)", v, v)
    }
    return v
}
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (r *Reader) ReadUint8() uint8 {
    return uint8(r.ReadUint32())
}

func (r *Reader) ReadUint16() uint16 {
    return uint16(r.ReadUint32())
}
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go (generated, vendored, new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build refl

package xdr_test

import (
    "bytes"
    "testing"

    refl "github.com/davecgh/go-xdr/xdr"
)

func TestCompareMarshals(t *testing.T) {
    e0 := s.MarshalXDR()
    e1, err := refl.Marshal(s)
    if err != nil {
        t.Fatal(err)
    }
    if bytes.Compare(e0, e1) != 0 {
        t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
    }
}

func BenchmarkReflMarshal(b *testing.B) {
    var err error
    for i := 0; i < b.N; i++ {
        res, err = refl.Marshal(s)
        if err != nil {
            b.Fatal(err)
        }
    }
}

func BenchmarkReflUnmarshal(b *testing.B) {
    var t XDRBenchStruct
    for i := 0; i < b.N; i++ {
        _, err := refl.Unmarshal(e, &t)
        if err != nil {
            b.Fatal(err)
        }
    }
}
Godeps/_workspace/src/github.com/calmh/xdr/writer.go (generated, vendored, new file, 146 lines)
@@ -0,0 +1,146 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
    "io"
    "reflect"
    "unsafe"
)

var padBytes = []byte{0, 0, 0}

type Writer struct {
    w   io.Writer
    tot int
    err error
    b   [8]byte
}

type AppendWriter []byte

func (w *AppendWriter) Write(bs []byte) (int, error) {
    *w = append(*w, bs...)
    return len(bs), nil
}

func NewWriter(w io.Writer) *Writer {
    return &Writer{
        w: w,
    }
}

func (w *Writer) WriteRaw(bs []byte) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    var n int
    n, w.err = w.w.Write(bs)
    return n, w.err
}

func (w *Writer) WriteString(s string) (int, error) {
    sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
    bh := reflect.SliceHeader{
        Data: sh.Data,
        Len:  sh.Len,
        Cap:  sh.Len,
    }
    return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}

func (w *Writer) WriteBytes(bs []byte) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    w.WriteUint32(uint32(len(bs)))
    if w.err != nil {
        return 0, w.err
    }

    if debug {
        if len(bs) > maxDebugBytes {
            dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
        } else {
            dl.Printf("wr bytes (%d): %x", len(bs), bs)
        }
    }

    var l, n int
    n, w.err = w.w.Write(bs)
    l += n

    if p := pad(len(bs)); w.err == nil && p > 0 {
        n, w.err = w.w.Write(padBytes[:p])
        l += n
    }

    w.tot += l
    return l, w.err
}

func (w *Writer) WriteBool(v bool) (int, error) {
    if v {
        return w.WriteUint8(1)
    } else {
        return w.WriteUint8(0)
    }
}

func (w *Writer) WriteUint32(v uint32) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    if debug {
        dl.Printf("wr uint32=%d", v)
    }

    w.b[0] = byte(v >> 24)
    w.b[1] = byte(v >> 16)
    w.b[2] = byte(v >> 8)
    w.b[3] = byte(v)

    var l int
    l, w.err = w.w.Write(w.b[:4])
    w.tot += l
    return l, w.err
}

func (w *Writer) WriteUint64(v uint64) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    if debug {
        dl.Printf("wr uint64=%d", v)
    }

    w.b[0] = byte(v >> 56)
    w.b[1] = byte(v >> 48)
    w.b[2] = byte(v >> 40)
    w.b[3] = byte(v >> 32)
    w.b[4] = byte(v >> 24)
    w.b[5] = byte(v >> 16)
    w.b[6] = byte(v >> 8)
    w.b[7] = byte(v)

    var l int
    l, w.err = w.w.Write(w.b[:8])
    w.tot += l
    return l, w.err
}

func (w *Writer) Tot() int {
    return w.tot
}

func (w *Writer) Error() error {
    if w.err == nil {
        return nil
    }
    return XDRError{"write", w.err}
}
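A minimal round trip through the Writer and Reader above, assuming the package is imported as github.com/calmh/xdr (the values and field order are arbitrary):

```go
package main

import (
    "bytes"
    "fmt"

    "github.com/calmh/xdr"
)

func main() {
    var buf bytes.Buffer

    // Write a uint32 and a length-prefixed, zero-padded string.
    w := xdr.NewWriter(&buf)
    w.WriteUint32(42)
    w.WriteString("hello")
    if err := w.Error(); err != nil {
        panic(err)
    }

    // "hello" occupies 4 (length) + 8 (5 bytes padded to a multiple of 4),
    // so the buffer holds 16 bytes in total.
    r := xdr.NewReader(&buf)
    n := r.ReadUint32()
    s := r.ReadString()
    if err := r.Error(); err != nil {
        panic(err)
    }
    fmt.Println(n, s) // 42 hello
}
```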
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go (generated, vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build ipdr

package xdr

func (w *Writer) WriteUint8(v uint8) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    if debug {
        dl.Printf("wr uint8=%d", v)
    }

    w.b[0] = byte(v)

    var l int
    l, w.err = w.w.Write(w.b[:1])
    w.tot += l
    return l, w.err
}

func (w *Writer) WriteUint16(v uint16) (int, error) {
    if w.err != nil {
        return 0, w.err
    }

    if debug {
        dl.Printf("wr uint16=%d", v)
    }

    w.b[0] = byte(v >> 8)
    w.b[1] = byte(v)

    var l int
    l, w.err = w.w.Write(w.b[:2])
    w.tot += l
    return l, w.err
}
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go (generated, vendored, new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (w *Writer) WriteUint8(v uint8) (int, error) {
    return w.WriteUint32(uint32(v))
}

func (w *Writer) WriteUint16(v uint16) (int, error) {
    return w.WriteUint32(uint32(v))
}
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go (generated, vendored, new file, 93 lines)
@@ -0,0 +1,93 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
    "bytes"
    "strings"
    "testing"
    "testing/quick"
)

func TestBytesNil(t *testing.T) {
    fn := func(bs []byte) bool {
        var b = new(bytes.Buffer)
        var w = NewWriter(b)
        var r = NewReader(b)
        w.WriteBytes(bs)
        w.WriteBytes(bs)
        r.ReadBytes()
        res := r.ReadBytes()
        return bytes.Compare(bs, res) == 0
    }
    if err := quick.Check(fn, nil); err != nil {
        t.Error(err)
    }
}

func TestBytesGiven(t *testing.T) {
    fn := func(bs []byte) bool {
        var b = new(bytes.Buffer)
        var w = NewWriter(b)
        var r = NewReader(b)
        w.WriteBytes(bs)
        w.WriteBytes(bs)
        res := make([]byte, 12)
        res = r.ReadBytesInto(res)
        res = r.ReadBytesInto(res)
        return bytes.Compare(bs, res) == 0
    }
    if err := quick.Check(fn, nil); err != nil {
        t.Error(err)
    }
}

func TestReadBytesMaxInto(t *testing.T) {
    var max = 64
    for tot := 32; tot < 128; tot++ {
        for diff := -32; diff <= 32; diff++ {
            var b = new(bytes.Buffer)
            var r = NewReader(b)
            var w = NewWriter(b)

            var toWrite = make([]byte, tot)
            w.WriteBytes(toWrite)

            var buf = make([]byte, tot+diff)
            var bs = r.ReadBytesMaxInto(max, buf)

            if tot <= max {
                if read := len(bs); read != tot {
                    t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
                }
            } else if !strings.Contains(r.err.Error(), "exceeds size") {
                t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
            }
        }
    }
}

func TestReadStringMax(t *testing.T) {
    for tot := 42; tot < 72; tot++ {
        for max := 0; max < 128; max++ {
            var b = new(bytes.Buffer)
            var r = NewReader(b)
            var w = NewWriter(b)

            var toWrite = make([]byte, tot)
            w.WriteBytes(toWrite)

            var str = r.ReadStringMax(max)
            var read = len(str)

            if max == 0 || tot <= max {
                if read != tot {
                    t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
                }
            } else if !strings.Contains(r.err.Error(), "exceeds size") {
                t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
            }
        }
    }
}
Godeps/_workspace/src/github.com/juju/ratelimit/LICENSE (generated, vendored, new file, 185 lines)
@@ -0,0 +1,185 @@
This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.

Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.


                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library. A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
Godeps/_workspace/src/github.com/juju/ratelimit/README.md (generated, vendored, new file, 117 lines)
@@ -0,0 +1,117 @@
# ratelimit
--
    import "github.com/juju/ratelimit"

The ratelimit package provides an efficient token bucket implementation. See
http://en.wikipedia.org/wiki/Token_bucket.

## Usage

#### func Reader

```go
func Reader(r io.Reader, bucket *Bucket) io.Reader
```
Reader returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.

#### func Writer

```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
token in the bucket represents one byte.

#### type Bucket

```go
type Bucket struct {
}
```

Bucket represents a token bucket that fills at a predetermined rate. Methods on
Bucket may be called concurrently.

#### func NewBucket

```go
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
```
NewBucket returns a new token bucket that fills at the rate of one token every
fillInterval, up to the given maximum capacity. Both arguments must be positive.
The bucket is initially full.

#### func NewBucketWithQuantum

```go
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
```
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
the quantum size - quantum tokens are added every fillInterval.

#### func NewBucketWithRate

```go
func NewBucketWithRate(rate float64, capacity int64) *Bucket
```
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
rate tokens per second up to the given maximum capacity. Because of limited
clock resolution, at high rates, the actual rate may be up to 1% different from
the specified rate.

#### func (*Bucket) Rate

```go
func (tb *Bucket) Rate() float64
```
Rate returns the fill rate of the bucket, in tokens per second.

#### func (*Bucket) Take

```go
func (tb *Bucket) Take(count int64) time.Duration
```
Take takes count tokens from the bucket without blocking. It returns the time
that the caller should wait until the tokens are actually available.

Note that the request is irrevocable - there is no way to return tokens to
the bucket once this method commits us to taking them.

#### func (*Bucket) TakeAvailable

```go
func (tb *Bucket) TakeAvailable(count int64) int64
```
TakeAvailable takes up to count immediately available tokens from the bucket. It
returns the number of tokens removed, or zero if there are no available tokens.
It does not block.

#### func (*Bucket) TakeMaxDuration

```go
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
```
TakeMaxDuration is like Take, except that it will only take tokens from the
bucket if the wait time for the tokens is no greater than maxWait.

If it would take longer than maxWait for the tokens to become available, it does
nothing and reports false, otherwise it returns the time that the caller should
wait until the tokens are actually available, and reports true.

#### func (*Bucket) Wait

```go
func (tb *Bucket) Wait(count int64)
```
Wait takes count tokens from the bucket, waiting until they are available.

#### func (*Bucket) WaitMaxDuration

```go
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
```
WaitMaxDuration is like Wait except that it will only take tokens from the
bucket if it needs to wait for no greater than maxWait. It reports whether any
tokens have been removed from the bucket. If no tokens have been removed, it
returns immediately.
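To tie the API together, a small sketch of rate limiting a copy using only the functions documented above (the 100 KB/s figure and the stdin-to-stdout plumbing are arbitrary choices):

```go
package main

import (
    "io"
    "os"

    "github.com/juju/ratelimit"
)

func main() {
    // Allow roughly 100 KB/s with bursts of up to 100 KB;
    // one token corresponds to one byte.
    bucket := ratelimit.NewBucketWithRate(100*1024, 100*1024)

    // Wrap stdin so that reads block whenever the bucket runs dry.
    limited := ratelimit.Reader(os.Stdin, bucket)
    if _, err := io.Copy(os.Stdout, limited); err != nil {
        panic(err)
    }
}
```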
226
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go
generated
vendored
Normal file
226
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3 with static-linking exception.
|
||||
// See LICENCE file for details.
|
||||
|
||||
// The ratelimit package provides an efficient token bucket implementation.
|
||||
// See http://en.wikipedia.org/wiki/Token_bucket.
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Bucket represents a token bucket that fills at a predetermined rate.
|
||||
// Methods on Bucket may be called concurrently.
|
||||
type Bucket struct {
|
||||
startTime time.Time
|
||||
capacity int64
|
||||
quantum int64
|
||||
fillInterval time.Duration
|
||||
|
||||
// The mutex guards the fields following it.
|
||||
mu sync.Mutex
|
||||
|
||||
// avail holds the number of available tokens
|
||||
// in the bucket, as of availTick ticks from startTime.
|
||||
// It will be negative when there are consumers
|
||||
// waiting for tokens.
|
||||
avail int64
|
||||
availTick int64
|
||||
}
|
||||
|
||||
// NewBucket returns a new token bucket that fills at the
|
||||
// rate of one token every fillInterval, up to the given
|
||||
// maximum capacity. Both arguments must be
|
||||
// positive. The bucket is initially full.
|
||||
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
|
||||
return NewBucketWithQuantum(fillInterval, capacity, 1)
|
||||
}
|
||||
|
||||
// rateMargin specifes the allowed variance of actual
|
||||
// rate from specified rate. 1% seems reasonable.
|
||||
const rateMargin = 0.01
|
||||
|
||||
// NewBucketWithRate returns a token bucket that fills the bucket
|
||||
// at the rate of rate tokens per second up to the given
|
||||
// maximum capacity. Because of limited clock resolution,
|
||||
// at high rates, the actual rate may be up to 1% different from the
|
||||
// specified rate.
|
||||
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
|
||||
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
|
||||
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
|
||||
if fillInterval <= 0 {
|
||||
continue
|
||||
}
|
||||
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
|
||||
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
|
||||
return tb
|
||||
}
|
||||
}
|
||||
panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
|
||||
}
|
||||
|
||||
// nextQuantum returns the next quantum to try after q.
|
||||
// We grow the quantum exponentially, but slowly, so we
|
||||
// get a good fit in the lower numbers.
|
||||
func nextQuantum(q int64) int64 {
|
||||
q1 := q * 11 / 10
|
||||
if q1 == q {
|
||||
q1++
|
||||
}
|
||||
return q1
|
||||
}
|
||||
|
||||
// NewBucketWithQuantum is similar to NewBucket, but allows
|
||||
// the specification of the quantum size - quantum tokens
|
||||
// are added every fillInterval.
|
||||
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
|
||||
if fillInterval <= 0 {
|
||||
panic("token bucket fill interval is not > 0")
|
||||
}
|
||||
if capacity <= 0 {
|
||||
panic("token bucket capacity is not > 0")
|
||||
}
|
||||
if quantum <= 0 {
|
||||
panic("token bucket quantum is not > 0")
|
||||
}
|
||||
return &Bucket{
|
||||
startTime: time.Now(),
|
||||
capacity: capacity,
|
||||
quantum: quantum,
|
||||
avail: capacity,
|
||||
fillInterval: fillInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Wait takes count tokens from the bucket, waiting until they are
|
||||
// available.
|
||||
func (tb *Bucket) Wait(count int64) {
|
||||
if d := tb.Take(count); d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitMaxDuration is like Wait except that it will
|
||||
// only take tokens from the bucket if it needs to wait
|
||||
// for no greater than maxWait. It reports whether
|
||||
// any tokens have been removed from the bucket
|
||||
// If no tokens have been removed, it returns immediately.
|
||||
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
|
||||
d, ok := tb.TakeMaxDuration(count, maxWait)
|
||||
if d > 0 {
|
||||
time.Sleep(d)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
const infinityDuration time.Duration = 0x7fffffffffffffff
|
||||
|
||||
// Take takes count tokens from the bucket without blocking. It returns
|
||||
// the time that the caller should wait until the tokens are actually
|
||||
// available.
|
||||
//
|
||||
// Note that if the request is irrevocable - there is no way to return
|
||||
// tokens to the bucket once this method commits us to taking them.
|
||||
func (tb *Bucket) Take(count int64) time.Duration {
|
||||
d, _ := tb.take(time.Now(), count, infinityDuration)
|
||||
return d
|
||||
}
|
||||
|
||||
// TakeMaxDuration is like Take, except that
|
||||
// it will only take tokens from the bucket if the wait
|
||||
// time for the tokens is no greater than maxWait.
|
||||
//
|
||||
// If it would take longer than maxWait for the tokens
|
||||
// to become available, it does nothing and reports false,
|
||||
// otherwise it returns the time that the caller should
|
||||
// wait until the tokens are actually available, and reports
|
||||
// true.
|
||||
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
|
||||
return tb.take(time.Now(), count, maxWait)
|
||||
}
|
||||
|
// TakeAvailable takes up to count immediately available tokens from the
// bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 {
    return tb.takeAvailable(time.Now(), count)
}

// takeAvailable is the internal version of TakeAvailable - it takes the
// current time as an argument to enable easy testing.
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
    if count <= 0 {
        return 0
    }
    tb.mu.Lock()
    defer tb.mu.Unlock()

    tb.adjust(now)
    if tb.avail <= 0 {
        return 0
    }
    if count > tb.avail {
        count = tb.avail
    }
    tb.avail -= count
    return count
}

// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
    return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
}
// take is the internal version of Take - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
    if count <= 0 {
        return 0, true
    }
    tb.mu.Lock()
    defer tb.mu.Unlock()

    currentTick := tb.adjust(now)
    avail := tb.avail - count
    if avail >= 0 {
        tb.avail = avail
        return 0, true
    }
    // Round up the missing tokens to the nearest multiple
    // of quantum - the tokens won't be available until
    // that tick.
    endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
    endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
    waitTime := endTime.Sub(now)
    if waitTime > maxWait {
        return 0, false
    }
    tb.avail = avail
    return waitTime, true
}
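To make the deficit-rounding arithmetic in take concrete, here is a small worked sketch (NewBucket is defined earlier in this file; the numbers match the "concurrent requests" test case further down):

// Worked example of take's rounding: capacity 10, 250ms interval, quantum 1.
tb := NewBucket(250*time.Millisecond, 10)
tb.Take(10)     // bucket starts full: avail = 10-10 = 0, wait 0
d := tb.Take(2) // avail = 0-2 = -2; endTick = (2+1-1)/1 = 2
_ = d           // roughly 2 ticks * 250ms = 500ms (minus the few ns already elapsed)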
// adjust adjusts the current bucket capacity based on the current time.
// It returns the current tick.
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
    currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)

    if tb.avail >= tb.capacity {
        return
    }
    tb.avail += (currentTick - tb.availTick) * tb.quantum
    if tb.avail > tb.capacity {
        tb.avail = tb.capacity
    }
    tb.availTick = currentTick
    return
}

func abs(f float64) float64 {
    if f < 0 {
        return -f
    }
    return f
}
328  Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit_test.go  generated vendored  Normal file
@@ -0,0 +1,328 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.

package ratelimit

import (
    gc "launchpad.net/gocheck"

    "testing"
    "time"
)

func TestPackage(t *testing.T) {
    gc.TestingT(t)
}

type rateLimitSuite struct{}

var _ = gc.Suite(rateLimitSuite{})

type takeReq struct {
    time       time.Duration
    count      int64
    expectWait time.Duration
}

var takeTests = []struct {
    about        string
    fillInterval time.Duration
    capacity     int64
    reqs         []takeReq
}{{
    about:        "serial requests",
    fillInterval: 250 * time.Millisecond,
    capacity:     10,
    reqs: []takeReq{{
        time:       0,
        count:      0,
        expectWait: 0,
    }, {
        time:       0,
        count:      10,
        expectWait: 0,
    }, {
        time:       0,
        count:      1,
        expectWait: 250 * time.Millisecond,
    }, {
        time:       250 * time.Millisecond,
        count:      1,
        expectWait: 250 * time.Millisecond,
    }},
}, {
    about:        "concurrent requests",
    fillInterval: 250 * time.Millisecond,
    capacity:     10,
    reqs: []takeReq{{
        time:       0,
        count:      10,
        expectWait: 0,
    }, {
        time:       0,
        count:      2,
        expectWait: 500 * time.Millisecond,
    }, {
        time:       0,
        count:      2,
        expectWait: 1000 * time.Millisecond,
    }, {
        time:       0,
        count:      1,
        expectWait: 1250 * time.Millisecond,
    }},
}, {
    about:        "more than capacity",
    fillInterval: 1 * time.Millisecond,
    capacity:     10,
    reqs: []takeReq{{
        time:       0,
        count:      10,
        expectWait: 0,
    }, {
        time:       20 * time.Millisecond,
        count:      15,
        expectWait: 5 * time.Millisecond,
    }},
}, {
    about:        "sub-quantum time",
    fillInterval: 10 * time.Millisecond,
    capacity:     10,
    reqs: []takeReq{{
        time:       0,
        count:      10,
        expectWait: 0,
    }, {
        time:       7 * time.Millisecond,
        count:      1,
        expectWait: 3 * time.Millisecond,
    }, {
        time:       8 * time.Millisecond,
        count:      1,
        expectWait: 12 * time.Millisecond,
    }},
}, {
    about:        "within capacity",
    fillInterval: 10 * time.Millisecond,
    capacity:     5,
    reqs: []takeReq{{
        time:       0,
        count:      5,
        expectWait: 0,
    }, {
        time:       60 * time.Millisecond,
        count:      5,
        expectWait: 0,
    }, {
        time:       60 * time.Millisecond,
        count:      1,
        expectWait: 10 * time.Millisecond,
    }, {
        time:       80 * time.Millisecond,
        count:      2,
        expectWait: 10 * time.Millisecond,
    }},
}}

func (rateLimitSuite) TestTake(c *gc.C) {
    for i, test := range takeTests {
        tb := NewBucket(test.fillInterval, test.capacity)
        for j, req := range test.reqs {
            d, ok := tb.take(tb.startTime.Add(req.time), req.count, infinityDuration)
            c.Assert(ok, gc.Equals, true)
            if d != req.expectWait {
                c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
            }
        }
    }
}

func (rateLimitSuite) TestTakeMaxDuration(c *gc.C) {
    for i, test := range takeTests {
        tb := NewBucket(test.fillInterval, test.capacity)
        for j, req := range test.reqs {
            if req.expectWait > 0 {
                d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait-1)
                c.Assert(ok, gc.Equals, false)
                c.Assert(d, gc.Equals, time.Duration(0))
            }
            d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait)
            c.Assert(ok, gc.Equals, true)
            if d != req.expectWait {
                c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
            }
        }
    }
}

type takeAvailableReq struct {
    time   time.Duration
    count  int64
    expect int64
}

var takeAvailableTests = []struct {
    about        string
    fillInterval time.Duration
    capacity     int64
    reqs         []takeAvailableReq
}{{
    about:        "serial requests",
    fillInterval: 250 * time.Millisecond,
    capacity:     10,
    reqs: []takeAvailableReq{{
        time:   0,
        count:  0,
        expect: 0,
    }, {
        time:   0,
        count:  10,
        expect: 10,
    }, {
        time:   0,
        count:  1,
        expect: 0,
    }, {
        time:   250 * time.Millisecond,
        count:  1,
        expect: 1,
    }},
}, {
    about:        "concurrent requests",
    fillInterval: 250 * time.Millisecond,
    capacity:     10,
    reqs: []takeAvailableReq{{
        time:   0,
        count:  5,
        expect: 5,
    }, {
        time:   0,
        count:  2,
        expect: 2,
    }, {
        time:   0,
        count:  5,
        expect: 3,
    }, {
        time:   0,
        count:  1,
        expect: 0,
    }},
}, {
    about:        "more than capacity",
    fillInterval: 1 * time.Millisecond,
    capacity:     10,
    reqs: []takeAvailableReq{{
        time:   0,
        count:  10,
        expect: 10,
    }, {
        time:   20 * time.Millisecond,
        count:  15,
        expect: 10,
    }},
}, {
    about:        "within capacity",
    fillInterval: 10 * time.Millisecond,
    capacity:     5,
    reqs: []takeAvailableReq{{
        time:   0,
        count:  5,
        expect: 5,
    }, {
        time:   60 * time.Millisecond,
        count:  5,
        expect: 5,
    }, {
        time:   70 * time.Millisecond,
        count:  1,
        expect: 1,
    }},
}}

func (rateLimitSuite) TestTakeAvailable(c *gc.C) {
    for i, test := range takeAvailableTests {
        tb := NewBucket(test.fillInterval, test.capacity)
        for j, req := range test.reqs {
            d := tb.takeAvailable(tb.startTime.Add(req.time), req.count)
            if d != req.expect {
                c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expect)
            }
        }
    }
}

func (rateLimitSuite) TestPanics(c *gc.C) {
    c.Assert(func() { NewBucket(0, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
    c.Assert(func() { NewBucket(-2, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
    c.Assert(func() { NewBucket(1, 0) }, gc.PanicMatches, "token bucket capacity is not > 0")
    c.Assert(func() { NewBucket(1, -2) }, gc.PanicMatches, "token bucket capacity is not > 0")
}

func isCloseTo(x, y, tolerance float64) bool {
    return abs(x-y)/y < tolerance
}

func (rateLimitSuite) TestRate(c *gc.C) {
    tb := NewBucket(1, 1)
    if !isCloseTo(tb.Rate(), 1e9, 0.00001) {
        c.Fatalf("got %v want 1e9", tb.Rate())
    }
    tb = NewBucket(2*time.Second, 1)
    if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
        c.Fatalf("got %v want 0.5", tb.Rate())
    }
    tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
    if !isCloseTo(tb.Rate(), 50, 0.00001) {
        c.Fatalf("got %v want 50", tb.Rate())
    }
}

func checkRate(c *gc.C, rate float64) {
    tb := NewBucketWithRate(rate, 1<<62)
    if !isCloseTo(tb.Rate(), rate, rateMargin) {
        c.Fatalf("got %g want %v", tb.Rate(), rate)
    }
    d, ok := tb.take(tb.startTime, 1<<62, infinityDuration)
    c.Assert(ok, gc.Equals, true)
    c.Assert(d, gc.Equals, time.Duration(0))

    // Check that the actual rate is as expected by
    // asking for a not-quite multiple of the bucket's
    // quantum and checking that the wait time is
    // correct.
    d, ok = tb.take(tb.startTime, tb.quantum*2-tb.quantum/2, infinityDuration)
    c.Assert(ok, gc.Equals, true)
    expectTime := 1e9 * float64(tb.quantum) * 2 / rate
    if !isCloseTo(float64(d), expectTime, rateMargin) {
        c.Fatalf("rate %g: got %g want %v", rate, float64(d), expectTime)
    }
}

func (rateLimitSuite) TestNewWithRate(c *gc.C) {
    for rate := float64(1); rate < 1e6; rate += 7 {
        checkRate(c, rate)
    }
    for _, rate := range []float64{
        1024 * 1024 * 1024,
        1e-5,
        0.9e-5,
        0.5,
        0.9,
        0.9e8,
        3e12,
        4e18,
    } {
        checkRate(c, rate)
        checkRate(c, rate/3)
        checkRate(c, rate*1.3)
    }
}

func BenchmarkWait(b *testing.B) {
    tb := NewBucket(1, 16*1024)
    for i := b.N - 1; i >= 0; i-- {
        tb.Wait(1)
    }
}
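The tests above never sleep: they drive the internal take/takeAvailable with tb.startTime.Add(req.time) as a synthetic clock. A minimal sketch of the same deterministic-clock technique for a new case (the numbers are invented, not from the diff):

// Sketch of the deterministic-clock testing style used above.
func (rateLimitSuite) TestTakeSketch(c *gc.C) {
    tb := NewBucket(100*time.Millisecond, 2)
    // Drain the bucket at t=0, then ask for one more token at t=50ms:
    // the next quantum lands at t=100ms, so the caller must wait 50ms.
    tb.take(tb.startTime, 2, infinityDuration)
    d, ok := tb.take(tb.startTime.Add(50*time.Millisecond), 1, infinityDuration)
    c.Assert(ok, gc.Equals, true)
    c.Assert(d, gc.Equals, 50*time.Millisecond)
}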
51  Godeps/_workspace/src/github.com/juju/ratelimit/reader.go  generated vendored  Normal file
@@ -0,0 +1,51 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.

package ratelimit

import "io"

type reader struct {
    r      io.Reader
    bucket *Bucket
}

// Reader returns a reader that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Reader(r io.Reader, bucket *Bucket) io.Reader {
    return &reader{
        r:      r,
        bucket: bucket,
    }
}

func (r *reader) Read(buf []byte) (int, error) {
    n, err := r.r.Read(buf)
    if n <= 0 {
        return n, err
    }
    r.bucket.Wait(int64(n))
    return n, err
}

type writer struct {
    w      io.Writer
    bucket *Bucket
}

// Writer returns a writer that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Writer(w io.Writer, bucket *Bucket) io.Writer {
    return &writer{
        w:      w,
        bucket: bucket,
    }
}

func (w *writer) Write(buf []byte) (int, error) {
    w.bucket.Wait(int64(len(buf)))
    return w.w.Write(buf)
}
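These wrappers are how a byte-per-token bucket becomes a bandwidth limiter. A minimal sketch (not from the diff; the 50 KiB/s figure and the use of os.Stdin/os.Stdout are invented for illustration):

// Copy stdin to stdout at roughly 50 KiB/s by wrapping the destination
// in a rate-limited writer. Illustrative only; the rate is made up.
package main

import (
    "io"
    "os"

    "github.com/juju/ratelimit"
)

func main() {
    bucket := ratelimit.NewBucketWithRate(50*1024, 50*1024) // tokens are bytes
    io.Copy(ratelimit.Writer(os.Stdout, bucket), os.Stdin)
}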
14  Godeps/_workspace/src/github.com/kardianos/osext/README.md  generated vendored  Normal file
@@ -0,0 +1,14 @@
### Extensions to the "os" package.

## Find the current Executable and ExecutableFolder.

There is sometimes utility in finding the currently running executable
file, for example to upgrade the current executable or to find resources
located relative to the executable file.

Multi-platform; supports:
 * Linux
 * OS X
 * Windows
 * Plan 9
 * BSDs.
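A minimal usage sketch for this package (not from the diff; the config.json resource name is invented):

// Locate the running binary and a resource file next to it.
package main

import (
    "fmt"
    "path/filepath"

    "github.com/kardianos/osext"
)

func main() {
    exe, err := osext.Executable()
    if err != nil {
        panic(err)
    }
    folder, err := osext.ExecutableFolder()
    if err != nil {
        panic(err)
    }
    fmt.Println("binary:", exe)
    fmt.Println("config next to it:", filepath.Join(folder, "config.json"))
}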
@@ -3,31 +3,25 @@
// license that can be found in the LICENSE file.

// Extensions to the standard "os" package.
package osext // import "github.com/kardianos/osext"
package osext

import "path/filepath"

var cx, ce = executableClean()

func executableClean() (string, error) {
    p, err := executable()
    return filepath.Clean(p), err
}

// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
    return cx, ce
    p, err := executable()
    return filepath.Clean(p), err
}

// Returns same path as Executable, returns just the folder
// path. Excludes the executable name and any trailing slash.
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
    p, err := Executable()
    if err != nil {
        return "", err
    }

    return filepath.Dir(p), nil
    folder, _ := filepath.Split(p)
    return folder, nil
}
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//+build !go1.8

package osext

import (
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly
// +build linux netbsd openbsd solaris dragonfly

package osext

@@ -11,23 +11,15 @@ import (
    "fmt"
    "os"
    "runtime"
    "strings"
)

func executable() (string, error) {
    switch runtime.GOOS {
    case "linux":
        const deletedTag = " (deleted)"
        execpath, err := os.Readlink("/proc/self/exe")
        if err != nil {
            return execpath, err
        }
        execpath = strings.TrimSuffix(execpath, deletedTag)
        execpath = strings.TrimPrefix(execpath, deletedTag)
        return execpath, nil
        return os.Readlink("/proc/self/exe")
    case "netbsd":
        return os.Readlink("/proc/curproc/exe")
    case "dragonfly":
    case "openbsd", "dragonfly":
        return os.Readlink("/proc/curproc/file")
    case "solaris":
        return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8,darwin !go1.8,freebsd openbsd
// +build darwin freebsd

package osext

import (
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "syscall"
@@ -24,8 +23,6 @@ func executable() (string, error) {
        mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
    case "darwin":
        mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
    case "openbsd":
        mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */}
    }

    n := uintptr(0)
@@ -45,58 +42,14 @@ func executable() (string, error) {
    if n == 0 { // This shouldn't happen.
        return "", nil
    }

    var execPath string
    switch runtime.GOOS {
    case "openbsd":
        // buf now contains **argv, with pointers to each of the C-style
        // NULL terminated arguments.
        var args []string
        argv := uintptr(unsafe.Pointer(&buf[0]))
    Loop:
        for {
            argp := *(**[1 << 20]byte)(unsafe.Pointer(argv))
            if argp == nil {
                break
            }
            for i := 0; uintptr(i) < n; i++ {
                // we don't want the full arguments list
                if string(argp[i]) == " " {
                    break Loop
                }
                if argp[i] != 0 {
                    continue
                }
                args = append(args, string(argp[:i]))
                n -= uintptr(i)
                break
            }
            if n < unsafe.Sizeof(argv) {
                break
            }
            argv += unsafe.Sizeof(argv)
            n -= unsafe.Sizeof(argv)
        for i, v := range buf {
            if v == 0 {
                buf = buf[:i]
                break
            }
        execPath = args[0]
        // There is no canonical way to get an executable path on
        // OpenBSD, so check PATH in case we are called directly
        if execPath[0] != '/' && execPath[0] != '.' {
            execIsInPath, err := exec.LookPath(execPath)
            if err == nil {
                execPath = execIsInPath
            }
        }
    default:
        for i, v := range buf {
            if v == 0 {
                buf = buf[:i]
                break
            }
        }
        execPath = string(buf)
    }

    var err error
    execPath := string(buf)
    // execPath will not be empty due to above checks.
    // Try to get the absolute path if the execPath is not rooted.
    if execPath[0] != '/' {
79  Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go  generated vendored  Normal file
@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin linux freebsd netbsd windows

package osext

import (
    "fmt"
    "os"
    oexec "os/exec"
    "path/filepath"
    "runtime"
    "testing"
)

const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"

func TestExecPath(t *testing.T) {
    ep, err := Executable()
    if err != nil {
        t.Fatalf("ExecPath failed: %v", err)
    }
    // we want fn to be of the form "dir/prog"
    dir := filepath.Dir(filepath.Dir(ep))
    fn, err := filepath.Rel(dir, ep)
    if err != nil {
        t.Fatalf("filepath.Rel: %v", err)
    }
    cmd := &oexec.Cmd{}
    // make child start with a relative program path
    cmd.Dir = dir
    cmd.Path = fn
    // forge argv[0] for the child, so that we can verify the real path of
    // the executable is found correctly without being influenced by argv[0].
    cmd.Args = []string{"-", "-test.run=XXXX"}
    cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
    out, err := cmd.CombinedOutput()
    if err != nil {
        t.Fatalf("exec(self) failed: %v", err)
    }
    outs := string(out)
    if !filepath.IsAbs(outs) {
        t.Fatalf("Child returned %q, want an absolute path", out)
    }
    if !sameFile(outs, ep) {
        t.Fatalf("Child returned %q, not the same file as %q", out, ep)
    }
}

func sameFile(fn1, fn2 string) bool {
    fi1, err := os.Stat(fn1)
    if err != nil {
        return false
    }
    fi2, err := os.Stat(fn2)
    if err != nil {
        return false
    }
    return os.SameFile(fi1, fi2)
}

func init() {
    if e := os.Getenv(execPath_EnvVar); e != "" {
        // first chdir to another path
        dir := "/"
        if runtime.GOOS == "windows" {
            dir = filepath.VolumeName(".")
        }
        os.Chdir(dir)
        if ep, err := Executable(); err != nil {
            fmt.Fprint(os.Stderr, "ERROR: ", err)
        } else {
            fmt.Fprint(os.Stderr, ep)
        }
        os.Exit(0)
    }
}
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//+build !go1.8

package osext

import (
4  Godeps/_workspace/src/github.com/syncthing/protocol/AUTHORS  generated vendored  Normal file
@@ -0,0 +1,4 @@
# This is the official list of Protocol Authors for copyright purposes.

Audrius Butkevicius <audrius.butkevicius@gmail.com>
Jakob Borg <jakob@nym.se>
76  Godeps/_workspace/src/github.com/syncthing/protocol/CONTRIBUTING.md  generated vendored  Normal file
@@ -0,0 +1,76 @@
## Reporting Bugs

Please file bugs in the [Github Issue
Tracker](https://github.com/syncthing/protocol/issues).

## Contributing Code

Every contribution is welcome. Following the points below will make this
a smoother process.

Individuals making significant and valuable contributions are given
commit access to the project. If you make a significant contribution and
are not considered for commit access, please contact any of the
Syncthing core team members.

All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
to the discretion of the contributor.

### Authorship

All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit:

    $ git config --global user.name "Jane Doe"
    $ git config --global user.email janedoe@example.com

You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.

## Coding Style

- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
  as much as makes sense.

- All text files use Unix line endings.

- Each commit should be `go fmt` clean.

- The commit message subject should be a single short sentence
  describing the change, starting with a capital letter.

- Commits that resolve an existing issue must include the issue number
  as `(fixes #123)` at the end of the commit message subject.

- Imports are grouped per `goimports` standard; that is, standard
  library first, then third party libraries after a blank line.

- A contribution solving a single issue or introducing a single new
  feature should probably be a single commit based on the current
  `master` branch. You may be asked to "rebase" or "squash" your pull
  request to make sure this is the case, especially if there have been
  amendments during review.

## Licensing

All contributions are made under the same MIT license as the rest of the
project, except documentation, user interface text and translation
strings, which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.

When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.

## Tests

Yes please!

## License

MIT
0  lib/protocol/LICENSE → Godeps/_workspace/src/github.com/syncthing/protocol/LICENSE  generated vendored
13  Godeps/_workspace/src/github.com/syncthing/protocol/README.md  generated vendored  Normal file
@@ -0,0 +1,13 @@
The BEPv1 Protocol
==================

[](http://build.syncthing.net/job/protocol/lastBuild/)
[](http://godoc.org/github.com/syncthing/protocol)
[](http://opensource.org/licenses/MIT)

This is the protocol implementation used by Syncthing.

License
=======

MIT
74  Godeps/_workspace/src/github.com/syncthing/protocol/common_test.go  generated vendored  Normal file
@@ -0,0 +1,74 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
    "io"
    "time"
)

type TestModel struct {
    data     []byte
    folder   string
    name     string
    offset   int64
    size     int
    closedCh chan bool
}

func newTestModel() *TestModel {
    return &TestModel{
        closedCh: make(chan bool),
    }
}

func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
}

func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
}

func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
    t.folder = folder
    t.name = name
    t.offset = offset
    t.size = size
    return t.data, nil
}

func (t *TestModel) Close(deviceID DeviceID, err error) {
    close(t.closedCh)
}

func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}

func (t *TestModel) isClosed() bool {
    select {
    case <-t.closedCh:
        return true
    case <-time.After(1 * time.Second):
        return false // Timeout
    }
}

type ErrPipe struct {
    io.PipeWriter
    written int
    max     int
    err     error
    closed  bool
}

func (e *ErrPipe) Write(data []byte) (int, error) {
    if e.closed {
        return 0, e.err
    }
    if e.written+len(data) > e.max {
        n, _ := e.PipeWriter.Write(data[:e.max-e.written])
        e.PipeWriter.CloseWithError(e.err)
        e.closed = true
        return n, e.err
    }
    return e.PipeWriter.Write(data)
}
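ErrPipe injects a write failure once max bytes have passed, which is how the protocol tests simulate a connection dropping mid-stream. A hypothetical wiring sketch (the 256-byte cap and the error text are invented; NewConnection, LocalDeviceID and CompressAlways come from protocol.go further down):

// Hypothetical: fail the connection's writes after 256 bytes.
pr, pw := io.Pipe()
ep := &ErrPipe{PipeWriter: *pw, max: 256, err: errors.New("boom")}
c := NewConnection(LocalDeviceID, pr, ep, newTestModel(), "name", CompressAlways)
c.ClusterConfig(ClusterConfigMessage{}) // writes start failing past the cap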
@@ -4,7 +4,13 @@ package protocol

import "fmt"

type Compression int

const (
    CompressMetadata Compression = iota // zero value is the default, default should be "metadata"
    CompressNever
    CompressAlways

    compressionThreshold = 128 // don't bother compressing messages smaller than this many bytes
)

@@ -25,6 +31,14 @@ var compressionUnmarshal = map[string]Compression{
    "always": CompressAlways,
}

func (c Compression) String() string {
    s, ok := compressionMarshal[c]
    if !ok {
        return fmt.Sprintf("unknown:%d", c)
    }
    return s
}

func (c Compression) GoString() string {
    return fmt.Sprintf("%q", c.String())
}
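A small behavior sketch for these helpers. It assumes compressionMarshal mirrors the compressionUnmarshal map shown in the hunk above, mapping the three constants to "metadata", "never" and "always":

// Assuming compressionMarshal is the inverse of compressionUnmarshal:
fmt.Println(CompressMetadata.String()) // "metadata"
fmt.Println(Compression(9).String())   // "unknown:9"
fmt.Println(CompressAlways.GoString()) // `"always"` (quoted form)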
15  Godeps/_workspace/src/github.com/syncthing/protocol/debug.go  generated vendored  Normal file
@@ -0,0 +1,15 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
    "os"
    "strings"

    "github.com/calmh/logger"
)

var (
    debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
    l     = logger.DefaultLogger
)
114  lib/protocol/deviceid.go → Godeps/_workspace/src/github.com/syncthing/protocol/deviceid.go  generated vendored
@@ -4,24 +4,19 @@ package protocol

import (
    "bytes"
    "crypto/sha256"
    "encoding/base32"
    "encoding/binary"
    "errors"
    "fmt"
    "regexp"
    "strings"

    "github.com/syncthing/syncthing/lib/sha256"
    "github.com/calmh/luhn"
)

const DeviceIDLength = 32
type DeviceID [32]byte

type DeviceID [DeviceIDLength]byte
type ShortID uint64

var (
    LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
    EmptyDeviceID = DeviceID{ /* all zeroes */ }
)
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}

// NewDeviceID generates a new device ID from the raw bytes of a certificate
func NewDeviceID(rawCert []byte) DeviceID {
@@ -49,9 +44,6 @@ func DeviceIDFromBytes(bs []byte) DeviceID {

// String returns the canonical string representation of the device ID
func (n DeviceID) String() string {
    if n == EmptyDeviceID {
        return ""
    }
    id := base32.StdEncoding.EncodeToString(n[:])
    id = strings.Trim(id, "=")
    id, err := luhnify(id)
@@ -72,27 +64,13 @@ func (n DeviceID) Compare(other DeviceID) int {
}

func (n DeviceID) Equals(other DeviceID) bool {
    return bytes.Equal(n[:], other[:])
}

// Short returns an integer representing bits 0-63 of the device ID.
func (n DeviceID) Short() ShortID {
    return ShortID(binary.BigEndian.Uint64(n[:]))
    return bytes.Compare(n[:], other[:]) == 0
}

func (n *DeviceID) MarshalText() ([]byte, error) {
    return []byte(n.String()), nil
}

func (s ShortID) String() string {
    if s == 0 {
        return ""
    }
    var bs [8]byte
    binary.BigEndian.PutUint64(bs[:], uint64(s))
    return base32.StdEncoding.EncodeToString(bs[:])[:7]
}

func (n *DeviceID) UnmarshalText(bs []byte) error {
    id := string(bs)
    id = strings.Trim(id, "=")
@@ -102,9 +80,6 @@ func (n *DeviceID) UnmarshalText(bs []byte) error {

    var err error
    switch len(id) {
    case 0:
        *n = EmptyDeviceID
        return nil
    case 56:
        // New style, with check digits
        id, err = unluhnify(id)
@@ -121,81 +96,51 @@ func (n *DeviceID) UnmarshalText(bs []byte) error {
        copy(n[:], dec)
        return nil
    default:
        return fmt.Errorf("%q: device ID invalid: incorrect length", bs)
        return errors.New("device ID invalid: incorrect length")
    }
}

func (n *DeviceID) ProtoSize() int {
    // Used by protobuf marshaller.
    return DeviceIDLength
}

func (n *DeviceID) MarshalTo(bs []byte) (int, error) {
    // Used by protobuf marshaller.
    if len(bs) < DeviceIDLength {
        return 0, errors.New("destination too short")
    }
    copy(bs, (*n)[:])
    return DeviceIDLength, nil
}

func (n *DeviceID) Unmarshal(bs []byte) error {
    // Used by protobuf marshaller.
    if len(bs) < DeviceIDLength {
        return fmt.Errorf("%q: not enough data", bs)
    }
    copy((*n)[:], bs)
    return nil
}

func luhnify(s string) (string, error) {
    if len(s) != 52 {
        panic("unsupported string length")
    }

    res := make([]byte, 4*(13+1))
    res := make([]string, 0, 4)
    for i := 0; i < 4; i++ {
        p := s[i*13 : (i+1)*13]
        copy(res[i*(13+1):], p)
        l, err := luhnBase32.generate(p)
        l, err := luhn.Base32.Generate(p)
        if err != nil {
            return "", err
        }
        res[(i+1)*(13)+i] = byte(l)
        res = append(res, fmt.Sprintf("%s%c", p, l))
    }
    return string(res), nil
    return res[0] + res[1] + res[2] + res[3], nil
}

func unluhnify(s string) (string, error) {
    if len(s) != 56 {
        return "", fmt.Errorf("%q: unsupported string length %d", s, len(s))
        return "", fmt.Errorf("unsupported string length %d", len(s))
    }

    res := make([]byte, 52)
    res := make([]string, 0, 4)
    for i := 0; i < 4; i++ {
        p := s[i*(13+1) : (i+1)*(13+1)-1]
        copy(res[i*13:], p)
        l, err := luhnBase32.generate(p)
        p := s[i*14 : (i+1)*14-1]
        l, err := luhn.Base32.Generate(p)
        if err != nil {
            return "", err
        }
        if s[(i+1)*14-1] != byte(l) {
            return "", fmt.Errorf("%q: check digit incorrect", s)
        if g := fmt.Sprintf("%s%c", p, l); g != s[i*14:(i+1)*14] {
            return "", errors.New("check digit incorrect")
        }
        res = append(res, p)
    }
    return string(res), nil
    return res[0] + res[1] + res[2] + res[3], nil
}

func chunkify(s string) string {
    chunks := len(s) / 7
    res := make([]byte, chunks*(7+1)-1)
    for i := 0; i < chunks; i++ {
        if i > 0 {
            res[i*(7+1)-1] = '-'
        }
        copy(res[i*(7+1):], s[i*7:(i+1)*7])
    }
    return string(res)
    s = regexp.MustCompile("(.{7})").ReplaceAllString(s, "$1-")
    s = strings.Trim(s, "-")
    return s
}

func unchunkify(s string) string {
@@ -210,18 +155,3 @@ func untypeoify(s string) string {
    s = strings.Replace(s, "8", "B", -1)
    return s
}

// DeviceIDs is a sortable slice of DeviceID
type DeviceIDs []DeviceID

func (l DeviceIDs) Len() int {
    return len(l)
}

func (l DeviceIDs) Less(a, b int) bool {
    return l[a].Compare(l[b]) == -1
}

func (l DeviceIDs) Swap(a, b int) {
    l[a], l[b] = l[b], l[a]
}
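The luhnify/chunkify pipeline above turns the 52 base32 characters of a device ID into the familiar dash-separated 56-character form, one check digit per 13-character group. A round-trip sketch using the canonical example string from deviceid_test.go below:

// Round-trip sketch for the ID formatting above.
var id DeviceID
if err := id.UnmarshalText([]byte("P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2")); err != nil {
    panic(err)
}
fmt.Println(id.String()) // prints the same eight 7-character groups back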
76  Godeps/_workspace/src/github.com/syncthing/protocol/deviceid_test.go  generated vendored  Normal file
@@ -0,0 +1,76 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import "testing"

var formatted = "P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2"
var formatCases = []string{
    "P56IOI-7MZJNU-2IQGDR-EYDM2M-GTMGL3-BXNPQ6-W5BTBB-Z4TJXZ-WICQ",
    "P56IOI-7MZJNU2Y-IQGDR-EYDM2M-GTI-MGL3-BXNPQ6-W5BM-TBB-Z4TJXZ-WICQ2",
    "P56IOI7 MZJNU2I QGDREYD M2MGTMGL 3BXNPQ6W 5BTB BZ4T JXZWICQ",
    "P56IOI7 MZJNU2Y IQGDREY DM2MGTI MGL3BXN PQ6W5BM TBBZ4TJ XZWICQ2",
    "P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ",
    "p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicq",
    "P56IOI7MZJNU2YIQGDREYDM2MGTIMGL3BXNPQ6W5BMTBBZ4TJXZWICQ2",
    "P561017MZJNU2YIQGDREYDM2MGTIMGL3BXNPQ6W5BMT88Z4TJXZWICQ2",
    "p56ioi7mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmtbbz4tjxzwicq2",
    "p561017mzjnu2yiqgdreydm2mgtimgl3bxnpq6w5bmt88z4tjxzwicq2",
}

func TestFormatDeviceID(t *testing.T) {
    for i, tc := range formatCases {
        var id DeviceID
        err := id.UnmarshalText([]byte(tc))
        if err != nil {
            t.Errorf("#%d UnmarshalText(%q); %v", i, tc, err)
        } else if f := id.String(); f != formatted {
            t.Errorf("#%d FormatDeviceID(%q)\n\t%q !=\n\t%q", i, tc, f, formatted)
        }
    }
}

var validateCases = []struct {
    s  string
    ok bool
}{
    {"", false},
    {"P56IOI7-MZJNU2Y-IQGDREY-DM2MGTI-MGL3BXN-PQ6W5BM-TBBZ4TJ-XZWICQ2", true},
    {"P56IOI7-MZJNU2-IQGDREY-DM2MGT-MGL3BXN-PQ6W5B-TBBZ4TJ-XZWICQ", true},
    {"P56IOI7 MZJNU2I QGDREYD M2MGTMGL 3BXNPQ6W 5BTB BZ4T JXZWICQ", true},
    {"P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQ", true},
    {"P56IOI7MZJNU2IQGDREYDM2MGTMGL3BXNPQ6W5BTBBZ4TJXZWICQCCCC", false},
    {"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicq", true},
    {"p56ioi7mzjnu2iqgdreydm2mgtmgl3bxnpq6w5btbbz4tjxzwicqCCCC", false},
}

func TestValidateDeviceID(t *testing.T) {
    for _, tc := range validateCases {
        var id DeviceID
        err := id.UnmarshalText([]byte(tc.s))
        if (err == nil && !tc.ok) || (err != nil && tc.ok) {
            t.Errorf("ValidateDeviceID(%q); %v != %v", tc.s, err, tc.ok)
        }
    }
}

func TestMarshallingDeviceID(t *testing.T) {
    n0 := DeviceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}
    n1 := DeviceID{}
    n2 := DeviceID{}

    bs, _ := n0.MarshalText()
    n1.UnmarshalText(bs)
    bs, _ = n1.MarshalText()
    n2.UnmarshalText(bs)

    if n2.String() != n0.String() {
        t.Errorf("String marshalling error; %q != %q", n2.String(), n0.String())
    }
    if !n2.Equals(n0) {
        t.Error("Equals error")
    }
    if n2.Compare(n0) != 0 {
        t.Error("Compare error")
    }
}
0  lib/protocol/doc.go → Godeps/_workspace/src/github.com/syncthing/protocol/doc.go  generated vendored
43  Godeps/_workspace/src/github.com/syncthing/protocol/header.go  generated vendored  Normal file
@@ -0,0 +1,43 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import "github.com/calmh/xdr"

type header struct {
    version     int
    msgID       int
    msgType     int
    compression bool
}

func (h header) encodeXDR(xw *xdr.Writer) (int, error) {
    u := encodeHeader(h)
    return xw.WriteUint32(u)
}

func (h *header) decodeXDR(xr *xdr.Reader) error {
    u := xr.ReadUint32()
    *h = decodeHeader(u)
    return xr.Error()
}

func encodeHeader(h header) uint32 {
    var isComp uint32
    if h.compression {
        isComp = 1 << 0 // the zeroth bit is the compression bit
    }
    return uint32(h.version&0xf)<<28 +
        uint32(h.msgID&0xfff)<<16 +
        uint32(h.msgType&0xff)<<8 +
        isComp
}

func decodeHeader(u uint32) header {
    return header{
        version:     int(u>>28) & 0xf,
        msgID:       int(u>>16) & 0xfff,
        msgType:     int(u>>8) & 0xff,
        compression: u&1 == 1,
    }
}
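A worked example of the 32-bit layout above, with invented values: version 0 in bits 28-31, msgID 42 in bits 16-27, msgType 2 (Request, per protocol.go below) in bits 8-15, and the compression bit set.

// Worked example of the header bit packing (values invented):
h := header{version: 0, msgID: 42, msgType: 2, compression: true}
u := encodeHeader(h)                  // 0<<28 + 42<<16 + 2<<8 + 1
fmt.Printf("0x%08x\n", u)             // 0x002a0201
fmt.Printf("%+v\n", decodeHeader(u))  // round-trips to the same header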
122  Godeps/_workspace/src/github.com/syncthing/protocol/message.go  generated vendored  Normal file
@@ -0,0 +1,122 @@
// Copyright (C) 2014 The Protocol Authors.

//go:generate genxdr -o message_xdr.go message.go

package protocol

import "fmt"

type IndexMessage struct {
    Folder  string
    Files   []FileInfo
    Flags   uint32
    Options []Option // max:64
}

type FileInfo struct {
    Name         string // max:8192
    Flags        uint32
    Modified     int64
    Version      int64
    LocalVersion int64
    Blocks       []BlockInfo
}

func (f FileInfo) String() string {
    return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, Blocks:%v}",
        f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
}

func (f FileInfo) Size() (bytes int64) {
    if f.IsDeleted() || f.IsDirectory() {
        return 128
    }
    for _, b := range f.Blocks {
        bytes += int64(b.Size)
    }
    return
}

func (f FileInfo) IsDeleted() bool {
    return f.Flags&FlagDeleted != 0
}

func (f FileInfo) IsInvalid() bool {
    return f.Flags&FlagInvalid != 0
}

func (f FileInfo) IsDirectory() bool {
    return f.Flags&FlagDirectory != 0
}

func (f FileInfo) IsSymlink() bool {
    return f.Flags&FlagSymlink != 0
}

func (f FileInfo) HasPermissionBits() bool {
    return f.Flags&FlagNoPermBits == 0
}

type BlockInfo struct {
    Offset int64 // noencode (cache only)
    Size   int32
    Hash   []byte // max:64
}

func (b BlockInfo) String() string {
    return fmt.Sprintf("Block{%d/%d/%x}", b.Offset, b.Size, b.Hash)
}

type RequestMessage struct {
    Folder  string // max:64
    Name    string // max:8192
    Offset  int64
    Size    int32
    Hash    []byte // max:64
    Flags   uint32
    Options []Option // max:64
}

type ResponseMessage struct {
    Data  []byte
    Error int32
}

type ClusterConfigMessage struct {
    ClientName    string // max:64
    ClientVersion string // max:64
    Folders       []Folder
    Options       []Option // max:64
}

func (o *ClusterConfigMessage) GetOption(key string) string {
    for _, option := range o.Options {
        if option.Key == key {
            return option.Value
        }
    }
    return ""
}

type Folder struct {
    ID      string // max:64
    Devices []Device
}

type Device struct {
    ID              []byte // max:32
    Flags           uint32
    MaxLocalVersion int64
}

type Option struct {
    Key   string // max:64
    Value string // max:1024
}

type CloseMessage struct {
    Reason string // max:1024
    Code   int32
}

type EmptyMessage struct{}
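A small sketch of how the flag helpers compose. The flag constants come from protocol.go further down; the file name is invented:

// Sketch: a deleted directory entry.
fi := FileInfo{Name: "photos/old", Flags: FlagDeleted | FlagDirectory}
fmt.Println(fi.IsDeleted(), fi.IsDirectory()) // true true
fmt.Println(fi.Size())                        // 128: deleted/directory entries get a nominal size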
1027  Godeps/_workspace/src/github.com/syncthing/protocol/message_xdr.go  generated vendored  Normal file
File diff suppressed because it is too large
@@ -9,24 +9,32 @@ package protocol

import "golang.org/x/text/unicode/norm"

type nativeModel struct {
    Model
    next Model
}

func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
    for i := range files {
        files[i].Name = norm.NFD.String(files[i].Name)
    }
    m.Model.Index(deviceID, folder, files)
    m.next.Index(deviceID, folder, files)
}

func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
    for i := range files {
        files[i].Name = norm.NFD.String(files[i].Name)
    }
    m.Model.IndexUpdate(deviceID, folder, files)
    m.next.IndexUpdate(deviceID, folder, files)
}

func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
    name = norm.NFD.String(name)
    return m.Model.Request(deviceID, folder, name, offset, hash, fromTemporary, buf)
    return m.next.Request(deviceID, folder, name, offset, size)
}

func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
    m.next.ClusterConfig(deviceID, config)
}

func (m nativeModel) Close(deviceID DeviceID, err error) {
    m.next.Close(deviceID, err)
}
31  Godeps/_workspace/src/github.com/syncthing/protocol/nativemodel_unix.go  generated vendored  Normal file
@@ -0,0 +1,31 @@
// Copyright (C) 2014 The Protocol Authors.

// +build !windows,!darwin

package protocol

// Normal Unixes use NFC and slashes, which is the wire format.

type nativeModel struct {
    next Model
}

func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
    m.next.Index(deviceID, folder, files)
}

func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
    m.next.IndexUpdate(deviceID, folder, files)
}

func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
    return m.next.Request(deviceID, folder, name, offset, size)
}

func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
    m.next.ClusterConfig(deviceID, config)
}

func (m nativeModel) Close(deviceID DeviceID, err error) {
    m.next.Close(deviceID, err)
}
70  Godeps/_workspace/src/github.com/syncthing/protocol/nativemodel_windows.go  generated vendored  Normal file
@@ -0,0 +1,70 @@
// Copyright (C) 2014 The Protocol Authors.

// +build windows

package protocol

// Windows uses backslashes as file separator and disallows a bunch of
// characters in the filename

import (
    "path/filepath"
    "strings"
)

var disallowedCharacters = string([]rune{
    '<', '>', ':', '"', '|', '?', '*',
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
    11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
    21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
    31,
})

type nativeModel struct {
    next Model
}

func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
    for i, f := range files {
        if strings.ContainsAny(f.Name, disallowedCharacters) {
            if f.IsDeleted() {
                // Don't complain if the file is marked as deleted, since it
                // can't possibly exist here anyway.
                continue
            }
            files[i].Flags |= FlagInvalid
            l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
        }
        files[i].Name = filepath.FromSlash(f.Name)
    }
    m.next.Index(deviceID, folder, files)
}

func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
    for i, f := range files {
        if strings.ContainsAny(f.Name, disallowedCharacters) {
            if f.IsDeleted() {
                // Don't complain if the file is marked as deleted, since it
                // can't possibly exist here anyway.
                continue
            }
            files[i].Flags |= FlagInvalid
            l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
        }
        files[i].Name = filepath.FromSlash(files[i].Name)
    }
    m.next.IndexUpdate(deviceID, folder, files)
}

func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
    name = filepath.FromSlash(name)
    return m.next.Request(deviceID, folder, name, offset, size)
}

func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
    m.next.ClusterConfig(deviceID, config)
}

func (m nativeModel) Close(deviceID DeviceID, err error) {
    m.next.Close(deviceID, err)
}
725  Godeps/_workspace/src/github.com/syncthing/protocol/protocol.go  generated vendored  Normal file
@@ -0,0 +1,725 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
    "encoding/binary"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "sync"
    "time"

    lz4 "github.com/bkaradzic/go-lz4"
)

const (
    BlockSize = 128 * 1024
)

const (
    messageTypeClusterConfig = 0
    messageTypeIndex         = 1
    messageTypeRequest       = 2
    messageTypeResponse      = 3
    messageTypePing          = 4
    messageTypePong          = 5
    messageTypeIndexUpdate   = 6
    messageTypeClose         = 7
)

const (
    stateInitial = iota
    stateCCRcvd
    stateIdxRcvd
)

const (
    FlagDeleted              uint32 = 1 << 12
    FlagInvalid                     = 1 << 13
    FlagDirectory                   = 1 << 14
    FlagNoPermBits                  = 1 << 15
    FlagSymlink                     = 1 << 16
    FlagSymlinkMissingTarget        = 1 << 17

    FlagsAll = (1 << 18) - 1

    SymlinkTypeMask = FlagDirectory | FlagSymlinkMissingTarget
)

const (
    FlagShareTrusted  uint32 = 1 << 0
    FlagShareReadOnly        = 1 << 1
    FlagIntroducer           = 1 << 2
    FlagShareBits            = 0x000000ff
)

var (
    ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
    ErrClosed      = errors.New("connection closed")
)

// Specific variants of empty messages...
type pingMessage struct{ EmptyMessage }
type pongMessage struct{ EmptyMessage }

type Model interface {
    // An index was received from the peer device
    Index(deviceID DeviceID, folder string, files []FileInfo)
    // An index update was received from the peer device
    IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
    // A request was made by the peer device
    Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
    // A cluster configuration message was received
    ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
    // The peer device closed the connection
    Close(deviceID DeviceID, err error)
}

type Connection interface {
    ID() DeviceID
    Name() string
    Index(folder string, files []FileInfo) error
    IndexUpdate(folder string, files []FileInfo) error
    Request(folder string, name string, offset int64, size int) ([]byte, error)
    ClusterConfig(config ClusterConfigMessage)
    Statistics() Statistics
}
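Anything that wants to speak BEP implements Model; TestModel in common_test.go above is one example. A minimal no-op sketch (the type name is invented):

// Minimal no-op Model sketch; compare TestModel above for the version
// the tests actually use.
type nopModel struct{}

func (nopModel) Index(deviceID DeviceID, folder string, files []FileInfo)       {}
func (nopModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {}
func (nopModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
    return nil, nil
}
func (nopModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {}
func (nopModel) Close(deviceID DeviceID, err error)                           {}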
||||
type rawConnection struct {
|
||||
id DeviceID
|
||||
name string
|
||||
receiver Model
|
||||
state int
|
||||
|
||||
cr *countingReader
|
||||
cw *countingWriter
|
||||
|
||||
awaiting [4096]chan asyncResult
|
||||
awaitingMut sync.Mutex
|
||||
|
||||
idxMut sync.Mutex // ensures serialization of Index calls
|
||||
|
||||
nextID chan int
|
||||
outbox chan hdrMsg
|
||||
closed chan struct{}
|
||||
once sync.Once
|
||||
|
||||
compression Compression
|
||||
|
||||
rdbuf0 []byte // used & reused by readMessage
|
||||
rdbuf1 []byte // used & reused by readMessage
|
||||
}
|
||||
|
||||
type asyncResult struct {
|
||||
val []byte
|
||||
err error
|
||||
}
|
||||
|
||||
type hdrMsg struct {
|
||||
hdr header
|
||||
msg encodable
|
||||
}
|
||||
|
||||
type encodable interface {
|
||||
AppendXDR([]byte) ([]byte, error)
|
||||
}
|
||||
|
||||
type isEofer interface {
|
||||
IsEOF() bool
|
||||
}
|
||||
|
||||
const (
|
||||
pingTimeout = 30 * time.Second
|
||||
pingIdleTime = 60 * time.Second
|
||||
)
|
||||
|
||||
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
|
||||
cr := &countingReader{Reader: reader}
|
||||
cw := &countingWriter{Writer: writer}
|
||||
|
||||
c := rawConnection{
|
||||
id: deviceID,
|
||||
name: name,
|
||||
receiver: nativeModel{receiver},
|
||||
state: stateInitial,
|
||||
cr: cr,
|
||||
cw: cw,
|
||||
outbox: make(chan hdrMsg),
|
||||
nextID: make(chan int),
|
||||
closed: make(chan struct{}),
|
||||
compression: compress,
|
||||
}
|
||||
|
||||
go c.readerLoop()
|
||||
go c.writerLoop()
|
||||
go c.pingerLoop()
|
||||
go c.idGenerator()
|
||||
|
||||
return wireFormatConnection{&c}
|
||||
}
|
||||
|
||||
func (c *rawConnection) ID() DeviceID {
|
||||
return c.id
|
||||
}
|
||||
|
||||
func (c *rawConnection) Name() string {
|
||||
return c.name
|
||||
}
|
||||
|
||||
// Index writes the list of file information to the connected peer device
|
||||
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
|
||||
select {
|
||||
case <-c.closed:
|
||||
return ErrClosed
|
||||
default:
|
||||
}
|
||||
c.idxMut.Lock()
|
||||
c.send(-1, messageTypeIndex, IndexMessage{
|
||||
Folder: folder,
|
||||
Files: idx,
|
||||
})
|
||||
c.idxMut.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// IndexUpdate writes the list of file information to the connected peer device as an update
|
||||
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
|
||||
select {
|
||||
case <-c.closed:
|
||||
return ErrClosed
|
||||
default:
|
||||
}
|
||||
c.idxMut.Lock()
|
||||
c.send(-1, messageTypeIndexUpdate, IndexMessage{
|
||||
Folder: folder,
|
||||
Files: idx,
|
||||
})
|
||||
c.idxMut.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Request returns the bytes for the specified block after fetching them from the connected peer.
|
||||
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
|
||||
var id int
|
||||
select {
|
||||
case id = <-c.nextID:
|
||||
case <-c.closed:
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
c.awaitingMut.Lock()
|
||||
if ch := c.awaiting[id]; ch != nil {
|
||||
panic("id taken")
|
||||
}
|
||||
rc := make(chan asyncResult, 1)
|
||||
c.awaiting[id] = rc
|
||||
c.awaitingMut.Unlock()
|
||||
|
||||
ok := c.send(id, messageTypeRequest, RequestMessage{
|
||||
Folder: folder,
|
||||
Name: name,
|
||||
Offset: offset,
|
||||
Size: int32(size),
|
||||
})
|
||||
if !ok {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
res, ok := <-rc
|
||||
if !ok {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
return res.val, res.err
|
||||
}
|
||||
|
||||
// ClusterConfig send the cluster configuration message to the peer and returns any error
|
||||
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
|
||||
c.send(-1, messageTypeClusterConfig, config)
|
||||
}
|
||||
|
||||
func (c *rawConnection) ping() bool {
|
||||
var id int
|
||||
select {
|
||||
case id = <-c.nextID:
|
||||
case <-c.closed:
|
||||
return false
|
||||
}
|
||||
|
||||
rc := make(chan asyncResult, 1)
|
||||
c.awaitingMut.Lock()
|
||||
c.awaiting[id] = rc
|
||||
c.awaitingMut.Unlock()
|
||||
|
||||
ok := c.send(id, messageTypePing, nil)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
res, ok := <-rc
|
||||
return ok && res.err == nil
|
||||
}
|
||||
func (c *rawConnection) readerLoop() (err error) {
	defer func() {
		c.close(err)
	}()

	for {
		select {
		case <-c.closed:
			return ErrClosed
		default:
		}

		hdr, msg, err := c.readMessage()
		if err != nil {
			return err
		}

		switch msg := msg.(type) {
		case IndexMessage:
			if msg.Flags != 0 {
				// We don't currently support or expect any flags.
				return fmt.Errorf("protocol error: unknown flags 0x%x in Index(Update) message", msg.Flags)
			}

			switch hdr.msgType {
			case messageTypeIndex:
				if c.state < stateCCRcvd {
					return fmt.Errorf("protocol error: index message in state %d", c.state)
				}
				c.handleIndex(msg)
				c.state = stateIdxRcvd

			case messageTypeIndexUpdate:
				if c.state < stateIdxRcvd {
					return fmt.Errorf("protocol error: index update message in state %d", c.state)
				}
				c.handleIndexUpdate(msg)
			}

		case RequestMessage:
			if msg.Flags != 0 {
				// We don't currently support or expect any flags.
				return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", msg.Flags)
			}
			if c.state < stateIdxRcvd {
				return fmt.Errorf("protocol error: request message in state %d", c.state)
			}
			// Requests are handled asynchronously
			go c.handleRequest(hdr.msgID, msg)

		case ResponseMessage:
			if c.state < stateIdxRcvd {
				return fmt.Errorf("protocol error: response message in state %d", c.state)
			}
			c.handleResponse(hdr.msgID, msg)

		case pingMessage:
			c.send(hdr.msgID, messageTypePong, pongMessage{})

		case pongMessage:
			c.handlePong(hdr.msgID)

		case ClusterConfigMessage:
			if c.state != stateInitial {
				return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
			}
			go c.receiver.ClusterConfig(c.id, msg)
			c.state = stateCCRcvd

		case CloseMessage:
			return errors.New(msg.Reason)

		default:
			return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
		}
	}
}

func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
	if cap(c.rdbuf0) < 8 {
		c.rdbuf0 = make([]byte, 8)
	} else {
		c.rdbuf0 = c.rdbuf0[:8]
	}
	_, err = io.ReadFull(c.cr, c.rdbuf0)
	if err != nil {
		return
	}

	hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4]))
	msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8]))

	if debug {
		l.Debugf("read header %v (msglen=%d)", hdr, msglen)
	}

	if hdr.version != 0 {
		err = fmt.Errorf("unknown protocol version 0x%x", hdr.version)
		return
	}

	if cap(c.rdbuf0) < msglen {
		c.rdbuf0 = make([]byte, msglen)
	} else {
		c.rdbuf0 = c.rdbuf0[:msglen]
	}
	_, err = io.ReadFull(c.cr, c.rdbuf0)
	if err != nil {
		return
	}

	if debug {
		l.Debugf("read %d bytes", len(c.rdbuf0))
	}

	msgBuf := c.rdbuf0
	if hdr.compression {
		c.rdbuf1 = c.rdbuf1[:cap(c.rdbuf1)]
		c.rdbuf1, err = lz4.Decode(c.rdbuf1, c.rdbuf0)
		if err != nil {
			return
		}
		msgBuf = c.rdbuf1
		if debug {
			l.Debugf("decompressed to %d bytes", len(msgBuf))
		}
	}

	if debug {
		if len(msgBuf) > 1024 {
			l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
		} else {
			l.Debugf("message data:\n%s", hex.Dump(msgBuf))
		}
	}

	// We check each returned error for the XDRError.IsEOF() method.
	// IsEOF()==true here means that the message contained fewer fields than
	// expected. It does not signify an EOF on the socket, because we've
	// successfully read a size value and that many bytes already. New fields
	// we expected but the other peer didn't send should be interpreted as
	// zero/nil, and if that's not valid we'll verify it somewhere else.

	switch hdr.msgType {
	case messageTypeIndex, messageTypeIndexUpdate:
		var idx IndexMessage
		err = idx.UnmarshalXDR(msgBuf)
		if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
			err = nil
		}
		msg = idx

	case messageTypeRequest:
		var req RequestMessage
		err = req.UnmarshalXDR(msgBuf)
		if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
			err = nil
		}
		msg = req

	case messageTypeResponse:
		var resp ResponseMessage
		err = resp.UnmarshalXDR(msgBuf)
		if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
			err = nil
		}
		msg = resp

	case messageTypePing:
		msg = pingMessage{}

	case messageTypePong:
		msg = pongMessage{}

	case messageTypeClusterConfig:
		var cc ClusterConfigMessage
		err = cc.UnmarshalXDR(msgBuf)
		if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
			err = nil
		}
		msg = cc

	case messageTypeClose:
		var cm CloseMessage
		err = cm.UnmarshalXDR(msgBuf)
		if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
			err = nil
		}
		msg = cm

	default:
		err = fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
	}

	return
}
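readMessage fixes the framing: every message is an 8-byte prefix, a big-endian uint32 header word plus a big-endian uint32 payload length, followed by the (possibly LZ4-compressed) XDR body. A minimal sketch of just that framing, with hypothetical helper names:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame prefixes payload with the 4-byte header word and 4-byte length.
func writeFrame(w io.Writer, headerWord uint32, payload []byte) error {
	var pre [8]byte
	binary.BigEndian.PutUint32(pre[0:4], headerWord)
	binary.BigEndian.PutUint32(pre[4:8], uint32(len(payload)))
	if _, err := w.Write(pre[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame reads one frame back: header word, length, then that many bytes.
func readFrame(r io.Reader) (uint32, []byte, error) {
	var pre [8]byte
	if _, err := io.ReadFull(r, pre[:]); err != nil {
		return 0, nil, err
	}
	hdr := binary.BigEndian.Uint32(pre[0:4])
	msglen := binary.BigEndian.Uint32(pre[4:8])
	payload := make([]byte, msglen)
	_, err := io.ReadFull(r, payload)
	return hdr, payload, err
}

func main() {
	var buf bytes.Buffer
	writeFrame(&buf, 0x00012a00, []byte("xdr payload")) // version 0, msgID 1, type 0x2a
	hdr, payload, _ := readFrame(&buf)
	fmt.Printf("hdr=%08x payload=%q\n", hdr, payload)
}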
func (c *rawConnection) handleIndex(im IndexMessage) {
	if debug {
		l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
	}
	c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}

func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
	if debug {
		l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
	}
	c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}

func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
	var out []FileInfo
	for i, f := range fs {
		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from incoming index", f.Name)
			if out == nil {
				// Most incoming updates won't contain anything invalid, so
				// we delay the allocation of the output slice until we
				// really need to do it, then copy all the so far valid
				// files to it.
				out = make([]FileInfo, i, len(fs)-1)
				copy(out, fs)
			}
		default:
			if out != nil {
				out = append(out, f)
			}
		}
	}
	if out != nil {
		return out
	}
	return fs
}

func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
	data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))

	c.send(msgID, messageTypeResponse, ResponseMessage{
		Data: data,
	})
}

func (c *rawConnection) handleResponse(msgID int, resp ResponseMessage) {
	c.awaitingMut.Lock()
	if rc := c.awaiting[msgID]; rc != nil {
		c.awaiting[msgID] = nil
		rc <- asyncResult{resp.Data, nil}
		close(rc)
	}
	c.awaitingMut.Unlock()
}

func (c *rawConnection) handlePong(msgID int) {
	c.awaitingMut.Lock()
	if rc := c.awaiting[msgID]; rc != nil {
		c.awaiting[msgID] = nil
		rc <- asyncResult{}
		close(rc)
	}
	c.awaitingMut.Unlock()
}

func (c *rawConnection) send(msgID int, msgType int, msg encodable) bool {
	if msgID < 0 {
		select {
		case id := <-c.nextID:
			msgID = id
		case <-c.closed:
			return false
		}
	}

	hdr := header{
		version: 0,
		msgID:   msgID,
		msgType: msgType,
	}

	select {
	case c.outbox <- hdrMsg{hdr, msg}:
		return true
	case <-c.closed:
		return false
	}
}

func (c *rawConnection) writerLoop() {
	var msgBuf = make([]byte, 8) // buffer for wire format message, kept and reused
	var uncBuf []byte            // buffer for uncompressed message, kept and reused
	for {
		var tempBuf []byte
		var err error

		select {
		case hm := <-c.outbox:
			if hm.msg != nil {
				// Uncompressed message in uncBuf
				uncBuf, err = hm.msg.AppendXDR(uncBuf[:0])
				if err != nil {
					c.close(err)
					return
				}

				compress := false
				switch c.compression {
				case CompressAlways:
					compress = true
				case CompressMetadata:
					compress = hm.hdr.msgType != messageTypeResponse
				}

				if compress && len(uncBuf) >= compressionThreshold {
					// Use compression for large messages
					hm.hdr.compression = true

					// Make sure we have enough space for the compressed
					// message plus header in msgBuf
					msgBuf = msgBuf[:cap(msgBuf)]
					if maxLen := lz4.CompressBound(len(uncBuf)) + 8; maxLen > len(msgBuf) {
						msgBuf = make([]byte, maxLen)
					}

					// Compressed is written to msgBuf, we keep tempBuf for
					// the length only
					tempBuf, err = lz4.Encode(msgBuf[8:], uncBuf)
					binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf)))
					msgBuf = msgBuf[0 : len(tempBuf)+8]

					if debug {
						l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
					}
				} else {
					// No point in compressing very short messages
					hm.hdr.compression = false

					msgBuf = msgBuf[:cap(msgBuf)]
					if l := len(uncBuf) + 8; l > len(msgBuf) {
						msgBuf = make([]byte, l)
					}

					binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(uncBuf)))
					msgBuf = msgBuf[0 : len(uncBuf)+8]
					copy(msgBuf[8:], uncBuf)

					if debug {
						l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
					}
				}
			} else {
				if debug {
					l.Debugf("write empty message; %v", hm.hdr)
				}
				binary.BigEndian.PutUint32(msgBuf[4:8], 0)
				msgBuf = msgBuf[:8]
			}

			binary.BigEndian.PutUint32(msgBuf[0:4], encodeHeader(hm.hdr))

			if err == nil {
				var n int
				n, err = c.cw.Write(msgBuf)
				if debug {
					l.Debugf("wrote %d bytes on the wire", n)
				}
			}
			if err != nil {
				c.close(err)
				return
			}
		case <-c.closed:
			return
		}
	}
}
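The compression decision in writerLoop is policy times size: CompressAlways compresses everything, CompressMetadata skips Response messages (whose block data rarely shrinks), and anything under compressionThreshold goes out uncompressed regardless. A sketch of that decision using the same github.com/bkaradzic/go-lz4 calls the file uses; the threshold value here is an assumption:

package main

import (
	"fmt"

	lz4 "github.com/bkaradzic/go-lz4"
)

const compressionThreshold = 128 // assumed value, for the sketch only

// maybeCompress returns the payload to put on the wire and whether the
// compression bit should be set in the header word.
func maybeCompress(policy string, msgIsResponse bool, unc []byte) ([]byte, bool, error) {
	compress := false
	switch policy {
	case "always":
		compress = true
	case "metadata":
		compress = !msgIsResponse // leave block data alone
	}
	if !compress || len(unc) < compressionThreshold {
		return unc, false, nil
	}
	dst := make([]byte, lz4.CompressBound(len(unc)))
	comp, err := lz4.Encode(dst, unc)
	if err != nil {
		return nil, false, err
	}
	return comp, true, nil
}

func main() {
	payload := make([]byte, 1024) // zeroes compress very well
	wire, compressed, err := maybeCompress("metadata", false, payload)
	fmt.Println(len(wire), compressed, err)
}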
func (c *rawConnection) close(err error) {
	c.once.Do(func() {
		close(c.closed)

		c.awaitingMut.Lock()
		for i, ch := range c.awaiting {
			if ch != nil {
				close(ch)
				c.awaiting[i] = nil
			}
		}
		c.awaitingMut.Unlock()

		go c.receiver.Close(c.id, err)
	})
}

func (c *rawConnection) idGenerator() {
	nextID := 0
	for {
		nextID = (nextID + 1) & 0xfff
		select {
		case c.nextID <- nextID:
		case <-c.closed:
			return
		}
	}
}
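idGenerator serializes ID allocation without a mutex: one goroutine owns the counter and hands out values over a channel, masking to 0xfff so IDs stay inside the header's 12-bit msgID field. The same shape in isolation, with hypothetical names:

package main

import "fmt"

// idGen hands out 12-bit wrapping IDs over a channel until done is closed.
func idGen(done <-chan struct{}) <-chan int {
	out := make(chan int)
	go func() {
		id := 0
		for {
			id = (id + 1) & 0xfff // wraps at 4096, the width of the msgID field
			select {
			case out <- id:
			case <-done:
				close(out)
				return
			}
		}
	}()
	return out
}

func main() {
	done := make(chan struct{})
	ids := idGen(done)
	for i := 0; i < 3; i++ {
		fmt.Println(<-ids) // 1, 2, 3
	}
	close(done)
}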
func (c *rawConnection) pingerLoop() {
	var rc = make(chan bool, 1)
	ticker := time.Tick(pingIdleTime / 2)
	for {
		select {
		case <-ticker:
			if d := time.Since(c.cr.Last()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after rd", d)
				}
				continue
			}
			if d := time.Since(c.cw.Last()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after wr", d)
				}
				continue
			}
			go func() {
				if debug {
					l.Debugln(c.id, "ping ->")
				}
				rc <- c.ping()
			}()
			select {
			case ok := <-rc:
				if debug {
					l.Debugln(c.id, "<- pong")
				}
				if !ok {
					c.close(fmt.Errorf("ping failure"))
				}
			case <-time.After(pingTimeout):
				c.close(fmt.Errorf("ping timeout"))
			case <-c.closed:
				return
			}

		case <-c.closed:
			return
		}
	}
}

type Statistics struct {
	At            time.Time
	InBytesTotal  int64
	OutBytesTotal int64
}

func (c *rawConnection) Statistics() Statistics {
	return Statistics{
		At:            time.Now(),
		InBytesTotal:  c.cr.Tot(),
		OutBytesTotal: c.cw.Tot(),
	}
}
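pingerLoop's keepalive logic generalizes: wake at half the idle interval, skip the probe while either direction has seen recent traffic, and race the reply against a timeout. A stripped-down sketch under assumed durations:

package main

import (
	"fmt"
	"time"
)

const (
	idleTime    = 60 * time.Second // assumed; stands in for pingIdleTime
	pingTimeout = 30 * time.Second // assumed
)

// keepalive probes only when lastTraffic reports the link has gone idle.
func keepalive(lastTraffic func() time.Time, probe func() bool, closed <-chan struct{}) error {
	ticker := time.NewTicker(idleTime / 2)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if time.Since(lastTraffic()) < idleTime {
				continue // the link is busy; no probe needed
			}
			rc := make(chan bool, 1)
			go func() { rc <- probe() }()
			select {
			case ok := <-rc:
				if !ok {
					return fmt.Errorf("ping failure")
				}
			case <-time.After(pingTimeout):
				return fmt.Errorf("ping timeout")
			case <-closed:
				return nil
			}
		case <-closed:
			return nil
		}
	}
}

func main() {
	closed := make(chan struct{})
	time.AfterFunc(100*time.Millisecond, func() { close(closed) })
	fmt.Println(keepalive(time.Now, func() bool { return true }, closed))
}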
382 Godeps/_workspace/src/github.com/syncthing/protocol/protocol_test.go generated vendored Normal file
@@ -0,0 +1,382 @@
// Copyright (C) 2014 The Protocol Authors.

package protocol

import (
	"bytes"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"strings"
	"testing"
	"testing/quick"

	"github.com/calmh/xdr"
)

var (
	c0ID = NewDeviceID([]byte{1})
	c1ID = NewDeviceID([]byte{2})
)

func TestHeaderFunctions(t *testing.T) {
	f := func(ver, id, typ int) bool {
		ver = int(uint(ver) % 16)
		id = int(uint(id) % 4096)
		typ = int(uint(typ) % 256)
		h0 := header{version: ver, msgID: id, msgType: typ}
		h1 := decodeHeader(encodeHeader(h0))
		return h0 == h1
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

func TestHeaderLayout(t *testing.T) {
	var e, a uint32

	// Version are the first four bits
	e = 0xf0000000
	a = encodeHeader(header{version: 0xf})
	if a != e {
		t.Errorf("Header layout incorrect; %08x != %08x", a, e)
	}

	// Message ID are the following 12 bits
	e = 0x0fff0000
	a = encodeHeader(header{msgID: 0xfff})
	if a != e {
		t.Errorf("Header layout incorrect; %08x != %08x", a, e)
	}

	// Type are the last 8 bits before reserved
	e = 0x0000ff00
	a = encodeHeader(header{msgType: 0xff})
	if a != e {
		t.Errorf("Header layout incorrect; %08x != %08x", a, e)
	}
}
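Those two tests pin the header word down completely: version in the top 4 bits, message ID in the next 12, type in the following 8, and the low 8 reserved. An encode/decode pair consistent with them, a sketch rather than necessarily the package's exact implementation:

package main

import "fmt"

type header struct {
	version, msgID, msgType int
}

func encodeHeader(h header) uint32 {
	return uint32(h.version&0xf)<<28 |
		uint32(h.msgID&0xfff)<<16 |
		uint32(h.msgType&0xff)<<8 // low 8 bits reserved
}

func decodeHeader(u uint32) header {
	return header{
		version: int(u >> 28 & 0xf),
		msgID:   int(u >> 16 & 0xfff),
		msgType: int(u >> 8 & 0xff),
	}
}

func main() {
	fmt.Printf("%08x\n", encodeHeader(header{version: 0xf})) // f0000000
	fmt.Printf("%08x\n", encodeHeader(header{msgID: 0xfff})) // 0fff0000
	fmt.Printf("%08x\n", encodeHeader(header{msgType: 0xff})) // 0000ff00
	fmt.Println(decodeHeader(encodeHeader(header{1, 42, 3})) == header{1, 42, 3}) // true
}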
func TestPing(t *testing.T) {
	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
	c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)

	if ok := c0.ping(); !ok {
		t.Error("c0 ping failed")
	}
	if ok := c1.ping(); !ok {
		t.Error("c1 ping failed")
	}
}

func TestPingErr(t *testing.T) {
	e := errors.New("something broke")

	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			m0 := newTestModel()
			m1 := newTestModel()

			ar, aw := io.Pipe()
			br, bw := io.Pipe()
			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}

			c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
			NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)

			res := c0.ping()
			if (i < 8 || j < 8) && res {
				t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
			} else if (i >= 12 && j >= 12) && !res {
				t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
			}
		}
	}
}

// func TestRequestResponseErr(t *testing.T) {
// 	e := errors.New("something broke")

// 	var pass bool
// 	for i := 0; i < 48; i++ {
// 		for j := 0; j < 38; j++ {
// 			m0 := newTestModel()
// 			m0.data = []byte("response data")
// 			m1 := newTestModel()

// 			ar, aw := io.Pipe()
// 			br, bw := io.Pipe()
// 			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
// 			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}

// 			NewConnection(c0ID, ar, ebw, m0, nil)
// 			c1 := NewConnection(c1ID, br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection)

// 			d, err := c1.Request("default", "tn", 1234, 5678)
// 			if err == e || err == ErrClosed {
// 				t.Logf("Error at %d+%d bytes", i, j)
// 				if !m1.isClosed() {
// 					t.Fatal("c1 not closed")
// 				}
// 				if !m0.isClosed() {
// 					t.Fatal("c0 not closed")
// 				}
// 				continue
// 			}
// 			if err != nil {
// 				t.Fatal(err)
// 			}
// 			if string(d) != "response data" {
// 				t.Fatalf("Incorrect response data %q", string(d))
// 			}
// 			if m0.folder != "default" {
// 				t.Fatalf("Incorrect folder %q", m0.folder)
// 			}
// 			if m0.name != "tn" {
// 				t.Fatalf("Incorrect name %q", m0.name)
// 			}
// 			if m0.offset != 1234 {
// 				t.Fatalf("Incorrect offset %d", m0.offset)
// 			}
// 			if m0.size != 5678 {
// 				t.Fatalf("Incorrect size %d", m0.size)
// 			}
// 			t.Logf("Pass at %d+%d bytes", i, j)
// 			pass = true
// 		}
// 	}
// 	if !pass {
// 		t.Fatal("Never passed")
// 	}
// }

func TestVersionErr(t *testing.T) {
	m0 := newTestModel()
	m1 := newTestModel()

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
	NewConnection(c1ID, br, aw, m1, "name", CompressAlways)

	w := xdr.NewWriter(c0.cw)
	w.WriteUint32(encodeHeader(header{
		version: 2,
		msgID:   0,
		msgType: 0,
	}))
	w.WriteUint32(0) // Avoids reader closing due to EOF

	if !m1.isClosed() {
		t.Error("Connection should close due to unknown version")
	}
}

func TestTypeErr(t *testing.T) {
	m0 := newTestModel()
	m1 := newTestModel()

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
	NewConnection(c1ID, br, aw, m1, "name", CompressAlways)

	w := xdr.NewWriter(c0.cw)
	w.WriteUint32(encodeHeader(header{
		version: 0,
		msgID:   0,
		msgType: 42,
	}))
	w.WriteUint32(0) // Avoids reader closing due to EOF

	if !m1.isClosed() {
		t.Error("Connection should close due to unknown message type")
	}
}

func TestClose(t *testing.T) {
	m0 := newTestModel()
	m1 := newTestModel()

	ar, aw := io.Pipe()
	br, bw := io.Pipe()

	c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
	NewConnection(c1ID, br, aw, m1, "name", CompressAlways)

	c0.close(nil)

	<-c0.closed
	if !m0.isClosed() {
		t.Fatal("Connection should be closed")
	}

	// None of these should panic, some should return an error

	if c0.ping() {
		t.Error("Ping should not return true")
	}

	c0.Index("default", nil)
	c0.Index("default", nil)

	if _, err := c0.Request("default", "foo", 0, 0); err == nil {
		t.Error("Request should return an error")
	}
}

func TestElementSizeExceededNested(t *testing.T) {
	m := ClusterConfigMessage{
		Folders: []Folder{
			{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
		},
	}
	_, err := m.EncodeXDR(ioutil.Discard)
	if err == nil {
		t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
	}
}

func TestMarshalIndexMessage(t *testing.T) {
	var quickCfg = &quick.Config{MaxCountScale: 10}
	if testing.Short() {
		quickCfg = nil
	}

	f := func(m1 IndexMessage) bool {
		for _, f := range m1.Files {
			for i := range f.Blocks {
				f.Blocks[i].Offset = 0
				if len(f.Blocks[i].Hash) == 0 {
					f.Blocks[i].Hash = nil
				}
			}
		}

		return testMarshal(t, "index", &m1, &IndexMessage{})
	}

	if err := quick.Check(f, quickCfg); err != nil {
		t.Error(err)
	}
}

func TestMarshalRequestMessage(t *testing.T) {
	var quickCfg = &quick.Config{MaxCountScale: 10}
	if testing.Short() {
		quickCfg = nil
	}

	f := func(m1 RequestMessage) bool {
		return testMarshal(t, "request", &m1, &RequestMessage{})
	}

	if err := quick.Check(f, quickCfg); err != nil {
		t.Error(err)
	}
}

func TestMarshalResponseMessage(t *testing.T) {
	var quickCfg = &quick.Config{MaxCountScale: 10}
	if testing.Short() {
		quickCfg = nil
	}

	f := func(m1 ResponseMessage) bool {
		if len(m1.Data) == 0 {
			m1.Data = nil
		}
		return testMarshal(t, "response", &m1, &ResponseMessage{})
	}

	if err := quick.Check(f, quickCfg); err != nil {
		t.Error(err)
	}
}

func TestMarshalClusterConfigMessage(t *testing.T) {
	var quickCfg = &quick.Config{MaxCountScale: 10}
	if testing.Short() {
		quickCfg = nil
	}

	f := func(m1 ClusterConfigMessage) bool {
		return testMarshal(t, "clusterconfig", &m1, &ClusterConfigMessage{})
	}

	if err := quick.Check(f, quickCfg); err != nil {
		t.Error(err)
	}
}

func TestMarshalCloseMessage(t *testing.T) {
	var quickCfg = &quick.Config{MaxCountScale: 10}
	if testing.Short() {
		quickCfg = nil
	}

	f := func(m1 CloseMessage) bool {
		return testMarshal(t, "close", &m1, &CloseMessage{})
	}

	if err := quick.Check(f, quickCfg); err != nil {
		t.Error(err)
	}
}

type message interface {
	EncodeXDR(io.Writer) (int, error)
	DecodeXDR(io.Reader) error
}

func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
	var buf bytes.Buffer

	failed := func(bc []byte) {
		bs, _ := json.MarshalIndent(m1, "", " ")
		ioutil.WriteFile(prefix+"-1.txt", bs, 0644)
		bs, _ = json.MarshalIndent(m2, "", " ")
		ioutil.WriteFile(prefix+"-2.txt", bs, 0644)
		if len(bc) > 0 {
			f, _ := os.Create(prefix + "-data.txt")
			fmt.Fprint(f, hex.Dump(bc))
			f.Close()
		}
	}

	_, err := m1.EncodeXDR(&buf)
	if err != nil && strings.Contains(err.Error(), "exceeds size") {
		return true
	}
	if err != nil {
		failed(nil)
		t.Fatal(err)
	}

	bc := make([]byte, len(buf.Bytes()))
	copy(bc, buf.Bytes())

	err = m2.DecodeXDR(&buf)
	if err != nil {
		failed(bc)
		t.Fatal(err)
	}

	ok := reflect.DeepEqual(m1, m2)
	if !ok {
		failed(bc)
	}
	return ok
}
@@ -9,7 +9,15 @@ import (
 )
 
 type wireFormatConnection struct {
-	Connection
+	next Connection
 }
 
+func (c wireFormatConnection) ID() DeviceID {
+	return c.next.ID()
+}
+
+func (c wireFormatConnection) Name() string {
+	return c.next.Name()
+}
+
 func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
@@ -20,7 +28,7 @@ func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
 		myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
 	}
 
-	return c.Connection.Index(folder, myFs)
+	return c.next.Index(folder, myFs)
 }
 
 func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
@@ -31,10 +39,18 @@ func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
 		myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
 	}
 
-	return c.Connection.IndexUpdate(folder, myFs)
+	return c.next.IndexUpdate(folder, myFs)
 }
 
-func (c wireFormatConnection) Request(folder, name string, offset int64, size int, hash []byte, fromTemporary bool) ([]byte, error) {
+func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
 	name = norm.NFC.String(filepath.ToSlash(name))
-	return c.Connection.Request(folder, name, offset, size, hash, fromTemporary)
+	return c.next.Request(folder, name, offset, size)
 }
+
+func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
+	c.next.ClusterConfig(config)
+}
+
+func (c wireFormatConnection) Statistics() Statistics {
+	return c.next.Statistics()
+}
252 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go generated vendored Normal file
@@ -0,0 +1,252 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"encoding/binary"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

type ErrBatchCorrupted struct {
	Reason string
}

func (e *ErrBatchCorrupted) Error() string {
	return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
}

func newErrBatchCorrupted(reason string) error {
	return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
}

const (
	batchHdrLen  = 8 + 4
	batchGrowRec = 3000
)

type BatchReplay interface {
	Put(key, value []byte)
	Delete(key []byte)
}

// Batch is a write batch.
type Batch struct {
	data       []byte
	rLen, bLen int
	seq        uint64
	sync       bool
}

func (b *Batch) grow(n int) {
	off := len(b.data)
	if off == 0 {
		off = batchHdrLen
		if b.data != nil {
			b.data = b.data[:off]
		}
	}
	if cap(b.data)-off < n {
		if b.data == nil {
			b.data = make([]byte, off, off+n)
		} else {
			odata := b.data
			div := 1
			if b.rLen > batchGrowRec {
				div = b.rLen / batchGrowRec
			}
			b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
			copy(b.data, odata)
		}
	}
}

func (b *Batch) appendRec(kt kType, key, value []byte) {
	n := 1 + binary.MaxVarintLen32 + len(key)
	if kt == ktVal {
		n += binary.MaxVarintLen32 + len(value)
	}
	b.grow(n)
	off := len(b.data)
	data := b.data[:off+n]
	data[off] = byte(kt)
	off += 1
	off += binary.PutUvarint(data[off:], uint64(len(key)))
	copy(data[off:], key)
	off += len(key)
	if kt == ktVal {
		off += binary.PutUvarint(data[off:], uint64(len(value)))
		copy(data[off:], value)
		off += len(value)
	}
	b.data = data[:off]
	b.rLen++
	// Include 8-byte ikey header
	b.bLen += len(key) + len(value) + 8
}

// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
	b.appendRec(ktVal, key, value)
}

// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
	b.appendRec(ktDel, key, nil)
}

// Dump dumps batch contents. The returned slice can be loaded into the
// batch using Load method.
// The returned slice is not its own copy, so the contents should not be
// modified.
func (b *Batch) Dump() []byte {
	return b.encode()
}

// Load loads given slice into the batch. Previous contents of the batch
// will be discarded.
// The given slice will not be copied and will be used as batch buffer, so
// it is not safe to modify the contents of the slice.
func (b *Batch) Load(data []byte) error {
	return b.decode(0, data)
}

// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
	return b.decodeRec(func(i int, kt kType, key, value []byte) {
		switch kt {
		case ktVal:
			r.Put(key, value)
		case ktDel:
			r.Delete(key)
		}
	})
}

// Len returns number of records in the batch.
func (b *Batch) Len() int {
	return b.rLen
}

// Reset resets the batch.
func (b *Batch) Reset() {
	b.data = b.data[:0]
	b.seq = 0
	b.rLen = 0
	b.bLen = 0
	b.sync = false
}

func (b *Batch) init(sync bool) {
	b.sync = sync
}

func (b *Batch) append(p *Batch) {
	if p.rLen > 0 {
		b.grow(len(p.data) - batchHdrLen)
		b.data = append(b.data, p.data[batchHdrLen:]...)
		b.rLen += p.rLen
	}
	if p.sync {
		b.sync = true
	}
}

// size returns sums of key/value pair length plus 8-bytes ikey.
func (b *Batch) size() int {
	return b.bLen
}

func (b *Batch) encode() []byte {
	b.grow(0)
	binary.LittleEndian.PutUint64(b.data, b.seq)
	binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))

	return b.data
}

func (b *Batch) decode(prevSeq uint64, data []byte) error {
	if len(data) < batchHdrLen {
		return newErrBatchCorrupted("too short")
	}

	b.seq = binary.LittleEndian.Uint64(data)
	if b.seq < prevSeq {
		return newErrBatchCorrupted("invalid sequence number")
	}
	b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
	if b.rLen < 0 {
		return newErrBatchCorrupted("invalid records length")
	}
	// No need to be precise at this point, it won't be used anyway
	b.bLen = len(data) - batchHdrLen
	b.data = data

	return nil
}

func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
	off := batchHdrLen
	for i := 0; i < b.rLen; i++ {
		if off >= len(b.data) {
			return newErrBatchCorrupted("invalid records length")
		}

		kt := kType(b.data[off])
		if kt > ktVal {
			return newErrBatchCorrupted("bad record: invalid type")
		}
		off += 1

		x, n := binary.Uvarint(b.data[off:])
		off += n
		if n <= 0 || off+int(x) > len(b.data) {
			return newErrBatchCorrupted("bad record: invalid key length")
		}
		key := b.data[off : off+int(x)]
		off += int(x)
		var value []byte
		if kt == ktVal {
			x, n := binary.Uvarint(b.data[off:])
			off += n
			if n <= 0 || off+int(x) > len(b.data) {
				return newErrBatchCorrupted("bad record: invalid value length")
			}
			value = b.data[off : off+int(x)]
			off += int(x)
		}

		f(i, kt, key, value)
	}

	return nil
}

func (b *Batch) memReplay(to *memdb.DB) error {
	return b.decodeRec(func(i int, kt kType, key, value []byte) {
		ikey := newIkey(key, b.seq+uint64(i), kt)
		to.Put(ikey, value)
	})
}

func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
	if err := b.decode(prevSeq, data); err != nil {
		return err
	}
	return b.memReplay(to)
}

func (b *Batch) revertMemReplay(to *memdb.DB) error {
	return b.decodeRec(func(i int, kt kType, key, value []byte) {
		ikey := newIkey(key, b.seq+uint64(i), kt)
		to.Delete(ikey)
	})
}
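appendRec, encode, and decodeRec above fix the batch wire format: a 12-byte header of little-endian sequence number and record count, then per record a type byte, a uvarint key length and key, and for puts a uvarint value length and value. A sketch that lays out a single put record in that format, outside the package; the ktVal/ktDel numeric values are assumed from the kt > ktVal validity check:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	ktDel byte = 0 // assumed ordering, consistent with kt > ktVal being invalid
	ktVal byte = 1
)

// encodeBatch lays out seq, count, then one put record.
func encodeBatch(seq uint64, key, value []byte) []byte {
	buf := make([]byte, 12, 12+1+2*binary.MaxVarintLen32+len(key)+len(value))
	binary.LittleEndian.PutUint64(buf[0:8], seq)
	binary.LittleEndian.PutUint32(buf[8:12], 1) // one record
	buf = append(buf, ktVal)
	buf = binary.AppendUvarint(buf, uint64(len(key)))
	buf = append(buf, key...)
	buf = binary.AppendUvarint(buf, uint64(len(value)))
	buf = append(buf, value...)
	return buf
}

func main() {
	b := encodeBatch(10009, []byte("key1"), []byte("value1"))
	fmt.Printf("% x\n", b)
}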
120 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go generated vendored Normal file
@@ -0,0 +1,120 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"bytes"
	"testing"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

type tbRec struct {
	kt         kType
	key, value []byte
}

type testBatch struct {
	rec []*tbRec
}

func (p *testBatch) Put(key, value []byte) {
	p.rec = append(p.rec, &tbRec{ktVal, key, value})
}

func (p *testBatch) Delete(key []byte) {
	p.rec = append(p.rec, &tbRec{ktDel, key, nil})
}

func compareBatch(t *testing.T, b1, b2 *Batch) {
	if b1.seq != b2.seq {
		t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
	}
	if b1.Len() != b2.Len() {
		t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
	}
	p1, p2 := new(testBatch), new(testBatch)
	err := b1.Replay(p1)
	if err != nil {
		t.Fatal("error when replaying batch 1: ", err)
	}
	err = b2.Replay(p2)
	if err != nil {
		t.Fatal("error when replaying batch 2: ", err)
	}
	for i := range p1.rec {
		r1, r2 := p1.rec[i], p2.rec[i]
		if r1.kt != r2.kt {
			t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
		}
		if !bytes.Equal(r1.key, r2.key) {
			t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
		}
		if r1.kt == ktVal {
			if !bytes.Equal(r1.value, r2.value) {
				t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
			}
		}
	}
}

func TestBatch_EncodeDecode(t *testing.T) {
	b1 := new(Batch)
	b1.seq = 10009
	b1.Put([]byte("key1"), []byte("value1"))
	b1.Put([]byte("key2"), []byte("value2"))
	b1.Delete([]byte("key1"))
	b1.Put([]byte("k"), []byte(""))
	b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz"))
	b1.Delete([]byte("key10000"))
	b1.Delete([]byte("k"))
	buf := b1.encode()
	b2 := new(Batch)
	err := b2.decode(0, buf)
	if err != nil {
		t.Error("error when decoding batch: ", err)
	}
	compareBatch(t, b1, b2)
}

func TestBatch_Append(t *testing.T) {
	b1 := new(Batch)
	b1.seq = 10009
	b1.Put([]byte("key1"), []byte("value1"))
	b1.Put([]byte("key2"), []byte("value2"))
	b1.Delete([]byte("key1"))
	b1.Put([]byte("foo"), []byte("foovalue"))
	b1.Put([]byte("bar"), []byte("barvalue"))
	b2a := new(Batch)
	b2a.seq = 10009
	b2a.Put([]byte("key1"), []byte("value1"))
	b2a.Put([]byte("key2"), []byte("value2"))
	b2a.Delete([]byte("key1"))
	b2b := new(Batch)
	b2b.Put([]byte("foo"), []byte("foovalue"))
	b2b.Put([]byte("bar"), []byte("barvalue"))
	b2a.append(b2b)
	compareBatch(t, b1, b2a)
}

func TestBatch_Size(t *testing.T) {
	b := new(Batch)
	for i := 0; i < 2; i++ {
		b.Put([]byte("key1"), []byte("value1"))
		b.Put([]byte("key2"), []byte("value2"))
		b.Delete([]byte("key1"))
		b.Put([]byte("foo"), []byte("foovalue"))
		b.Put([]byte("bar"), []byte("barvalue"))
		mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0)
		b.memReplay(mem)
		if b.size() != mem.Size() {
			t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size())
		}
		b.Reset()
	}
}
58 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go generated vendored Normal file
@@ -0,0 +1,58 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !go1.2

package leveldb

import (
	"sync/atomic"
	"testing"
)

func BenchmarkDBReadConcurrent(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		for pb.Next() && iter.Next() {
		}
	})
}

func BenchmarkDBReadConcurrent2(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	var dir uint32
	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		if atomic.AddUint32(&dir, 1)%2 == 0 {
			for pb.Next() && iter.Next() {
			}
		} else {
			if pb.Next() && iter.Last() {
				for pb.Next() && iter.Prev() {
				}
			}
		}
	})
}
464 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go generated vendored Normal file
@@ -0,0 +1,464 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"bytes"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func randomString(r *rand.Rand, n int) []byte {
	b := new(bytes.Buffer)
	for i := 0; i < n; i++ {
		b.WriteByte(' ' + byte(r.Intn(95)))
	}
	return b.Bytes()
}

func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
	nn := int(float32(n) * frac)
	rb := randomString(r, nn)
	b := make([]byte, 0, n+nn)
	for len(b) < n {
		b = append(b, rb...)
	}
	return b[:n]
}

type valueGen struct {
	src []byte
	pos int
}

func newValueGen(frac float32) *valueGen {
	v := new(valueGen)
	r := rand.New(rand.NewSource(301))
	v.src = make([]byte, 0, 1048576+100)
	for len(v.src) < 1048576 {
		v.src = append(v.src, compressibleStr(r, frac, 100)...)
	}
	return v
}

func (v *valueGen) get(n int) []byte {
	if v.pos+n > len(v.src) {
		v.pos = 0
	}
	v.pos += n
	return v.src[v.pos-n : v.pos]
}

var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))

type dbBench struct {
	b    *testing.B
	stor storage.Storage
	db   *DB

	o  *opt.Options
	ro *opt.ReadOptions
	wo *opt.WriteOptions

	keys, values [][]byte
}

func openDBBench(b *testing.B, noCompress bool) *dbBench {
	_, err := os.Stat(benchDB)
	if err == nil {
		err = os.RemoveAll(benchDB)
		if err != nil {
			b.Fatal("cannot remove old db: ", err)
		}
	}

	p := &dbBench{
		b:  b,
		o:  &opt.Options{},
		ro: &opt.ReadOptions{},
		wo: &opt.WriteOptions{},
	}
	p.stor, err = storage.OpenFile(benchDB)
	if err != nil {
		b.Fatal("cannot open stor: ", err)
	}
	if noCompress {
		p.o.Compression = opt.NoCompression
	}

	p.db, err = Open(p.stor, p.o)
	if err != nil {
		b.Fatal("cannot open db: ", err)
	}

	runtime.GOMAXPROCS(runtime.NumCPU())
	return p
}

func (p *dbBench) reopen() {
	p.db.Close()
	var err error
	p.db, err = Open(p.stor, p.o)
	if err != nil {
		p.b.Fatal("Reopen: got error: ", err)
	}
}

func (p *dbBench) populate(n int) {
	p.keys, p.values = make([][]byte, n), make([][]byte, n)
	v := newValueGen(0.5)
	for i := range p.keys {
		p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
	}
}

func (p *dbBench) randomize() {
	m := len(p.keys)
	times := m * 2
	r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface))
	for n := 0; n < times; n++ {
		i, j := r1.Int()%m, r2.Int()%m
		if i == j {
			continue
		}
		p.keys[i], p.keys[j] = p.keys[j], p.keys[i]
		p.values[i], p.values[j] = p.values[j], p.values[i]
	}
}

func (p *dbBench) writes(perBatch int) {
	b := p.b
	db := p.db

	n := len(p.keys)
	m := n / perBatch
	if n%perBatch > 0 {
		m++
	}
	batches := make([]Batch, m)
	j := 0
	for i := range batches {
		first := true
		for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
			first = false
			batches[i].Put(p.keys[j], p.values[j])
		}
	}
	runtime.GC()

	b.ResetTimer()
	b.StartTimer()
	for i := range batches {
		err := db.Write(&(batches[i]), p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
	}
	b.StopTimer()
	b.SetBytes(116)
}

func (p *dbBench) gc() {
	p.keys, p.values = nil, nil
	runtime.GC()
}

func (p *dbBench) puts() {
	b := p.b
	db := p.db

	b.ResetTimer()
	b.StartTimer()
	for i := range p.keys {
		err := db.Put(p.keys[i], p.values[i], p.wo)
		if err != nil {
			b.Fatal("put failed: ", err)
		}
	}
	b.StopTimer()
	b.SetBytes(116)
}

func (p *dbBench) fill() {
	b := p.b
	db := p.db

	perBatch := 10000
	batch := new(Batch)
	for i, n := 0, len(p.keys); i < n; {
		first := true
		for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
			first = false
			batch.Put(p.keys[i], p.values[i])
		}
		err := db.Write(batch, p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
		batch.Reset()
	}
}

func (p *dbBench) gets() {
	b := p.b
	db := p.db

	b.ResetTimer()
	for i := range p.keys {
		_, err := db.Get(p.keys[i], p.ro)
		if err != nil {
			b.Error("got error: ", err)
		}
	}
	b.StopTimer()
}

func (p *dbBench) seeks() {
	b := p.b

	iter := p.newIter()
	defer iter.Release()
	b.ResetTimer()
	for i := range p.keys {
		if !iter.Seek(p.keys[i]) {
			b.Error("value not found for: ", string(p.keys[i]))
		}
	}
	b.StopTimer()
}

func (p *dbBench) newIter() iterator.Iterator {
	iter := p.db.NewIterator(nil, p.ro)
	err := iter.Error()
	if err != nil {
		p.b.Fatal("cannot create iterator: ", err)
	}
	return iter
}

func (p *dbBench) close() {
	if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
		p.b.Log("Block pool stats: ", bp)
	}
	p.db.Close()
	p.stor.Close()
	os.RemoveAll(benchDB)
	p.db = nil
	p.keys = nil
	p.values = nil
	runtime.GC()
	runtime.GOMAXPROCS(1)
}

func BenchmarkDBWrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.close()
}

func BenchmarkDBWriteBatch(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}

func BenchmarkDBWriteUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1)
	p.close()
}

func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}

func BenchmarkDBWriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.randomize()
	p.writes(1)
	p.close()
}

func BenchmarkDBWriteRandomSync(b *testing.B) {
	p := openDBBench(b, false)
	p.wo.Sync = true
	p.populate(b.N)
	p.writes(1)
	p.close()
}

func BenchmarkDBOverwrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.writes(1)
	p.close()
}

func BenchmarkDBOverwriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.randomize()
	p.writes(1)
	p.close()
}

func BenchmarkDBPut(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.puts()
	p.close()
}

func BenchmarkDBRead(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBReadGC(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBReadUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBReadTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBReadReverse(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBReadReverseTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}

func BenchmarkDBSeek(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.seeks()
	p.close()
}

func BenchmarkDBSeekRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.seeks()
	p.close()
}

func BenchmarkDBGet(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gets()
	p.close()
}

func BenchmarkDBGetRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.gets()
	p.close()
}
30 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go generated vendored Normal file
@@ -0,0 +1,30 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// +build !go1.2

package cache

import (
	"math/rand"
	"testing"
	"time"
)

func BenchmarkLRUCache(b *testing.B) {
	c := NewCache(NewLRU(10000))

	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		r := rand.New(rand.NewSource(time.Now().UnixNano()))

		for pb.Next() {
			key := uint64(r.Intn(1000000))
			c.Get(0, key, func() (int, Value) {
				return 1, key
			}).Release()
		}
	})
}
@@ -16,7 +16,7 @@ import (
 )
 
 // Cacher provides interface to implements a caching functionality.
-// An implementation must be safe for concurrent use.
+// An implementation must be goroutine-safe.
 type Cacher interface {
 	// Capacity returns cache capacity.
 	Capacity() int
@@ -47,21 +47,17 @@
 // so the the Release method will be called once object is released.
 type Value interface{}
 
-// NamespaceGetter provides convenient wrapper for namespace.
-type NamespaceGetter struct {
+type CacheGetter struct {
 	Cache *Cache
 	NS    uint64
 }
 
-// Get simply calls Cache.Get() method.
-func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
 	return g.Cache.Get(g.NS, key, setFunc)
 }
 
 // The hash tables implementation is based on:
-// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
-// Kunlong Zhang, and Michael Spear.
-// ACM Symposium on Principles of Distributed Computing, Jul 2014.
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
 
 const (
 	mInitialSize = 1 << 4
@@ -511,12 +507,18 @@ func (r *Cache) EvictAll() {
 	}
 }
 
-// Close closes the 'cache map' and forcefully releases all 'cache node'.
+// Close closes the 'cache map' and releases all 'cache node'.
 func (r *Cache) Close() error {
 	r.mu.Lock()
 	if !r.closed {
 		r.closed = true
 
+		if r.cacher != nil {
+			if err := r.cacher.Close(); err != nil {
+				return err
+			}
+		}
+
 		h := (*mNode)(r.mHead)
 		h.initBuckets()
 
@@ -535,37 +537,10 @@
 				for _, f := range n.onDel {
 					f()
 				}
 				n.onDel = nil
 			}
 		}
 	}
 	r.mu.Unlock()
-
-	// Avoid deadlock.
-	if r.cacher != nil {
-		if err := r.cacher.Close(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but
-// unlike Close it doesn't forcefully releases 'cache node'.
-func (r *Cache) CloseWeak() error {
-	r.mu.Lock()
-	if !r.closed {
-		r.closed = true
-	}
-	r.mu.Unlock()
-
-	// Avoid deadlock.
-	if r.cacher != nil {
-		r.cacher.EvictAll()
-		if err := r.cacher.Close(); err != nil {
-			return err
-		}
-	}
 	return nil
 }
@@ -635,12 +610,10 @@ func (n *Node) unrefLocked() {
 	}
 }
 
-// Handle is a 'cache handle' of a 'cache node'.
 type Handle struct {
 	n unsafe.Pointer // *Node
 }
 
-// Value returns the value of the 'cache node'.
 func (h *Handle) Value() Value {
 	n := (*Node)(atomic.LoadPointer(&h.n))
 	if n != nil {
@@ -649,8 +622,6 @@
 	return nil
 }
 
-// Release releases this 'cache handle'.
-// It is safe to call release multiple times.
 func (h *Handle) Release() {
 	nPtr := atomic.LoadPointer(&h.n)
 	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
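The tests and benchmarks that follow exercise the cache through two calls: Get, whose setFunc returns a charge and a Value when the node must be created, and Release on the returned handle. Usage in isolation, sketched against the API as it appears in this vendored revision:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(100)) // LRU-backed map, capacity 100 charge units

	// Get either returns the cached node or invokes setFunc to create it;
	// the returned charge counts against the LRU capacity.
	h := c.Get(0, 42, func() (int, cache.Value) {
		return 1, "expensive value"
	})
	fmt.Println(h.Value()) // "expensive value"
	h.Release()            // drop this handle's reference

	// A second Get finds the node; setFunc should not run while it is cached.
	h = c.Get(0, 42, func() (int, cache.Value) {
		panic("not reached while the node is cached")
	})
	fmt.Println(h.Value())
	h.Release()
}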
554 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go generated vendored Normal file
@@ -0,0 +1,554 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package cache

import (
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

type int32o int32

func (o *int32o) acquire() {
	if atomic.AddInt32((*int32)(o), 1) != 1 {
		panic("BUG: invalid ref")
	}
}

func (o *int32o) Release() {
	if atomic.AddInt32((*int32)(o), -1) != 0 {
		panic("BUG: invalid ref")
	}
}

type releaserFunc struct {
	fn    func()
	value Value
}

func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}

func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
	return c.Get(ns, key, func() (int, Value) {
		if relf != nil {
			return charge, releaserFunc{relf, value}
		} else {
			return charge, value
		}
	})
}

func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	nsx := []struct {
		nobjects, nhandles, concurrent, repeat int
	}{
		{10000, 400, 50, 3},
		{100000, 1000, 100, 10},
	}

	var (
		objects [][]int32o
		handles [][]unsafe.Pointer
	)

	for _, x := range nsx {
		objects = append(objects, make([]int32o, x.nobjects))
		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32

	for ns, x := range nsx {
		for i := 0; i < x.concurrent; i++ {
			wg.Add(1)
			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
				defer wg.Done()
				r := rand.New(rand.NewSource(time.Now().UnixNano()))

				for j := len(objects) * repeat; j >= 0; j-- {
					key := uint64(r.Intn(len(objects)))
					h := c.Get(uint64(ns), key, func() (int, Value) {
						o := &objects[key]
						o.acquire()
						return 1, o
					})
					if v := h.Value().(*int32o); v != &objects[key] {
						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
					}
					if objects[key] != 1 {
						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
					}
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}(ns, i, x.repeat, objects[ns], handles[ns])
		}

		go func(handles []unsafe.Pointer) {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}(handles[ns])
	}

	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
	}()

	wg.Wait()

	atomic.StoreInt32(&done, 1)

	for _, handles0 := range handles {
		for i := range handles0 {
			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
				h.Release()
			}
		}
	}

	for ns, objects0 := range objects {
		for i, o := range objects0 {
			if o != 0 {
				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
			}
		}
	}
}

func TestCacheMap_NodesAndSize(t *testing.T) {
	c := NewCache(nil)
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
	set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 2, nil)
	set(c, 1, 1, 3, 3, nil)
	set(c, 2, 1, 4, 1, nil)
	if c.Nodes() != 4 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
	}
	if c.Size() != 7 {
		t.Errorf("invalid size counter: want=%d got=%d", 7, c.Size())
	}
}

func TestLRUCache_Capacity(t *testing.T) {
	c := NewCache(NewLRU(10))
	if c.Capacity() != 10 {
		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
	}
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 2, nil).Release()
	set(c, 1, 1, 3, 3, nil).Release()
	set(c, 2, 1, 4, 1, nil).Release()
	set(c, 2, 2, 5, 1, nil).Release()
	set(c, 2, 3, 6, 1, nil).Release()
	set(c, 2, 4, 7, 1, nil).Release()
	set(c, 2, 5, 8, 1, nil).Release()
	if c.Nodes() != 7 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
	}
	if c.Size() != 10 {
		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
	}
	c.SetCapacity(9)
	if c.Capacity() != 9 {
		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
	}
	if c.Nodes() != 6 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
	}
	if c.Size() != 8 {
		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
	}
}

func TestCacheMap_NilValue(t *testing.T) {
	c := NewCache(NewLRU(10))
	h := c.Get(0, 0, func() (size int, value Value) {
		return 1, nil
	})
	if h != nil {
		t.Error("cache handle is non-nil")
	}
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
}

func TestLRUCache_GetLatency(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	const (
		concurrentSet = 30
		concurrentGet = 3
		duration      = 3 * time.Second
		delay         = 3 * time.Millisecond
		maxkey        = 100000
	)

	var (
		set, getHit, getAll        int32
		getMaxLatency, getDuration int64
	)

	c := NewCache(NewLRU(5000))
	wg := &sync.WaitGroup{}
	until := time.Now().Add(duration)
	for i := 0; i < concurrentSet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for time.Now().Before(until) {
				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
					time.Sleep(delay)
					atomic.AddInt32(&set, 1)
					return 1, 1
				}).Release()
			}
		}(i)
	}
	for i := 0; i < concurrentGet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for {
				mark := time.Now()
				if mark.Before(until) {
					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
					latency := int64(time.Now().Sub(mark))
					m := atomic.LoadInt64(&getMaxLatency)
					if latency > m {
						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
					}
					atomic.AddInt64(&getDuration, latency)
					if h != nil {
						atomic.AddInt32(&getHit, 1)
						h.Release()
					}
					atomic.AddInt32(&getAll, 1)
				} else {
					break
				}
			}
		}(i)
	}

	wg.Wait()
	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)

	if getAvglatency > delay/3 {
		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
	}
}

func TestLRUCache_HitMiss(t *testing.T) {
	cases := []struct {
		key   uint64
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewCache(NewLRU(1000))
	for i, x := range cases {
		set(c, 0, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	for i, x := range cases {
		finalizerOk := false
		c.Delete(0, x.key, func() {
			finalizerOk = true
		})

		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}

		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	if setfin != len(cases) {
		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
	}
}

func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	o1 := set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9

	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release()
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}

func TestLRUCache_Evict(t *testing.T) {
	c := NewCache(NewLRU(6))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 1, 1, 4, 1, nil).Release()
	set(c, 1, 2, 5, 1, nil).Release()
	set(c, 2, 1, 6, 1, nil).Release()
	set(c, 2, 2, 7, 1, nil).Release()

	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				h.Release()
			} else {
				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
			}
		}
	}

	if ok := c.Evict(0, 1); !ok {
		t.Error("first Cache.Evict on #0.1 return false")
	}
	if ok := c.Evict(0, 1); ok {
		t.Error("second Cache.Evict on #0.1 return true")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
	}

	c.EvictNS(1)
	if h := c.Get(1, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
	}
	if h := c.Get(1, 2, nil); h != nil {
		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
	}

	c.EvictAll()
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
			}
		}
	}
}

func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	if ok := c.Delete(0, 1, delFunc); !ok {
		t.Error("Cache.Delete on #1 return false")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
	}
	if ok := c.Delete(0, 1, delFunc); ok {
		t.Error("Cache.Delete on #1 return true")
	}

	h2 := c.Get(0, 2, nil)
	if h2 == nil {
		t.Error("Cache.Get on #2 return nil")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(1) Cache.Delete on #2 return false")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(2) Cache.Delete on #2 return false")
	}

	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		if h := c.Get(0, uint64(key), nil); h != nil {
			h.Release()
		} else {
			t.Errorf("Cache.Get on #%d return nil", key)
		}
	}

	h2.Release()
	if h := c.Get(0, 2, nil); h != nil {
		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
	}

	if delFuncCalled != 4 {
		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
	}
}

func TestLRUCache_Close(t *testing.T) {
	relFuncCalled := 0
	relFunc := func() {
		relFuncCalled++
	}
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, relFunc).Release()
	set(c, 0, 2, 2, 1, relFunc).Release()

	h3 := set(c, 0, 3, 3, 1, relFunc)
	if h3 == nil {
		t.Error("Cache.Get on #3 return nil")
	}
	if ok := c.Delete(0, 3, delFunc); !ok {
		t.Error("Cache.Delete on #3 return false")
	}

	c.Close()

	if relFuncCalled != 3 {
		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
	}
	if delFuncCalled != 1 {
		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
	}
}
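The tests above exercise the cache's reference-counting contract: Get either returns the existing handle for a (namespace, key) pair or invokes the setter to create one, and every returned handle must be released exactly once. A minimal usage sketch under that contract; the import path assumes the vendored goleveldb cache package, and the namespace and key values are arbitrary:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/cache"
	)

	func main() {
		// An LRU cacher with a capacity of 100 charge units.
		c := cache.NewCache(cache.NewLRU(100))

		// Get either returns the cached handle or calls the setter to build one.
		h := c.Get(0, 42, func() (int, cache.Value) {
			return 1, "answer" // charge 1 unit, store the value
		})
		fmt.Println(h.Value()) // "answer"
		h.Release()            // every handle must be released exactly once
	}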
@@ -6,9 +6,7 @@

package leveldb

import (
	"github.com/syndtr/goleveldb/leveldb/comparer"
)
import "github.com/syndtr/goleveldb/leveldb/comparer"

type iComparer struct {
	ucmp comparer.Comparer
@@ -35,33 +33,43 @@ func (icmp *iComparer) Name() string {
}

func (icmp *iComparer) Compare(a, b []byte) int {
	x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
	if x == 0 {
		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
			return -1
		if m, n := iKey(a).num(), iKey(b).num(); m > n {
			x = -1
		} else if m < n {
			return 1
			x = 1
		}
	}
	return x
}

func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
	dst = icmp.uSeparator(dst, ua, ub)
	if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	ua, ub := iKey(a).ukey(), iKey(b).ukey()
	dst = icmp.ucmp.Separator(dst, ua, ub)
	if dst == nil {
		return nil
	}
	return nil
	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
		dst = append(dst, kMaxNumBytes...)
	} else {
		// Did not close possibilities that n maybe longer than len(ub).
		dst = append(dst, a[len(a)-8:]...)
	}
	return dst
}

func (icmp *iComparer) Successor(dst, b []byte) []byte {
	ub := internalKey(b).ukey()
	dst = icmp.uSuccessor(dst, ub)
	if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	ub := iKey(b).ukey()
	dst = icmp.ucmp.Successor(dst, ub)
	if dst == nil {
		return nil
	}
	return nil
	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
		dst = append(dst, kMaxNumBytes...)
	} else {
		// Did not close possibilities that n maybe longer than len(ub).
		dst = append(dst, b[len(b)-8:]...)
	}
	return dst
}
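Both sides of this hunk implement the same ordering rule: internal keys compare by user key ascending, then by sequence number descending, so the newest version of a key sorts first. A self-contained illustration of that rule; the ikey layout and helpers below are simplified stand-ins, not the package's real types:

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	// ikey sketches an internal key: the user key followed by 8 trailing
	// bytes that pack the sequence number (shifted past a type byte),
	// roughly as the code above assumes.
	func ikey(ukey []byte, seq uint64) []byte {
		b := make([]byte, len(ukey)+8)
		copy(b, ukey)
		binary.LittleEndian.PutUint64(b[len(ukey):], seq<<8)
		return b
	}

	// compare orders by user key ascending, then sequence descending.
	func compare(a, b []byte) int {
		ua, ub := a[:len(a)-8], b[:len(b)-8]
		if x := bytes.Compare(ua, ub); x != 0 {
			return x
		}
		m := binary.LittleEndian.Uint64(a[len(a)-8:])
		n := binary.LittleEndian.Uint64(b[len(b)-8:])
		switch {
		case m > n:
			return -1 // higher sequence number (newer) sorts first
		case m < n:
			return 1
		}
		return 0
	}

	func main() {
		newer := ikey([]byte("k"), 9)
		older := ikey([]byte("k"), 3)
		fmt.Println(compare(newer, older)) // -1: the newer entry sorts first
	}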
500 Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go generated vendored Normal file
@@ -0,0 +1,500 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"bytes"
	"fmt"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"io"
	"math/rand"
	"testing"
)

const ctValSize = 1000

type dbCorruptHarness struct {
	dbHarness
}

func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
	h := new(dbCorruptHarness)
	h.init(t, o)
	return h
}

func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	return newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
	})
}

func (h *dbCorruptHarness) recover() {
	p := &h.dbHarness
	t := p.t

	var err error
	p.db, err = Recover(h.stor, h.o)
	if err != nil {
		t.Fatal("Repair: got error: ", err)
	}
}

func (h *dbCorruptHarness) build(n int) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	for i := 0; i < n; i++ {
		batch.Reset()
		batch.Put(tkey(i), tval(i, ctValSize))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}

func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	for i := range rnd.Perm(n) {
		batch.Reset()
		batch.Put(tkey(i), tval(i, ctValSize))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}

func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	batch := new(Batch)
	for i := 0; i < n; i++ {
		batch.Reset()
		batch.Delete(tkey(rnd.Intn(max)))
		err := db.Write(batch, p.wo)
		if err != nil {
			t.Fatal("write error: ", err)
		}
	}
}

func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
	p := &h.dbHarness
	t := p.t

	ff, _ := p.stor.GetFiles(ft)
	sff := files(ff)
	sff.sort()
	if fi < 0 {
		fi = len(sff) - 1
	}
	if fi >= len(sff) {
		t.Fatalf("no such file with type %q with index %d", ft, fi)
	}

	file := sff[fi]

	r, err := file.Open()
	if err != nil {
		t.Fatal("cannot open file: ", err)
	}
	x, err := r.Seek(0, 2)
	if err != nil {
		t.Fatal("cannot query file size: ", err)
	}
	m := int(x)
	if _, err := r.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	if offset < 0 {
		if -offset > m {
			offset = 0
		} else {
			offset = m + offset
		}
	}
	if offset > m {
		offset = m
	}
	if offset+n > m {
		n = m - offset
	}

	buf := make([]byte, m)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		t.Fatal("cannot read file: ", err)
	}
	r.Close()

	for i := 0; i < n; i++ {
		buf[offset+i] ^= 0x80
	}

	err = file.Remove()
	if err != nil {
		t.Fatal("cannot remove old file: ", err)
	}
	w, err := file.Create()
	if err != nil {
		t.Fatal("cannot create new file: ", err)
	}
	_, err = w.Write(buf)
	if err != nil {
		t.Fatal("cannot write new file: ", err)
	}
	w.Close()
}

func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
	ff, err := h.stor.GetFiles(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	for _, f := range ff {
		if err := f.Remove(); err != nil {
			h.t.Error("remove file: ", err)
		}
	}
}

func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
	ff, err := h.stor.GetFiles(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	f := ff[rand.Intn(len(ff))]
	h.t.Logf("removing file @%d", f.Num())
	if err := f.Remove(); err != nil {
		h.t.Error("remove file: ", err)
	}
}

func (h *dbCorruptHarness) check(min, max int) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	var n, badk, badv, missed, good int
	iter := db.NewIterator(nil, p.ro)
	for iter.Next() {
		k := 0
		fmt.Sscanf(string(iter.Key()), "%d", &k)
		if k < n {
			badk++
			continue
		}
		missed += k - n
		n = k + 1
		if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
			badv++
		} else {
			good++
		}
	}
	err := iter.Error()
	iter.Release()
	t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
		min, max, good, badk, badv, missed, err)
	if good < min || good > max {
		t.Errorf("good entries number not in range")
	}
}

func TestCorruptDB_Journal(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.build(100)
	h.check(100, 100)
	h.closeDB()
	h.corrupt(storage.TypeJournal, -1, 19, 1)
	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)

	h.openDB()
	h.check(36, 36)

	h.close()
}

func TestCorruptDB_Table(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.build(100)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(99, 99)

	h.close()
}

func TestCorruptDB_TableIndex(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.build(10000)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, -2000, 500)

	h.openDB()
	h.check(5000, 9999)

	h.close()
}

func TestCorruptDB_MissingManifest(t *testing.T) {
	rnd := rand.New(rand.NewSource(0x0badda7a))
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
		WriteBuffer:        1000 * 60,
	})

	h.build(1000)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.closeDB()

	h.stor.SetIgnoreOpenErr(storage.TypeManifest)
	h.removeAll(storage.TypeManifest)
	h.openAssert(false)
	h.stor.SetIgnoreOpenErr(0)

	h.recover()
	h.check(1000, 1000)
	h.build(1000)
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()

	h.recover()
	h.check(1000, 1000)

	h.close()
}

func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")

	h.close()
}

func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.compactMem()
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.compactMem()
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")

	h.close()
}

func TestCorruptDB_CorruptedManifest(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("foo", "hello")
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()
	h.corrupt(storage.TypeManifest, -1, 0, 1000)
	h.openAssert(false)

	h.recover()
	h.getVal("foo", "hello")

	h.close()
}

func TestCorruptDB_CompactionInputError(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(9, 9)

	h.build(10000)
	h.check(10000, 10000)

	h.close()
}

func TestCorruptDB_UnrelatedKeys(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.compactMem()
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))

	h.close()
}

func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("a", "v2")
	h.put("b", "v2")
	h.compactMem()
	h.put("a", "v3")
	h.put("b", "v3")
	h.compactMem()
	h.put("c", "v0")
	h.put("d", "v0")
	h.compactMem()
	h.compactRangeAt(1, "", "")
	h.closeDB()

	h.recover()
	h.getVal("a", "v3")
	h.getVal("b", "v3")
	h.getVal("c", "v0")
	h.getVal("d", "v0")

	h.close()
}

func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("a", "v2")
	h.put("b", "v2")
	h.compactMem()
	h.put("a", "v3")
	h.put("b", "v3")
	h.compactMem()
	h.put("c", "v0")
	h.put("d", "v0")
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.closeDB()

	h.recover()
	h.getVal("a", "v3")
	h.getVal("b", "v3")
	h.getVal("c", "v0")
	h.getVal("d", "v0")

	h.close()
}

func TestCorruptDB_MissingTableFiles(t *testing.T) {
	h := newDbCorruptHarness(t)

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("c", "v2")
	h.put("d", "v2")
	h.compactMem()
	h.put("e", "v3")
	h.put("f", "v3")
	h.closeDB()

	h.removeOne(storage.TypeTable)
	h.openAssert(false)

	h.close()
}

func TestCorruptDB_RecoverTable(t *testing.T) {
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		WriteBuffer:         112 * opt.KiB,
		CompactionTableSize: 90 * opt.KiB,
		Filter:              filter.NewBloomFilter(10),
	})

	h.build(1000)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	seq := h.db.seq
	h.closeDB()
	h.corrupt(storage.TypeTable, 0, 1000, 1)
	h.corrupt(storage.TypeTable, 3, 10000, 1)
	// Corrupted filter shouldn't affect recovery.
	h.corrupt(storage.TypeTable, 3, 113888, 10)
	h.corrupt(storage.TypeTable, -1, 20000, 1)

	h.recover()
	if h.db.seq != seq {
		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
	}
	h.check(985, 985)

	h.close()
}
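The corrupt helper above damages a file by XOR-ing the high bit of n bytes at a chosen offset and rewriting the file, which reliably breaks table and journal checksums. The same flip, reduced to a standalone sketch; the file name in main is hypothetical:

	package main

	import (
		"fmt"
		"os"
	)

	// flipBytes XORs the high bit of n bytes starting at offset, mirroring
	// the buf[offset+i] ^= 0x80 loop in dbCorruptHarness.corrupt above.
	func flipBytes(path string, offset, n int) error {
		buf, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		if offset+n > len(buf) {
			n = len(buf) - offset
		}
		for i := 0; i < n; i++ {
			buf[offset+i] ^= 0x80
		}
		return os.WriteFile(path, buf, 0644)
	}

	func main() {
		// "000005.ldb" is a hypothetical table file name, for illustration only.
		if err := flipBytes("000005.ldb", 100, 1); err != nil {
			fmt.Println("corrupt:", err)
		}
	}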
File diff suppressed because it is too large
@@ -11,79 +11,109 @@ import (
	"time"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

var (
	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)

type cStat struct {
type cStats struct {
	sync.Mutex
	duration time.Duration
	read     int64
	write    int64
	read     uint64
	write    uint64
}

func (p *cStat) add(n *cStatStaging) {
func (p *cStats) add(n *cStatsStaging) {
	p.Lock()
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
	p.Unlock()
}

func (p *cStat) get() (duration time.Duration, read, write int64) {
func (p *cStats) get() (duration time.Duration, read, write uint64) {
	p.Lock()
	defer p.Unlock()
	return p.duration, p.read, p.write
}

type cStatStaging struct {
type cStatsStaging struct {
	start    time.Time
	duration time.Duration
	on       bool
	read     int64
	write    int64
	read     uint64
	write    uint64
}

func (p *cStatStaging) startTimer() {
func (p *cStatsStaging) startTimer() {
	if !p.on {
		p.start = time.Now()
		p.on = true
	}
}

func (p *cStatStaging) stopTimer() {
func (p *cStatsStaging) stopTimer() {
	if p.on {
		p.duration += time.Since(p.start)
		p.on = false
	}
}

type cStats struct {
	lk    sync.Mutex
	stats []cStat
type cMem struct {
	s     *session
	level int
	rec   *sessionRecord
}

func (p *cStats) addStat(level int, n *cStatStaging) {
	p.lk.Lock()
	if level >= len(p.stats) {
		newStats := make([]cStat, level+1)
		copy(newStats, p.stats)
		p.stats = newStats
	}
	p.stats[level].add(n)
	p.lk.Unlock()
func newCMem(s *session) *cMem {
	return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}

func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
	p.lk.Lock()
	defer p.lk.Unlock()
	if level < len(p.stats) {
		return p.stats[level].get()
func (c *cMem) flush(mem *memdb.DB, level int) error {
	s := c.s

	// Write memdb to table.
	iter := mem.NewIterator(nil)
	defer iter.Release()
	t, n, err := s.tops.createFrom(iter)
	if err != nil {
		return err
	}
	return

	// Pick level.
	if level < 0 {
		v := s.version()
		level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
		v.release()
	}
	c.rec.addTableFile(level, t)

	s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)

	c.level = level
	return nil
}

func (c *cMem) reset() {
	c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}

func (c *cMem) commit(journal, seq uint64) error {
	c.rec.setJournalNum(journal)
	c.rec.setSeqNum(seq)

	// Commit changes.
	return c.s.commit(c.rec)
}

func (db *DB) compactionError() {
	var err error
	var (
		err     error
		wlocked bool
	)
noerr:
	// No error.
	for {
@@ -91,12 +121,12 @@ noerr:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case err == ErrReadOnly, errors.IsCorrupted(err):
			case errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case <-db.closeC:
		case _, _ = <-db.closeC:
			return
		}
	}
@@ -109,11 +139,11 @@ haserr:
			switch {
			case err == nil:
				goto noerr
			case err == ErrReadOnly, errors.IsCorrupted(err):
			case errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case <-db.closeC:
		case _, _ = <-db.closeC:
			return
		}
	}
@@ -125,9 +155,9 @@ hasperr:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			db.compWriteLocking = true
		case <-db.closeC:
			if db.compWriteLocking {
			wlocked = true
		case _, _ = <-db.closeC:
			if wlocked {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
@@ -172,7 +202,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		// Check wether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
@@ -195,7 +225,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case <-db.closeC:
		case _, _ = <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
@@ -224,7 +254,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
			}
			select {
			case <-backoffT.C:
			case <-db.closeC:
			case _, _ = <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
@@ -256,27 +286,22 @@ func (db *DB) compactionExitTransact() {
	panic(errCompactionTransactExiting)
}

func (db *DB) compactionCommit(name string, rec *sessionRecord) {
	db.compCommitLk.Lock()
	defer db.compCommitLk.Unlock() // Defer is necessary.
	db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
		return db.s.commit(rec)
	}, nil)
}

func (db *DB) memCompaction() {
	mdb := db.getFrozenMem()
	if mdb == nil {
	mem := db.getFrozenMem()
	if mem == nil {
		return
	}
	defer mdb.decref()
	defer mem.decref()

	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
	c := newCMem(db.s)
	stats := new(cStatsStaging)

	db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))

	// Don't compact empty memdb.
	if mdb.Len() == 0 {
		db.logf("memdb@flush skipping")
		// drop frozen memdb
	if mem.mdb.Len() == 0 {
		db.logf("mem@flush skipping")
		// drop frozen mem
		db.dropFrozenMem()
		return
	}
@@ -288,48 +313,39 @@ func (db *DB) memCompaction() {
	case <-db.compPerErrC:
		close(resumeC)
		resumeC = nil
	case <-db.closeC:
		db.compactionExitTransact()
	case _, _ = <-db.closeC:
		return
	}

	var (
		rec        = &sessionRecord{}
		stats      = &cStatStaging{}
		flushLevel int
	)

	// Generate tables.
	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
	db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
		stats.stopTimer()
		return
		defer stats.stopTimer()
		return c.flush(mem.mdb, -1)
	}, func() error {
		for _, r := range rec.addedTables {
			db.logf("memdb@flush revert @%d", r.num)
			if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
		for _, r := range c.rec.addedTables {
			db.logf("mem@flush revert @%d", r.num)
			f := db.s.getTableFile(r.num)
			if err := f.Remove(); err != nil {
				return err
			}
		}
		return nil
	})

	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.frozenSeq)
	db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		defer stats.stopTimer()
		return c.commit(db.journalFile.Num(), db.frozenSeq)
	}, nil)

	// Commit.
	stats.startTimer()
	db.compactionCommit("memdb", rec)
	stats.stopTimer()
	db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)

	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

	for _, r := range rec.addedTables {
	for _, r := range c.rec.addedTables {
		stats.write += r.size
	}
	db.compStats.addStat(flushLevel, stats)
	db.compStats[c.level].add(stats)

	// Drop frozen memdb.
	// Drop frozen mem.
	db.dropFrozenMem()

	// Resume table compaction.
@@ -337,13 +353,13 @@ func (db *DB) memCompaction() {
		select {
		case <-resumeC:
			close(resumeC)
		case <-db.closeC:
			db.compactionExitTransact()
		case _, _ = <-db.closeC:
			return
		}
	}

	// Trigger table compaction.
	db.compTrigger(db.tcompCmdC)
	db.compSendTrigger(db.tcompCmdC)
}

type tableCompactionBuilder struct {
@@ -351,7 +367,7 @@ type tableCompactionBuilder struct {
	s   *session
	c   *compaction
	rec *sessionRecord
	stat0, stat1 *cStatStaging
	stat0, stat1 *cStatsStaging

	snapHasLastUkey bool
	snapLastUkey    []byte
@@ -378,7 +394,7 @@ func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
		select {
		case ch := <-b.db.tcompPauseC:
			b.db.pauseCompaction(ch)
		case <-b.db.closeC:
		case _, _ = <-b.db.closeC:
			b.db.compactionExitTransact()
		default:
		}
@@ -405,9 +421,9 @@ func (b *tableCompactionBuilder) flush() error {
	if err != nil {
		return err
	}
	b.rec.addTableFile(b.c.sourceLevel+1, t)
	b.rec.addTableFile(b.c.level+1, t)
	b.stat1.write += t.size
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.tw = nil
	return nil
}
@@ -452,7 +468,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
		}

		ikey := iter.Key()
		ukey, seq, kt, kerr := parseInternalKey(ikey)
		ukey, seq, kt, kerr := parseIkey(ikey)

		if kerr == nil {
			shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +494,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {

				hasLastUkey = true
				lastUkey = append(lastUkey[:0], ukey...)
				lastSeq = keyMaxSeq
				lastSeq = kMaxSeq
			}

			switch {
			case lastSeq <= b.minSeq:
				// Dropped because newer entry for same user key exist
				fallthrough // (A)
			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
			case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger seq numbers
@@ -507,7 +523,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
			// Don't drop corrupted keys.
			hasLastUkey = false
			lastUkey = lastUkey[:0]
			lastSeq = keyMaxSeq
			lastSeq = kMaxSeq
			b.kerrCnt++
		}

@@ -530,7 +546,8 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
func (b *tableCompactionBuilder) revert() error {
	for _, at := range b.rec.addedTables {
		b.s.logf("table@build revert @%d", at.num)
		if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
		f := b.s.getTableFile(at.num)
		if err := f.Remove(); err != nil {
			return err
		}
	}
@@ -540,29 +557,31 @@ func (b *tableCompactionBuilder) revert() error {
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
	defer c.release()

	rec := &sessionRecord{}
	rec.addCompPtr(c.sourceLevel, c.imax)
	rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
	rec.addCompPtr(c.level, c.imax)

	if !noTrivial && c.trivial() {
		t := c.levels[0][0]
		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
		rec.delTable(c.sourceLevel, t.fd.Num)
		rec.addTableFile(c.sourceLevel+1, t)
		db.compactionCommit("table-move", rec)
		t := c.tables[0][0]
		db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
		rec.delTable(c.level, t.file.Num())
		rec.addTableFile(c.level+1, t)
		db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
			return db.s.commit(rec)
		}, nil)
		return
	}

	var stats [2]cStatStaging
	for i, tables := range c.levels {
	var stats [2]cStatsStaging
	for i, tables := range c.tables {
		for _, t := range tables {
			stats[i].read += t.size
			// Insert deleted tables into record
			rec.delTable(c.sourceLevel+i, t.fd.Num)
			rec.delTable(c.level+i, t.file.Num())
		}
	}
	sourceSize := int(stats[0].read + stats[1].read)
	minSeq := db.minSeq()
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)

	b := &tableCompactionBuilder{
		db: db,
@@ -572,60 +591,49 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
		stat1:  &stats[1],
		minSeq: minSeq,
		strict: db.s.o.GetStrict(opt.StrictCompaction),
		tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
		tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
	}
	db.compactionTransact("table@build", b)

	// Commit.
	stats[1].startTimer()
	db.compactionCommit("table", rec)
	stats[1].stopTimer()
	// Commit changes
	db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
		stats[1].startTimer()
		defer stats[1].stopTimer()
		return db.s.commit(rec)
	}, nil)

	resultSize := int(stats[1].write)
	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

	// Save compaction stats
	for i := range stats {
		db.compStats.addStat(c.sourceLevel+1, &stats[i])
		db.compStats[c.level+1].add(&stats[i])
	}
}

func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
	db.logf("table@compaction range L%d %q:%q", level, umin, umax)

	if level >= 0 {
		if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
		if c := db.s.getCompactionRange(level, umin, umax); c != nil {
			db.tableCompaction(c, true)
		}
	} else {
		// Retry until nothing to compact.
		for {
			compacted := false

			// Scan for maximum level with overlapped tables.
			v := db.s.version()
			m := 1
			for i := m; i < len(v.levels); i++ {
				tables := v.levels[i]
				if tables.overlaps(db.s.icmp, umin, umax, false) {
					m = i
				}
		v := db.s.version()
		m := 1
		for i, t := range v.tables[1:] {
			if t.overlaps(db.s.icmp, umin, umax, false) {
				m = i + 1
			}
			v.release()
		}
		v.release()

			for level := 0; level < m; level++ {
				if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
					db.tableCompaction(c, true)
					compacted = true
				}
			}

			if !compacted {
				break
		for level := 0; level < m; level++ {
			if c := db.s.getCompactionRange(level, umin, umax); c != nil {
				db.tableCompaction(c, true)
			}
		}
	}

	return nil
}

func (db *DB) tableAutoCompaction() {
@@ -643,7 +651,7 @@ func (db *DB) tableNeedCompaction() bool {
func (db *DB) pauseCompaction(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	case <-db.closeC:
	case _, _ = <-db.closeC:
		db.compactionExitTransact()
	}
}
@@ -652,11 +660,11 @@ type cCmd interface {
	ack(err error)
}

type cAuto struct {
type cIdle struct {
	ackC chan<- error
}

func (r cAuto) ack(err error) {
func (r cIdle) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
@@ -680,38 +688,38 @@ func (r cRange) ack(err error) {
	}
}

// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
	select {
	case compC <- cAuto{}:
	default:
	}
}

// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
// This will trigger auto compation and/or wait for all compaction to be done.
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case compC <- cIdle{ch}:
	case err = <-db.compErrC:
		return
	case <-db.closeC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	return err
}

// This will trigger auto compaction but will not wait for it.
func (db *DB) compSendTrigger(compC chan<- cCmd) {
	select {
	case compC <- cIdle{}:
	default:
	}
}

// Send range compaction request.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
@@ -719,14 +727,14 @@ func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (e
	case compC <- cRange{level, min, max, ch}:
	case err := <-db.compErrC:
		return err
	case <-db.closeC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	return err
@@ -751,14 +759,14 @@ func (db *DB) mCompaction() {
		select {
		case x = <-db.mcompCmdC:
			switch x.(type) {
			case cAuto:
			case cIdle:
				db.memCompaction()
				x.ack(nil)
				x = nil
			default:
				panic("leveldb: unknown command")
			}
		case <-db.closeC:
		case _, _ = <-db.closeC:
			return
		}
	}
@@ -791,7 +799,7 @@ func (db *DB) tCompaction() {
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
			case _, _ = <-db.closeC:
				return
			default:
			}
@@ -806,16 +814,17 @@ func (db *DB) tCompaction() {
				case ch := <-db.tcompPauseC:
					db.pauseCompaction(ch)
					continue
				case <-db.closeC:
				case _, _ = <-db.closeC:
					return
				}
			}
			if x != nil {
				switch cmd := x.(type) {
				case cAuto:
				case cIdle:
					ackQ = append(ackQ, x)
				case cRange:
					x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
					db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
					x.ack(nil)
				default:
					panic("leveldb: unknown command")
				}
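Both versions in the preceding hunks drive the compaction goroutines with the same channel protocol: a trigger is a non-blocking send of a command value, and a waiting request carries an ack channel that the worker answers on. A reduced sketch of that protocol with generic names rather than the package's cAuto/cIdle types:

	package main

	import "fmt"

	// cmd carries an optional ack channel, like the command types above.
	type cmd struct {
		ackC chan<- error
	}

	// trigger requests work without waiting: a non-blocking send.
	func trigger(compC chan<- cmd) {
		select {
		case compC <- cmd{}:
		default: // a request is already pending; drop this one
		}
	}

	// triggerWait requests work and blocks until the worker acks it.
	func triggerWait(compC chan<- cmd) error {
		ch := make(chan error)
		compC <- cmd{ackC: ch}
		return <-ch
	}

	func worker(compC <-chan cmd) {
		for c := range compC {
			// ... do the compaction work here ...
			if c.ackC != nil {
				c.ackC <- nil
			}
		}
	}

	func main() {
		compC := make(chan cmd, 1)
		go worker(compC)
		trigger(compC)                  // fire and forget
		fmt.Println(triggerWait(compC)) // <nil>: waited for completion
	}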
@@ -8,7 +8,6 @@ package leveldb

import (
	"errors"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
@@ -19,7 +18,7 @@ import (
)

var (
	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
)

type memdbReleaser struct {
@@ -33,50 +32,40 @@ func (mr *memdbReleaser) Release() {
	})
}

func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	em, fm := db.getMems()
	v := db.s.version()

	tableIts := v.getIterators(slice, ro)
	n := len(tableIts) + len(auxt) + 3
	its := make([]iterator.Iterator, 0, n)

	if auxm != nil {
		ami := auxm.NewIterator(slice)
		ami.SetReleaser(&memdbReleaser{m: auxm})
		its = append(its, ami)
	}
	for _, t := range auxt {
		its = append(its, v.s.tops.newIterator(t, slice, ro))
	}

	emi := em.NewIterator(slice)
	ti := v.getIterators(slice, ro)
	n := len(ti) + 2
	i := make([]iterator.Iterator, 0, n)
	emi := em.mdb.NewIterator(slice)
	emi.SetReleaser(&memdbReleaser{m: em})
	its = append(its, emi)
	i = append(i, emi)
	if fm != nil {
		fmi := fm.NewIterator(slice)
		fmi := fm.mdb.NewIterator(slice)
		fmi.SetReleaser(&memdbReleaser{m: fm})
		its = append(its, fmi)
		i = append(i, fmi)
	}
	its = append(its, tableIts...)
	mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
	i = append(i, ti...)
	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
	mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
	mi.SetReleaser(&versionReleaser{v: v})
	return mi
}

func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
	var islice *util.Range
	if slice != nil {
		islice = &util.Range{}
		if slice.Start != nil {
			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
			islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
		}
		if slice.Limit != nil {
			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
			islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
		}
	}
	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
	rawIter := db.newRawIterator(islice, ro)
	iter := &dbIter{
		db:   db,
		icmp: db.s.icmp,
@@ -91,10 +80,6 @@ func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Rang
	return iter
}

func (db *DB) iterSamplingRate() int {
	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}

type dir int

const (
@@ -113,21 +98,11 @@ type dbIter struct {
	seq    uint64
	strict bool

	smaplingGap int
	dir         dir
	key         []byte
	value       []byte
	err         error
	releaser    util.Releaser
}

func (i *dbIter) sampleSeek() {
	ikey := i.iter.Key()
	i.smaplingGap -= len(ikey) + len(i.iter.Value())
	for i.smaplingGap < 0 {
		i.smaplingGap += i.db.iterSamplingRate()
		i.db.sampleSeek(ikey)
	}
	dir      dir
	key      []byte
	value    []byte
	err      error
	releaser util.Releaser
}

func (i *dbIter) setErr(err error) {
@@ -187,7 +162,7 @@ func (i *dbIter) Seek(key []byte) bool {
		return false
	}

	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
	ikey := newIkey(key, i.seq, ktSeek)
	if i.iter.Seek(ikey) {
		i.dir = dirSOI
		return i.next()
@@ -199,15 +174,14 @@ func (i *dbIter) Seek(key []byte) bool {

func (i *dbIter) next() bool {
	for {
		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
			i.sampleSeek()
		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
			if seq <= i.seq {
				switch kt {
				case keyTypeDel:
				case ktDel:
					// Skip deleted key.
					i.key = append(i.key[:0], ukey...)
					i.dir = dirForward
				case keyTypeVal:
				case ktVal:
					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
						i.key = append(i.key[:0], ukey...)
						i.value = append(i.value[:0], i.iter.Value()...)
@@ -250,13 +224,12 @@ func (i *dbIter) prev() bool {
	del := true
	if i.iter.Valid() {
		for {
			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
				i.sampleSeek()
			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
				if seq <= i.seq {
					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
						return true
					}
					del = (kt == keyTypeDel)
					del = (kt == ktDel)
					if !del {
						i.key = append(i.key[:0], ukey...)
						i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,8 +265,7 @@ func (i *dbIter) Prev() bool {
		return i.Last()
	case dirForward:
		for i.iter.Prev() {
			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
				i.sampleSeek()
			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
				if i.icmp.uCompare(ukey, i.key) < 0 {
					goto cont
				}
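newRawIterator in both versions builds one iterator per source (current memdb, frozen memdb, and the table levels) and folds them into a single sorted view. A toy sketch of that k-way merge over plain string slices; the real code merges iterator.Iterator values under the internal-key comparator:

	package main

	import "fmt"

	// mergeSorted folds several sorted key lists into one sorted stream,
	// the way NewMergedIterator folds memdb and table iterators above.
	// Earlier sources win ties, mirroring how the memdb (newest data) is
	// appended before the table iterators.
	func mergeSorted(sources [][]string) []string {
		pos := make([]int, len(sources))
		var out []string
		for {
			best := -1
			for i, s := range sources {
				if pos[i] >= len(s) {
					continue
				}
				if best < 0 || s[pos[i]] < sources[best][pos[best]] {
					best = i
				}
			}
			if best < 0 {
				return out
			}
			out = append(out, sources[best][pos[best]])
			pos[best]++
		}
	}

	func main() {
		mem := []string{"b", "d"}
		table := []string{"a", "c", "e"}
		fmt.Println(mergeSorted([][]string{mem, table})) // [a b c d e]
	}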
@@ -59,7 +59,7 @@ func (db *DB) releaseSnapshot(se *snapshotElement) {
	}
}

// Gets minimum sequence that not being snapshotted.
// Gets minimum sequence that not being snapshoted.
func (db *DB) minSeq() uint64 {
	db.snapsMu.Lock()
	defer db.snapsMu.Unlock()
@@ -110,7 +110,7 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
		err = ErrSnapshotReleased
		return
	}
	return snap.db.get(nil, nil, key, snap.elem.seq, ro)
	return snap.db.get(key, snap.elem.seq, ro)
}

// Has returns true if the DB does contains the given key.
@@ -127,11 +127,11 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error)
		err = ErrSnapshotReleased
		return
	}
	return snap.db.has(nil, nil, key, snap.elem.seq, ro)
	return snap.db.has(key, snap.elem.seq, ro)
}

// NewIterator returns an iterator for the snapshot of the underlying DB.
// The returned iterator is not safe for concurrent use, but it is safe to use
// NewIterator returns an iterator for the snapshot of the uderlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
// underlying DB. The resultant key/value pairs are guaranteed to be
@@ -158,7 +158,7 @@ func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterat
	}
	// Since iterator already hold version ref, it doesn't need to
	// hold snapshot ref.
	return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
	return snap.db.newIterator(snap.elem.seq, slice, ro)
}

// Release releases the snapshot. This will not release any returned
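Every snapshot method above funnels through snap.elem.seq, so reads observe only entries whose sequence numbers are at or below the snapshot's. A hedged usage sketch against goleveldb's public snapshot API; the in-memory storage keeps it self-contained:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/storage"
	)

	func main() {
		// An in-memory store keeps the sketch self-contained.
		db, err := leveldb.Open(storage.NewMemStorage(), nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()

		db.Put([]byte("k"), []byte("v1"), nil)
		snap, err := db.GetSnapshot()
		if err != nil {
			panic(err)
		}
		defer snap.Release() // snapshots pin old versions until released

		db.Put([]byte("k"), []byte("v2"), nil)

		v, _ := snap.Get([]byte("k"), nil) // still sees the pre-snapshot value
		fmt.Println(string(v))             // "v1"
	}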
@@ -7,29 +7,19 @@
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
)
|
||||
|
||||
var (
|
||||
errHasFrozenMem = errors.New("has frozen mem")
|
||||
)
|
||||
|
||||
type memDB struct {
|
||||
db *DB
|
||||
*memdb.DB
|
||||
db *DB
|
||||
mdb *memdb.DB
|
||||
ref int32
|
||||
}
|
||||
|
||||
func (m *memDB) getref() int32 {
|
||||
return atomic.LoadInt32(&m.ref)
|
||||
}
|
||||
|
||||
func (m *memDB) incref() {
|
||||
atomic.AddInt32(&m.ref, 1)
|
||||
}
|
||||
@@ -37,12 +27,12 @@ func (m *memDB) incref() {
 func (m *memDB) decref() {
 	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
 		// Only put back memdb with std capacity.
-		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
-			m.Reset()
-			m.db.mpoolPut(m.DB)
+		if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.mdb.Reset()
+			m.db.mpoolPut(m.mdb)
 		}
 		m.db = nil
-		m.DB = nil
+		m.mdb = nil
 	} else if ref < 0 {
 		panic("negative memdb ref")
 	}
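The decref logic above is a standard atomic reference count: the last holder tears the resource down, and a negative count is a programming error worth a panic. A self-contained sketch of the same pattern, with an illustrative release hook standing in for the memdb recycling:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type refCounted struct {
	ref     int32
	release func() // runs exactly once, when the count hits zero
}

func (r *refCounted) incref() { atomic.AddInt32(&r.ref, 1) }

func (r *refCounted) decref() {
	if n := atomic.AddInt32(&r.ref, -1); n == 0 {
		r.release()
	} else if n < 0 {
		panic("negative ref") // same invariant the diff enforces
	}
}

func main() {
	r := &refCounted{ref: 1, release: func() { fmt.Println("released") }}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		r.incref()
		wg.Add(1)
		go func() { defer wg.Done(); r.decref() }()
	}
	wg.Wait()
	r.decref() // drops the initial reference; prints "released" once
}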
@@ -58,40 +48,22 @@ func (db *DB) addSeq(delta uint64) {
 	atomic.AddUint64(&db.seq, delta)
 }
 
-func (db *DB) setSeq(seq uint64) {
-	atomic.StoreUint64(&db.seq, seq)
-}
-
-func (db *DB) sampleSeek(ikey internalKey) {
-	v := db.s.version()
-	if v.sampleSeek(ikey) {
-		// Trigger table compaction.
-		db.compTrigger(db.tcompCmdC)
-	}
-	v.release()
-}
-
 func (db *DB) mpoolPut(mem *memdb.DB) {
-	if !db.isClosed() {
-		select {
-		case db.memPool <- mem:
-		default:
-		}
-	}
-}
+	defer func() {
+		recover()
+	}()
+	select {
+	case db.memPool <- mem:
+	default:
+	}
+}
 
-func (db *DB) mpoolGet(n int) *memDB {
-	var mdb *memdb.DB
-	select {
-	case mdb = <-db.memPool:
-	default:
-	}
-	if mdb == nil || mdb.Capacity() < n {
-		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
-	}
-	return &memDB{
-		db: db,
-		DB: mdb,
-	}
-}
+func (db *DB) mpoolGet() *memdb.DB {
+	select {
+	case mem := <-db.memPool:
+		return mem
+	default:
+		return nil
+	}
+}
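Both versions of the memdb pool boil down to a buffered channel used as a lossy free list: puts drop the buffer when the pool is full, gets fall through when it is empty. A simplified sketch of that idiom, with []byte standing in for *memdb.DB:

package main

import "fmt"

type pool chan []byte

func (p pool) put(b []byte) {
	select {
	case p <- b: // pooled for reuse
	default: // pool full; let GC take it
	}
}

func (p pool) get() []byte {
	select {
	case b := <-p:
		return b
	default:
		return nil // caller allocates fresh, as mpoolGet's callers do
	}
}

func main() {
	p := make(pool, 1)
	p.put(make([]byte, 8))
	fmt.Println(len(p.get()), p.get() == nil) // 8 true
}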
@@ -104,13 +76,7 @@ func (db *DB) mpoolDrain() {
 			case <-db.memPool:
 			default:
 			}
-		case <-db.closeC:
-			ticker.Stop()
-			// Make sure the pool is drained.
-			select {
-			case <-db.memPool:
-			case <-time.After(time.Second):
-			}
+		case _, _ = <-db.closeC:
+			close(db.memPool)
 			return
 		}
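The reverted mpoolDrain closes db.memPool on shutdown, which is why the matching mpoolPut above guards its send with defer/recover: sending on a closed channel panics. A standalone sketch of that recover idiom, with illustrative names:

package main

import "fmt"

// safeSend attempts a non-blocking send and survives the channel being
// closed underneath it, mirroring the older mpoolPut's defer/recover.
func safeSend(ch chan int, v int) (ok bool) {
	defer func() {
		if recover() != nil {
			ok = false // channel was closed under us
		}
	}()
	select {
	case ch <- v:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(safeSend(ch, 1)) // true
	close(ch)
	fmt.Println(safeSend(ch, 2)) // false: panic recovered
}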
@@ -120,10 +86,11 @@ func (db *DB) mpoolDrain() {
 // Create new memdb and froze the old one; need external synchronization.
 // newMem only called synchronously by the writer.
 func (db *DB) newMem(n int) (mem *memDB, err error) {
-	fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
-	w, err := db.s.stor.Create(fd)
+	num := db.s.allocFileNum()
+	file := db.s.getJournalFile(num)
+	w, err := file.Create()
 	if err != nil {
-		db.s.reuseFileNum(fd.Num)
+		db.s.reuseFileNum(num)
 		return
 	}
@@ -131,7 +98,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
 	defer db.memMu.Unlock()
 
 	if db.frozenMem != nil {
-		return nil, errHasFrozenMem
+		panic("still has frozen mem")
 	}
 
 	if db.journal == nil {
@@ -139,14 +106,20 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
 	} else {
 		db.journal.Reset(w)
 		db.journalWriter.Close()
-		db.frozenJournalFd = db.journalFd
+		db.frozenJournalFile = db.journalFile
 	}
 	db.journalWriter = w
-	db.journalFd = fd
+	db.journalFile = file
 	db.frozenMem = db.mem
-	mem = db.mpoolGet(n)
-	mem.incref() // for self
-	mem.incref() // for caller
+	mdb := db.mpoolGet()
+	if mdb == nil || mdb.Capacity() < n {
+		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+	}
+	mem = &memDB{
+		db:  db,
+		mdb: mdb,
+		ref: 2,
+	}
 	db.mem = mem
 	// The seq only incremented by the writer. And whoever called newMem
 	// should hold write lock, so no need additional synchronization here.
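The hunk above is the heart of memtable rotation: the live journal and memdb are frozen as a unit and a fresh pair takes over, with at most one frozen memdb outstanding. A toy sketch of that double-buffer invariant, with journals, refcounts, and errors omitted and all names illustrative:

package main

import (
	"fmt"
	"sync"
)

type dbState struct {
	mu        sync.Mutex
	mem       []string // active write buffer
	frozenMem []string // sealed, waiting to be flushed
}

func (d *dbState) rotate() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.frozenMem != nil {
		panic("still has frozen mem") // same invariant as the diff's newMem
	}
	d.frozenMem = d.mem // seal the old buffer
	d.mem = nil         // a fresh buffer takes over
}

func main() {
	d := &dbState{mem: []string{"put k v"}}
	d.rotate()
	fmt.Println(len(d.mem), len(d.frozenMem)) // 0 1
}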
@@ -158,26 +131,24 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
 func (db *DB) getMems() (e, f *memDB) {
 	db.memMu.RLock()
 	defer db.memMu.RUnlock()
-	if db.mem != nil {
-		db.mem.incref()
-	} else if !db.isClosed() {
+	if db.mem == nil {
 		panic("nil effective mem")
 	}
+	db.mem.incref()
 	if db.frozenMem != nil {
 		db.frozenMem.incref()
 	}
 	return db.mem, db.frozenMem
 }
 
-// Get effective memdb.
+// Get frozen memdb.
 func (db *DB) getEffectiveMem() *memDB {
 	db.memMu.RLock()
 	defer db.memMu.RUnlock()
-	if db.mem != nil {
-		db.mem.incref()
-	} else if !db.isClosed() {
+	if db.mem == nil {
 		panic("nil effective mem")
 	}
+	db.mem.incref()
 	return db.mem
 }
@@ -201,25 +172,17 @@ func (db *DB) getFrozenMem() *memDB {
 // Drop frozen memdb; assume that frozen memdb isn't nil.
 func (db *DB) dropFrozenMem() {
 	db.memMu.Lock()
-	if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
-		db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
+	if err := db.frozenJournalFile.Remove(); err != nil {
+		db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
 	} else {
-		db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
+		db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
 	}
-	db.frozenJournalFd = storage.FileDesc{}
+	db.frozenJournalFile = nil
 	db.frozenMem.decref()
 	db.frozenMem = nil
 	db.memMu.Unlock()
 }
 
 // Clear mems ptr; used by DB.Close().
 func (db *DB) clearMems() {
 	db.memMu.Lock()
 	db.mem = nil
 	db.frozenMem = nil
 	db.memMu.Unlock()
 }
 
 // Set closed flag; return true if not already closed.
 func (db *DB) setClosed() bool {
 	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
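setClosed is the usual close-once idiom: a compare-and-swap flips the flag exactly once, so only the first caller runs teardown. A minimal sketch of the pattern in isolation:

package main

import (
	"fmt"
	"sync/atomic"
)

type db struct{ closed uint32 }

// setClosed returns true only for the first caller; later calls see the
// flag already set and skip teardown.
func (d *db) setClosed() bool {
	return atomic.CompareAndSwapUint32(&d.closed, 0, 1)
}

func main() {
	var d db
	fmt.Println(d.setClosed()) // true: first close wins
	fmt.Println(d.setClosed()) // false: already closed
}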
2579	Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (generated, vendored, new file)
File diff suppressed because it is too large
@@ -21,16 +21,14 @@ type Reader interface {
 	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
 }
 
 // Sizes is list of size.
-type Sizes []int64
+type Sizes []uint64
 
 // Sum returns sum of the sizes.
-func (sizes Sizes) Sum() int64 {
-	var sum int64
-	for _, size := range sizes {
-		sum += size
+func (p Sizes) Sum() (n uint64) {
+	for _, s := range p {
+		n += s
 	}
-	return sum
+	return n
 }
 
 // Logging.
@@ -42,59 +40,59 @@ func (db *DB) checkAndCleanFiles() error {
 	v := db.s.version()
 	defer v.release()
 
-	tmap := make(map[int64]bool)
-	for _, tables := range v.levels {
+	tablesMap := make(map[uint64]bool)
+	for _, tables := range v.tables {
 		for _, t := range tables {
-			tmap[t.fd.Num] = false
+			tablesMap[t.file.Num()] = false
 		}
 	}
 
-	fds, err := db.s.stor.List(storage.TypeAll)
+	files, err := db.s.getFiles(storage.TypeAll)
 	if err != nil {
 		return err
 	}
 
-	var nt int
-	var rem []storage.FileDesc
-	for _, fd := range fds {
+	var nTables int
+	var rem []storage.File
+	for _, f := range files {
 		keep := true
-		switch fd.Type {
+		switch f.Type() {
 		case storage.TypeManifest:
-			keep = fd.Num >= db.s.manifestFd.Num
+			keep = f.Num() >= db.s.manifestFile.Num()
 		case storage.TypeJournal:
-			if !db.frozenJournalFd.Zero() {
-				keep = fd.Num >= db.frozenJournalFd.Num
+			if db.frozenJournalFile != nil {
+				keep = f.Num() >= db.frozenJournalFile.Num()
 			} else {
-				keep = fd.Num >= db.journalFd.Num
+				keep = f.Num() >= db.journalFile.Num()
 			}
 		case storage.TypeTable:
-			_, keep = tmap[fd.Num]
+			_, keep = tablesMap[f.Num()]
 			if keep {
-				tmap[fd.Num] = true
-				nt++
+				tablesMap[f.Num()] = true
+				nTables++
 			}
 		}
 
 		if !keep {
-			rem = append(rem, fd)
+			rem = append(rem, f)
 		}
 	}
 
-	if nt != len(tmap) {
-		var mfds []storage.FileDesc
-		for num, present := range tmap {
+	if nTables != len(tablesMap) {
+		var missing []*storage.FileInfo
+		for num, present := range tablesMap {
 			if !present {
-				mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
+				missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
 				db.logf("db@janitor table missing @%d", num)
 			}
 		}
-		return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
+		return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
 	}
 
-	db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
-	for _, fd := range rem {
-		db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
-		if err := db.s.stor.Remove(fd); err != nil {
+	db.logf("db@janitor F·%d G·%d", len(files), len(rem))
+	for _, f := range rem {
+		db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
+		if err := f.Remove(); err != nil {
 			return err
 		}
 	}
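checkAndCleanFiles is a mark-and-sweep janitor: mark every file the current version still references, then sweep (delete) anything on disk left unmarked, and report corruption if a referenced table is missing. A compact sketch of the sweep half, with made-up file names and a hypothetical live set:

package main

import "fmt"

// sweep returns the on-disk files that no live version references.
func sweep(onDisk []string, live map[string]bool) (removed []string) {
	for _, f := range onDisk {
		if !live[f] {
			removed = append(removed, f) // stale: safe to delete
		}
	}
	return removed
}

func main() {
	live := map[string]bool{"000005.ldb": true, "MANIFEST-000004": true}
	onDisk := []string{"000003.ldb", "000005.ldb", "MANIFEST-000004"}
	fmt.Println(sweep(onDisk, live)) // [000003.ldb]
}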
Some files were not shown because too many files have changed in this diff.