Compare commits

..

28 Commits

Author SHA1 Message Date
Audrius Butkevicius
2470875d14 Merge pull request #1681 from calmh/major-upgrade
Allow major upgrades [v0.10]
2015-04-22 14:11:18 +01:00
Jakob Borg
930e90289f Backport the v0.11 upgrade system 2015-04-22 21:27:13 +09:00
Jakob Borg
bf959a77e2 Allow major upgrade (fixes #1680) 2015-04-22 21:22:16 +09:00
Jakob Borg
3cc4cb0a0b Translation update 2015-03-29 09:46:44 +02:00
Jakob Borg
e6cba61740 Don't allow arbitrarily short reconnection intervals (fixes #1524) 2015-03-29 09:44:20 +02:00
Jakob Borg
cd7ce73f59 Add negative cache time to global discovery
This reduces the number of external queries by not repeating a query for
a given address if we have failed within the last three minutes.
2015-03-26 08:43:55 +01:00
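
For illustration, a minimal sketch of such a negative cache (hypothetical names, not Syncthing's actual discovery code):

```go
package discover

import (
	"sync"
	"time"
)

// negCache remembers addresses whose lookups recently failed so that
// callers can skip re-querying them until the TTL expires.
type negCache struct {
	mut  sync.Mutex
	seen map[string]time.Time
	ttl  time.Duration
}

func newNegCache(ttl time.Duration) *negCache {
	return &negCache{seen: make(map[string]time.Time), ttl: ttl}
}

// NoteFailure records a failed lookup for addr.
func (c *negCache) NoteFailure(addr string) {
	c.mut.Lock()
	defer c.mut.Unlock()
	c.seen[addr] = time.Now()
}

// ShouldSkip reports whether addr failed within the TTL window.
func (c *negCache) ShouldSkip(addr string) bool {
	c.mut.Lock()
	defer c.mut.Unlock()
	t, ok := c.seen[addr]
	return ok && time.Since(t) < c.ttl
}
```

With `cache := newNegCache(3 * time.Minute)`, a lookup first checks `cache.ShouldSkip(addr)` and calls `cache.NoteFailure(addr)` when the query fails.
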
KAMADA Ken'ichi
fab4e33c58 Preserve the permission of a newly created directory
We need an explicit chmod() when creating a new directory.
Otherwise a new directory may be created with a different permission
from the one received from an originating device, because the umask
is applied to the mode given to mkdir().
The incorrect permission is later sent back to the originating device
and the original permission will be lost.
2015-03-26 08:43:16 +01:00
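
A hedged sketch of the mechanism described above (illustrative, not the actual patch): mkdir(2) filters the requested mode through the process umask, so an explicit chmod is needed to apply the exact incoming permission bits.

```go
package fs

import "os"

// mkdirWithMode creates dir with exactly the given permission bits.
// os.Mkdir alone is subject to the umask (e.g. with umask 022,
// requesting 0775 yields 0755), so we chmod afterwards; chmod is
// not affected by the umask.
func mkdirWithMode(dir string, mode os.FileMode) error {
	if err := os.Mkdir(dir, mode); err != nil {
		return err
	}
	return os.Chmod(dir, mode)
}
```
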
Audrius Butkevicius
b79b13a75b Configure location provider 2015-03-26 08:43:06 +01:00
Audrius Butkevicius
c294d5f087 Fix crash on walker error (fixes #1507) 2015-03-22 14:09:14 +00:00
Jakob Borg
10ead2e61f Send correct MIME type for SVG images (fixes #1506) 2015-03-22 12:56:50 +01:00
Jakob Borg
960b40fa89 Translation update 2015-03-22 10:34:45 +01:00
Stefan Tatschner
afad329e99 systemd: Set -logflags to 0, provide -no-browser flag
Syncthing should not try to start a browser when invoked by systemd.
Furthermore, we do not need any timestamps in the journal, as systemd
already handles this for us.
2015-03-22 10:26:53 +01:00
Jakob Borg
4025284fba Update integration test configs to v10 2015-03-22 10:26:53 +01:00
Jakob Borg
a595e814dd Set defaults correctly for autoNormalize
The default:"foo" struct tags aren't actually used for folder configs.
2015-03-22 10:26:51 +01:00
Alexander Graf
963d8121d9 Use Lstat instead of Stat to prevent errors with symlinks 2015-03-22 08:48:37 +01:00
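
The distinction, in a short sketch: os.Stat follows a symlink (and fails if the target is missing), while os.Lstat describes the link itself.

```go
package scanner

import (
	"fmt"
	"os"
)

// describe inspects path without following symlinks. Using os.Stat
// here would return an error for a dangling link; os.Lstat succeeds
// and lets us detect the link via the mode bits.
func describe(path string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		fmt.Println(path, "is a symlink")
	}
	return nil
}
```
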
Audrius Butkevicius
03019988b1 Skip unspecified IPs 2015-03-22 08:48:37 +01:00
Audrius Butkevicius
97115afa32 Print LANs on startup 2015-03-22 08:48:37 +01:00
Jakob Borg
c9f5bae177 Decide once and for all to return filepath.SkipDir or nil 2015-03-22 08:47:36 +01:00
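
For reference, the two return values behave differently in a filepath.Walk callback: nil descends into the directory, filepath.SkipDir prunes it, and any other error aborts the walk. A minimal sketch:

```go
package main

import (
	"os"
	"path/filepath"
	"strings"
)

func main() {
	filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err // a real error aborts the whole walk
		}
		if path != "." && info.IsDir() && strings.HasPrefix(info.Name(), ".") {
			return filepath.SkipDir // prune hidden directories entirely
		}
		return nil // nil keeps walking, descending into directories
	})
}
```
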
Jakob Borg
2bd11ca4e3 Automatically fix file name normalization errors (fixes #430) 2015-03-22 08:47:34 +01:00
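
The errors in question are Unicode normalization mismatches: the same visible name can be stored as precomposed NFC or decomposed NFD (OS X historically writes NFD). A hedged sketch using golang.org/x/text/unicode/norm, one of the vendored dependencies listed below:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfd := "flo\u0308te"        // "flöte" as 'o' + combining diaeresis (NFD)
	nfc := norm.NFC.String(nfd) // precomposed "fl\u00f6te"
	fmt.Println(nfd == nfc)         // false: different byte sequences
	fmt.Println(len(nfd), len(nfc)) // 7 6: same name, different lengths
}
```
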
Jakob Borg
a5de1acb46 Use SVG format logos 2015-03-22 08:46:54 +01:00
Jakob Borg
5581751e9d Rename files to match type names 2015-03-22 08:46:43 +01:00
Jakob Borg
055ae92273 Refactor state tracking (...)
Move state tracking into the puller/scanner objects. This is a first
step towards resolving #1391.

Rename Puller and Scanner to roFolder and rwFolder as they have more
duties than just pulling and scanning, and don't need to be exported.
2015-03-22 08:46:43 +01:00
Audrius Butkevicius
dea7c77055 Rebuild assets 2015-03-22 08:46:41 +01:00
Audrius Butkevicius
765dda6ad7 Fix build 2015-03-22 08:46:26 +01:00
Jakob Borg
28702a1c9d Add /rest/filestatus 2015-03-22 08:46:26 +01:00
Jakob Borg
40d1226612 MPLv2 2015-03-22 08:46:25 +01:00
Johan Vromans
effe8ce8a9 Suppress 'Last File Received' if a node is folder master (fixes #1472) 2015-03-22 08:46:24 +01:00
Jakob Borg
4c3ba24826 Add sciurius 2015-03-22 08:45:42 +01:00
2002 changed files with 43748 additions and 830185 deletions

.gitattributes (vendored, 9 lines)

@@ -1,9 +0,0 @@
# Text files use LF line endings in this repository
* text=auto
# Except the dependencies, which we leave alone
vendor/** -text=auto
# Diffs on these files are meaningless
*.svg -diff
*.pb.go -diff

.gitignore (vendored, 13 lines)

@@ -1,17 +1,18 @@
/syncthing
/stdiscosrv
./syncthing
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.sublime*
.idea/
.jshintrc
coverage.out
files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
!gui/scripts/syncthing
.DS_Store
syncthing.md5
syncthing.exe.md5
RELEASE
deb
lib/auto/gui.files.go

AUTHORS (145 lines)

@@ -1,99 +1,50 @@
# This is the official list of Syncthing authors for copyright purposes.
# The format is:
#
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The NICKS list is auto generated from this file.
Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (simplypeachy) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrey D (scienmind) <scintertech@cryptolab.net>
Antoine Lamielle (0x010C) <antoine.lamielle@0x010c.fr> <gh@0x010c.fr>
Antony Male (canton7) <antony.male@gmail.com>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (Moter8) <moter8@gmail.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Tim Abell (timabell) <tim@timwise.co.uk>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
William A. Kennington III (wkennington) <william@wkennington.com>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Curthoys <ben@bencurthoys.com>
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Brendan Long <self@brendanlong.com>
Caleb Callaway <enlightened.despot@gmail.com>
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Chris Joel <chris@scriptolo.gy>
Colin Kennedy <moshen.colin@gmail.com>
Daniel Martí <mvdan@mvdan.cc>
Dennis Wilson <dw@risu.io>
Dominik Heidler <dominik@heidler.eu>
Emil Hessman <emil@hessman.se>
Federico Castagnini <federico.castagnini@gmail.com>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Gilli Sigurdsson <gilli@vx.is>
Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Johan Vromans <jvromans@squirrel.nl>
Karol Różycki <rozycki.karol@gmail.com>
Kamada Ken'ichi <kamada@nanohz.org>
Lode Hoste <zillode@zillode.be>
Marcin Dziadus <dziadus.marcin@gmail.com>
Marc Laporte <marc@marclaporte.com>
Marc Pujol <kilburn@la3.org>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg <peter@speartail.com>
Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Stefan Tatschner <stefan@sevenbyte.org>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>
Tomas Cerveny <kozec@kozec.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Vil Brekin <vilbrekin@gmail.com>


@@ -32,32 +32,109 @@ latest info on Transifex.
## Contributing Code
Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](http://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://github.com/syncthing/syncthing/wiki/FAQ#why-are-you-being-so-hard-on-my-pull-request);
it's all in the name of quality. :) Following the points below will make this
a smoother process.
## Contributing Documentation
Individuals making significant and valuable contributions are given
commit-access to the project. If you make a significant contribution and
are not considered for commit-access, please contact any of the
Syncthing core team members.
Updates to the [documentation site](http://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.
### Authorship
All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit;
$ git config --global user.name "Jane Doe"
$ git config --global user.email janedoe@example.com
You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.
### Core Team
The Syncthing core team currently consists of the following members;
- Jakob Borg (@calmh)
- Audrius Butkevicius (@AudriusButkevicius)
## Coding Style
- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
as much as makes sense.
- All text files use Unix line endings.
- Each commit should be `go fmt` clean.
- The commit message subject should be a single short sentence
describing the change, starting with a capital letter.
- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject.
- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line (see the
sketch after this list).
- A contribution solving a single issue or introducing a single new
feature should probably be a single commit based on the current
`master` branch. You may be asked to "rebase" or "squash" your pull
request to make sure this is the case, especially if there have been
amendments during review.
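
For illustration, a file fragment with its imports grouped the way `goimports` leaves them (standard library, blank line, third party):

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

// lengthOf exists only so the imports above are used; the point is
// the grouping of the import block.
func lengthOf(bs []byte) string {
	var buf bytes.Buffer
	xdr.NewWriter(&buf).WriteBytes(bs)
	return fmt.Sprintf("%d encoded bytes", buf.Len())
}
```
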
## Licensing
All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:
All contributions are made under the same MPLv2 license as the rest of
the project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
- Certain commands (under cmd/...) may have a separate license, indicated by
the presence of a LICENSE file in the corresponding directory.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.
- The documentation (man/...) is licensed under the Creative Commons
Attribution 4.0 International License.
## Building
- Projects under vendor/... are copyright by and licensed from their
respective original authors. Contributions should be made to the original
project, not here.
[See the documentation](https://github.com/syncthing/syncthing/wiki/Building)
on how to get started with a build environment.
Regardless of the license in effect, you retain the copyright to your
contribution.
## Branches
- `master` is the main branch containing good code that will end up in
the next release. You should base your work on it. It won't ever be
rebased or force-pushed to.
- `vx.y` branches exist to make patch releases on otherwise obsolete
minor releases. Should only contain fixes cherry picked from master.
Don't base any work on them.
- Other branches are probably topic branches and may be subject to
rebasing. Don't base any work on them unless you specifically know
otherwise.
## Tags
All releases are tagged semver style as `vx.y.z`. Release tags are
signed by GPG key BCE524C7.
## Tests
Yes please!
## Documentation
[Over here!](https://github.com/syncthing/syncthing/wiki)
## License
MPLv2

Godeps/Godeps.json (generated, new file, 73 lines)

@@ -0,0 +1,73 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.4",
"Packages": [
"./cmd/..."
],
"Deps": [
{
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
},
{
"ImportPath": "github.com/calmh/logger",
"Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
},
{
"ImportPath": "github.com/calmh/luhn",
"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "ff948d7666c5e0fd18d398f6278881724d36a90b"
},
{
"ImportPath": "github.com/juju/ratelimit",
"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "91292666f7e40f03185cdd1da7d85633c973eca7"
},
{
"ImportPath": "github.com/syncthing/protocol",
"Rev": "1a4398cc55c8fe82a964097eaf59f2475b020a49"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "e3f32eb300aa1e514fe8ba58d008da90a062273d"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
},
{
"ImportPath": "github.com/vitrun/qart/coding",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/gf256",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/qr",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
}
]
}

Godeps/Readme (generated, new file, 5 lines)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

Godeps/_workspace/.gitignore (generated, vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
/pkg
/bin


@@ -0,0 +1 @@
/lz4-example/lz4-example


@@ -0,0 +1,7 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- tip


@@ -4,7 +4,7 @@ go-lz4
go-lz4 is port of LZ4 lossless compression algorithm to Go. The original C code
is located at:
https://github.com/Cyan4973/lz4
https://code.google.com/p/lz4/
Status
------


@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
length += ln
}
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
if int(d.spos+length) > len(d.src) {
return nil, ErrCorrupt
}
@@ -179,12 +179,7 @@ func Decode(dst, src []byte) ([]byte, error) {
}
literal := d.dpos - d.ref
if literal < 4 {
if int(d.dpos+4) > len(d.dst) {
return nil, ErrCorrupt
}
d.cp(4, decr[literal])
} else {
length += 4


@@ -25,10 +25,8 @@
package lz4
import (
"encoding/binary"
"errors"
)
import "encoding/binary"
import "errors"
const (
minMatch = 4


@@ -0,0 +1,15 @@
logger
======
A small wrapper around `log` to provide log levels.
Documentation
-------------
http://godoc.org/github.com/calmh/logger
License
-------
MIT

Godeps/_workspace/src/github.com/calmh/logger/logger.go (generated, vendored, new file, 160 lines)

@@ -0,0 +1,160 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package logger implements a standardized logger with callback functionality
package logger
import (
"fmt"
"log"
"os"
"strings"
"sync"
)
type LogLevel int
const (
LevelDebug LogLevel = iota
LevelInfo
LevelOK
LevelWarn
LevelFatal
NumLevels
)
// A MessageHandler is called with the log level and message text.
type MessageHandler func(l LogLevel, msg string)
type Logger struct {
logger *log.Logger
handlers [NumLevels][]MessageHandler
mut sync.Mutex
}
// The default logger logs to standard output with a time prefix.
var DefaultLogger = New()
func New() *Logger {
return &Logger{
logger: log.New(os.Stdout, "", log.Ltime),
}
}
// AddHandler registers a new MessageHandler to receive messages logged
// at the specified level.
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
l.mut.Lock()
defer l.mut.Unlock()
l.handlers[level] = append(l.handlers[level], h)
}
// See log.SetFlags
func (l *Logger) SetFlags(flag int) {
l.logger.SetFlags(flag)
}
// See log.SetPrefix
func (l *Logger) SetPrefix(prefix string) {
l.logger.SetPrefix(prefix)
}
func (l *Logger) callHandlers(level LogLevel, s string) {
for _, h := range l.handlers[level] {
h(level, strings.TrimSpace(s))
}
}
// Debugln logs a line with a DEBUG prefix.
func (l *Logger) Debugln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Debugf logs a formatted line with a DEBUG prefix.
func (l *Logger) Debugf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Infoln logs a line with an INFO prefix.
func (l *Logger) Infoln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Infof logs a formatted line with an INFO prefix.
func (l *Logger) Infof(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Okln logs a line with an OK prefix.
func (l *Logger) Okln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Okf logs a formatted line with an OK prefix.
func (l *Logger) Okf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Warnln logs a line with a WARNING prefix.
func (l *Logger) Warnln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Warnf logs a formatted line with a WARNING prefix.
func (l *Logger) Warnf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Fatalln logs a line with a FATAL prefix and exits the process with exit
// code 1.
func (l *Logger) Fatalln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
// exit code 1.
func (l *Logger) Fatalf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}
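
A short usage sketch of the package above, exercising the API defined in logger.go:

```go
package main

import "github.com/calmh/logger"

func main() {
	l := logger.New()
	// Receive a callback for every message logged at LevelWarn.
	l.AddHandler(logger.LevelWarn, func(level logger.LogLevel, msg string) {
		// forward msg to a GUI, a file, an event bus, ...
	})
	l.Infoln("starting up")         // prints "INFO: starting up"
	l.Warnf("%d folders failed", 2) // prints a WARNING line and fires the handler
}
```
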


@@ -0,0 +1,58 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package logger
import (
"strings"
"testing"
)
func TestAPI(t *testing.T) {
l := New()
l.SetFlags(0)
l.SetPrefix("testing")
debug := 0
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
info := 0
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
ok := 0
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
l.Debugf("test %d", 0)
l.Debugln("test", 0)
l.Infof("test %d", 1)
l.Infoln("test", 1)
l.Warnf("test %d", 2)
l.Warnln("test", 2)
l.Okf("test %d", 3)
l.Okln("test", 3)
if debug != 2 {
t.Errorf("Debug handler called %d != 2 times", debug)
}
if info != 2 {
t.Errorf("Info handler called %d != 2 times", info)
}
if warn != 2 {
t.Errorf("Warn handler called %d != 2 times", warn)
}
if ok != 2 {
t.Errorf("Ok handler called %d != 2 times", ok)
}
}
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
return func(l LogLevel, msg string) {
*counter++
if l != expectl {
t.Errorf("Incorrect message level %d != %d", l, expectl)
}
if !strings.HasSuffix(msg, expectmsg) {
t.Errorf("%q does not end with %q", msg, expectmsg)
}
}
}


@@ -0,0 +1 @@
coverage.out

Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml (generated, vendored, new file, 19 lines)

@@ -0,0 +1,19 @@
language: go
go:
- tip
install:
- export PATH=$PATH:$HOME/gopath/bin
- go get code.google.com/p/go.tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- ./generate.sh
- go test -coverprofile=coverage.out
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
env:
global:
secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=


@@ -1,10 +1,12 @@
xdr
===
[![Build Status](https://img.shields.io/circleci/project/calmh/xdr.svg?style=flat-square)](https://circleci.com/gh/calmh/xdr)
[![Build Status](https://img.shields.io/travis/calmh/xdr.svg?style=flat)](https://travis-ci.org/calmh/xdr)
[![Coverage Status](https://img.shields.io/coveralls/calmh/xdr.svg?style=flat)](https://coveralls.io/r/calmh/xdr?branch=master)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/calmh/xdr)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT)
This is an XDR marshalling/unmarshalling library. It uses code generation and
not reflection.
This is an XDR encoding/decoding library. It uses code generation and
not reflection. It supports the IPDR bastardized XDR format when built
with `-tags ipdr`.


@@ -0,0 +1,117 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import (
"io"
"io/ioutil"
"testing"
"github.com/calmh/xdr"
)
type XDRBenchStruct struct {
I1 uint64
I2 uint32
I3 uint16
I4 uint8
Bs0 []byte // max:128
Bs1 []byte
S0 string // max:128
S1 string
}
var res []byte // not to be optimized away
var s = XDRBenchStruct{
I1: 42,
I2: 43,
I3: 44,
I4: 45,
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
S0: "Hello World! String one.",
S1: "Hello World! String two.",
}
var e []byte
func init() {
e, _ = s.MarshalXDR()
}
func BenchmarkThisMarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
res, _ = s.MarshalXDR()
}
}
func BenchmarkThisUnmarshal(b *testing.B) {
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.UnmarshalXDR(e)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkThisEncode(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := s.EncodeXDR(ioutil.Discard)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkThisEncoder(b *testing.B) {
w := xdr.NewWriter(ioutil.Discard)
for i := 0; i < b.N; i++ {
_, err := s.encodeXDR(w)
if err != nil {
b.Fatal(err)
}
}
}
type repeatReader struct {
data []byte
}
func (r *repeatReader) Read(bs []byte) (n int, err error) {
if len(bs) > len(r.data) {
err = io.EOF
}
n = copy(bs, r.data)
r.data = r.data[n:]
return n, err
}
func (r *repeatReader) Reset(bs []byte) {
r.data = bs
}
func BenchmarkThisDecode(b *testing.B) {
rr := &repeatReader{e}
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.DecodeXDR(rr)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}
func BenchmarkThisDecoder(b *testing.B) {
rr := &repeatReader{e}
r := xdr.NewReader(rr)
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.decodeXDR(r)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}


@@ -0,0 +1,199 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
/*
XDRBenchStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I1 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I2 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | I3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| uint8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Bs0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs0 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Bs1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs1 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S0 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S1 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct XDRBenchStruct {
unsigned hyper I1;
unsigned int I2;
unsigned int I3;
uint8 I4;
opaque Bs0<128>;
opaque Bs1<>;
string S0<128>;
string S1<>;
}
*/
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o XDRBenchStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(o.I1)
xw.WriteUint32(o.I2)
xw.WriteUint16(o.I3)
xw.WriteUint8(o.I4)
if l := len(o.Bs0); l > 128 {
return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
}
xw.WriteBytes(o.Bs0)
xw.WriteBytes(o.Bs1)
if l := len(o.S0); l > 128 {
return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
}
xw.WriteString(o.S0)
xw.WriteString(o.S1)
return xw.Tot(), xw.Error()
}
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
o.I1 = xr.ReadUint64()
o.I2 = xr.ReadUint32()
o.I3 = xr.ReadUint16()
o.I4 = xr.ReadUint8()
o.Bs0 = xr.ReadBytesMax(128)
o.Bs1 = xr.ReadBytes()
o.S0 = xr.ReadStringMax(128)
o.S1 = xr.ReadString()
return xr.Error()
}
/*
repeatReader Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of data |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ data (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct repeatReader {
opaque data<>;
}
*/
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o repeatReader) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o repeatReader) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteBytes(o.data)
return xw.Tot(), xw.Error()
}
func (o *repeatReader) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
o.data = xr.ReadBytes()
return xr.Error()
}


@@ -0,0 +1,456 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package main
import (
"bytes"
"flag"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/token"
"io"
"log"
"os"
"regexp"
"strconv"
"strings"
"text/template"
)
type fieldInfo struct {
Name string
IsBasic bool // handled by one of the native Read/WriteUint64 etc. functions
IsSlice bool // field is a slice of FieldType
FieldType string // original type of field, i.e. "int"
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
Convert string // what to convert to when encoding, i.e. "uint64"
Max int // max size for slices and strings
}
type structInfo struct {
Name string
Fields []fieldInfo
}
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package {{.Package}}
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
`))
var encodeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}//+n
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}//+n
func (o {{.TypeName}}) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}//+n
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}//+n
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
}
{{end}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
{{else}}
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
{{end}}
{{else}}
{{if ge $fieldInfo.Max 1}}
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
}
{{end}}
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
{{else if $fieldInfo.IsBasic}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
{{else}}
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
{{end}}
}
{{end}}
{{end}}
return xw.Tot(), xw.Error()
}//+n
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}//+n
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}//+n
func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
{{else}}
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
{{end}}
{{else}}
(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
{{end}}
{{else}}
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
{{if ge $fieldInfo.Max 1}}
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
}
{{end}}
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
{{else}}
(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
{{end}}
}
{{end}}
{{end}}
return xr.Error()
}`))
var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
type typeSet struct {
Type string
Encoder string
}
var xdrEncoders = map[string]typeSet{
"int8": typeSet{"uint8", "Uint8"},
"uint8": typeSet{"", "Uint8"},
"int16": typeSet{"uint16", "Uint16"},
"uint16": typeSet{"", "Uint16"},
"int32": typeSet{"uint32", "Uint32"},
"uint32": typeSet{"", "Uint32"},
"int64": typeSet{"uint64", "Uint64"},
"uint64": typeSet{"", "Uint64"},
"int": typeSet{"uint64", "Uint64"},
"string": typeSet{"", "String"},
"[]byte": typeSet{"", "Bytes"},
"bool": typeSet{"", "Bool"},
}
func handleStruct(t *ast.StructType) []fieldInfo {
var fs []fieldInfo
for _, sf := range t.Fields.List {
if len(sf.Names) == 0 {
// We don't handle anonymous fields
continue
}
fn := sf.Names[0].Name
var max = 0
if sf.Comment != nil {
c := sf.Comment.List[0].Text
if m := maxRe.FindStringSubmatch(c); m != nil {
max, _ = strconv.Atoi(m[1])
}
if strings.Contains(c, "noencode") {
continue
}
}
var f fieldInfo
switch ft := sf.Type.(type) {
case *ast.Ident:
tn := ft.Name
if enc, ok := xdrEncoders[tn]; ok {
f = fieldInfo{
Name: fn,
IsBasic: true,
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max,
}
} else {
f = fieldInfo{
Name: fn,
IsBasic: false,
FieldType: tn,
Max: max,
}
}
case *ast.ArrayType:
if ft.Len != nil {
// We don't handle arrays
continue
}
tn := ft.Elt.(*ast.Ident).Name
if enc, ok := xdrEncoders["[]"+tn]; ok {
f = fieldInfo{
Name: fn,
IsBasic: true,
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max,
}
} else if enc, ok := xdrEncoders[tn]; ok {
f = fieldInfo{
Name: fn,
IsBasic: true,
IsSlice: true,
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max,
}
} else {
f = fieldInfo{
Name: fn,
IsBasic: false,
IsSlice: true,
FieldType: tn,
Max: max,
}
}
}
fs = append(fs, f)
}
return fs
}
func generateCode(output io.Writer, s structInfo) {
name := s.Name
fs := s.Fields
var buf bytes.Buffer
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
if err != nil {
panic(err)
}
bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
bs, err = format.Source(bs)
if err != nil {
panic(err)
}
fmt.Fprintln(output, string(bs))
}
func uncamelize(s string) string {
return regexp.MustCompile("[a-z][A-Z]").ReplaceAllStringFunc(s, func(camel string) string {
return camel[:1] + " " + camel[1:]
})
}
func generateDiagram(output io.Writer, s structInfo) {
sn := s.Name
fs := s.Fields
fmt.Fprintln(output, sn+" Structure:")
fmt.Fprintln(output)
fmt.Fprintln(output, " 0 1 2 3")
fmt.Fprintln(output, " 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
line := "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"
fmt.Fprintln(output, line)
for _, f := range fs {
tn := f.FieldType
sl := f.IsSlice
name := uncamelize(f.Name)
if sl {
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
fmt.Fprintln(output, line)
}
switch tn {
case "bool":
fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
fmt.Fprintln(output, line)
case "int16", "uint16":
fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
fmt.Fprintln(output, line)
case "int32", "uint32":
fmt.Fprintf(output, "| %s |\n", center(name, 61))
fmt.Fprintln(output, line)
case "int64", "uint64":
fmt.Fprintf(output, "| %-61s |\n", "")
fmt.Fprintf(output, "+ %s +\n", center(name+" (64 bits)", 61))
fmt.Fprintf(output, "| %-61s |\n", "")
fmt.Fprintln(output, line)
case "string", "byte": // XXX We assume slice of byte!
fmt.Fprintf(output, "| %s |\n", center("Length of "+name, 61))
fmt.Fprintln(output, line)
fmt.Fprintf(output, "/ %61s /\n", "")
fmt.Fprintf(output, "\\ %s \\\n", center(name+" (variable length)", 61))
fmt.Fprintf(output, "/ %61s /\n", "")
fmt.Fprintln(output, line)
default:
if sl {
tn = "Zero or more " + tn + " Structures"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
fmt.Fprintf(output, "/ %s /\n", center("", 61))
} else {
fmt.Fprintf(output, "| %s |\n", center(tn, 61))
}
fmt.Fprintln(output, line)
}
}
fmt.Fprintln(output)
fmt.Fprintln(output)
}
func generateXdr(output io.Writer, s structInfo) {
sn := s.Name
fs := s.Fields
fmt.Fprintf(output, "struct %s {\n", sn)
for _, f := range fs {
tn := f.FieldType
fn := f.Name
suf := ""
l := ""
if f.Max > 0 {
l = strconv.Itoa(f.Max)
}
if f.IsSlice {
suf = "<" + l + ">"
}
switch tn {
case "int16", "int32":
fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
case "uint16", "uint32":
fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
case "int64":
fmt.Fprintf(output, "\thyper %s%s;\n", fn, suf)
case "uint64":
fmt.Fprintf(output, "\tunsigned hyper %s%s;\n", fn, suf)
case "string":
fmt.Fprintf(output, "\tstring %s<%s>;\n", fn, l)
case "byte":
fmt.Fprintf(output, "\topaque %s<%s>;\n", fn, l)
default:
fmt.Fprintf(output, "\t%s %s%s;\n", tn, fn, suf)
}
}
fmt.Fprintln(output, "}")
fmt.Fprintln(output)
}
func center(s string, w int) string {
w -= len(s)
l := w / 2
r := l
if l+r < w {
r++
}
return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
}
func inspector(structs *[]structInfo) func(ast.Node) bool {
return func(n ast.Node) bool {
switch n := n.(type) {
case *ast.TypeSpec:
switch t := n.Type.(type) {
case *ast.StructType:
name := n.Name.Name
fs := handleStruct(t)
*structs = append(*structs, structInfo{name, fs})
}
return false
default:
return true
}
}
}
func main() {
outputFile := flag.String("o", "", "Output file, blank for stdout")
flag.Parse()
fname := flag.Arg(0)
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
if err != nil {
log.Fatal(err)
}
var structs []structInfo
i := inspector(&structs)
ast.Inspect(f, i)
var output io.Writer = os.Stdout
if *outputFile != "" {
fd, err := os.Create(*outputFile)
if err != nil {
log.Fatal(err)
}
output = fd
}
headerTpl.Execute(output, map[string]string{"Package": f.Name.Name})
for _, s := range structs {
fmt.Fprintf(output, "\n/*\n\n")
generateDiagram(output, s)
generateXdr(output, s)
fmt.Fprintf(output, "*/\n")
generateCode(output, s)
}
}

Godeps/_workspace/src/github.com/calmh/xdr/debug.go (generated, vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"log"
"os"
)
var (
debug = len(os.Getenv("XDRTRACE")) > 0
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
)
const maxDebugBytes = 32


@@ -1,5 +1,5 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package xdr implements an XDR (RFC 4506) marshaller/unmarshaller.
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
package xdr


@@ -0,0 +1,79 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import (
"bytes"
"math/rand"
"reflect"
"testing"
"testing/quick"
"github.com/calmh/xdr"
)
// Contains all supported types
type TestStruct struct {
I int
I8 int8
UI8 uint8
I16 int16
UI16 uint16
I32 int32
UI32 uint32
I64 int64
UI64 uint64
BS []byte // max:1024
S string // max:1024
C Opaque
SS []string // max:1024
}
type Opaque [32]byte
func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
return w.WriteRaw(u[:])
}
func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
return r.ReadRaw(u[:])
}
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
var u Opaque
for i := range u[:] {
u[i] = byte(rand.Int())
}
return reflect.ValueOf(u)
}
func TestEncDec(t *testing.T) {
fn := func(t0 TestStruct) bool {
bs, err := t0.MarshalXDR()
if err != nil {
t.Fatal(err)
}
var t1 TestStruct
err = t1.UnmarshalXDR(bs)
if err != nil {
t.Fatal(err)
}
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
if t0.I != t1.I ||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
bytes.Compare(t0.BS, t1.BS) != 0 ||
t0.S != t1.S || t0.C != t1.C {
t.Logf("%#v", t0)
t.Logf("%#v", t1)
return false
}
return true
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}


@@ -0,0 +1,174 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
/*
TestStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| int |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| int8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| uint8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| int16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | UI16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| int32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| UI32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ UI64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of BS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ BS (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Opaque |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ SS (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct TestStruct {
int I;
int8 I8;
uint8 UI8;
int16 I16;
unsigned int UI16;
int32 I32;
unsigned int UI32;
hyper I64;
unsigned hyper UI64;
opaque BS<1024>;
string S<1024>;
Opaque C;
string SS<1024>;
}
*/
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o TestStruct) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o TestStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(uint64(o.I))
xw.WriteUint8(uint8(o.I8))
xw.WriteUint8(o.UI8)
xw.WriteUint16(uint16(o.I16))
xw.WriteUint16(o.UI16)
xw.WriteUint32(uint32(o.I32))
xw.WriteUint32(o.UI32)
xw.WriteUint64(uint64(o.I64))
xw.WriteUint64(o.UI64)
if l := len(o.BS); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
}
xw.WriteBytes(o.BS)
if l := len(o.S); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
}
xw.WriteString(o.S)
_, err := o.C.encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
if l := len(o.SS); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
}
xw.WriteUint32(uint32(len(o.SS)))
for i := range o.SS {
xw.WriteString(o.SS[i])
}
return xw.Tot(), xw.Error()
}
func (o *TestStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
o.I = int(xr.ReadUint64())
o.I8 = int8(xr.ReadUint8())
o.UI8 = xr.ReadUint8()
o.I16 = int16(xr.ReadUint16())
o.UI16 = xr.ReadUint16()
o.I32 = int32(xr.ReadUint32())
o.UI32 = xr.ReadUint32()
o.I64 = int64(xr.ReadUint64())
o.UI64 = xr.ReadUint64()
o.BS = xr.ReadBytesMax(1024)
o.S = xr.ReadStringMax(1024)
(&o.C).decodeXDR(xr)
_SSSize := int(xr.ReadUint32())
if _SSSize > 1024 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}
o.SS = make([]string, _SSSize)
for i := range o.SS {
o.SS[i] = xr.ReadString()
}
return xr.Error()
}

Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go (generated, vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build ipdr
package xdr
func pad(l int) int {
return 0
}

Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go (generated, vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build !ipdr
package xdr
func pad(l int) int {
d := l % 4
if d == 0 {
return 0
}
return 4 - d
}
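
XDR (RFC 4506) aligns variable-length data to four-byte boundaries; pad returns the number of zero bytes to append after a payload of length l. A quick sanity check of the arithmetic (for the plain XDR build, i.e. without -tags ipdr):

```go
package xdr

import "testing"

// TestPadAlignment spot-checks that l + pad(l) is always a multiple of four.
func TestPadAlignment(t *testing.T) {
	cases := map[int]int{0: 0, 1: 3, 2: 2, 3: 1, 4: 0, 5: 3, 8: 0}
	for l, want := range cases {
		if got := pad(l); got != want {
			t.Errorf("pad(%d) = %d, want %d", l, got, want)
		}
	}
}
```
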

Godeps/_workspace/src/github.com/calmh/xdr/reader.go (generated, vendored, new file, 170 lines)

@@ -0,0 +1,170 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package xdr
import (
"fmt"
"io"
"reflect"
"unsafe"
)
type Reader struct {
r io.Reader
err error
b [8]byte
}
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
}
}
func (r *Reader) ReadRaw(bs []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
var n int
n, r.err = io.ReadFull(r.r, bs)
return n, r.err
}
func (r *Reader) ReadString() string {
return r.ReadStringMax(0)
}
func (r *Reader) ReadStringMax(max int) string {
buf := r.ReadBytesMaxInto(max, nil)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh := reflect.StringHeader{
Data: bh.Data,
Len: bh.Len,
}
return *((*string)(unsafe.Pointer(&sh)))
}
func (r *Reader) ReadBytes() []byte {
return r.ReadBytesInto(nil)
}
func (r *Reader) ReadBytesMax(max int) []byte {
return r.ReadBytesMaxInto(max, nil)
}
func (r *Reader) ReadBytesInto(dst []byte) []byte {
return r.ReadBytesMaxInto(0, dst)
}
func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
if r.err != nil {
return nil
}
l := int(r.ReadUint32())
if r.err != nil {
return nil
}
if max > 0 && l > max {
r.err = ElementSizeExceeded("bytes field", l, max)
return nil
}
if fullLen := l + pad(l); fullLen > len(dst) {
dst = make([]byte, fullLen)
} else {
dst = dst[:fullLen]
}
var n int
n, r.err = io.ReadFull(r.r, dst)
if r.err != nil {
if debug {
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
}
return nil
}
if debug {
if n > maxDebugBytes {
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
} else {
dl.Printf("rd bytes (%d): %x", len(dst), dst)
}
}
return dst[:l]
}
func (r *Reader) ReadBool() bool {
return r.ReadUint8() != 0
}
func (r *Reader) ReadUint32() uint32 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:4])
if r.err != nil {
if debug {
dl.Printf("rd uint32: %v", r.err)
}
return 0
}
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
if debug {
dl.Printf("rd uint32=%d (0x%08x)", v, v)
}
return v
}
func (r *Reader) ReadUint64() uint64 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:8])
if r.err != nil {
if debug {
dl.Printf("rd uint64: %v", r.err)
}
return 0
}
v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
if debug {
dl.Printf("rd uint64=%d (0x%016x)", v, v)
}
return v
}
type XDRError struct {
op string
err error
}
func (e XDRError) Error() string {
return "xdr " + e.op + ": " + e.err.Error()
}
func (e XDRError) IsEOF() bool {
return e.err == io.EOF
}
func (r *Reader) Error() error {
if r.err == nil {
return nil
}
return XDRError{"read", r.err}
}
func ElementSizeExceeded(field string, size, limit int) error {
return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
}


@@ -0,0 +1,49 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build ipdr
package xdr
import "io"
func (r *Reader) ReadUint8() uint8 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:1])
if r.err != nil {
if debug {
dl.Printf("rd uint8: %v", r.err)
}
return 0
}
if debug {
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
}
return r.b[0]
}
func (r *Reader) ReadUint16() uint16 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:2])
if r.err != nil {
if debug {
dl.Printf("rd uint16: %v", r.err)
}
return 0
}
v := uint16(r.b[1]) | uint16(r.b[0])<<8
if debug {
dl.Printf("rd uint16=%d (0x%04x)", v, v)
}
return v
}


@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !ipdr
package xdr
func (r *Reader) ReadUint8() uint8 {
return uint8(r.ReadUint32())
}
func (r *Reader) ReadUint16() uint16 {
return uint16(r.ReadUint32())
}


@@ -0,0 +1,44 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build refl
package xdr_test
import (
"bytes"
"testing"
refl "github.com/davecgh/go-xdr/xdr"
)
func TestCompareMarshals(t *testing.T) {
e0 := s.MarshalXDR()
e1, err := refl.Marshal(s)
if err != nil {
t.Fatal(err)
}
if bytes.Compare(e0, e1) != 0 {
t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
}
}
func BenchmarkReflMarshal(b *testing.B) {
var err error
for i := 0; i < b.N; i++ {
res, err = refl.Marshal(s)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkReflUnmarshal(b *testing.B) {
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
_, err := refl.Unmarshal(e, &t)
if err != nil {
b.Fatal(err)
}
}
}

Godeps/_workspace/src/github.com/calmh/xdr/writer.go (generated, vendored, new file, 146 lines)

@@ -0,0 +1,146 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"io"
"reflect"
"unsafe"
)
var padBytes = []byte{0, 0, 0}
type Writer struct {
w io.Writer
tot int
err error
b [8]byte
}
type AppendWriter []byte
func (w *AppendWriter) Write(bs []byte) (int, error) {
*w = append(*w, bs...)
return len(bs), nil
}
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
}
}
func (w *Writer) WriteRaw(bs []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
var n int
n, w.err = w.w.Write(bs)
return n, w.err
}
func (w *Writer) WriteString(s string) (int, error) {
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}
func (w *Writer) WriteBytes(bs []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
w.WriteUint32(uint32(len(bs)))
if w.err != nil {
return 0, w.err
}
if debug {
if len(bs) > maxDebugBytes {
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
} else {
dl.Printf("wr bytes (%d): %x", len(bs), bs)
}
}
var l, n int
n, w.err = w.w.Write(bs)
l += n
if p := pad(len(bs)); w.err == nil && p > 0 {
n, w.err = w.w.Write(padBytes[:p])
l += n
}
w.tot += l
return l, w.err
}
func (w *Writer) WriteBool(v bool) (int, error) {
if v {
return w.WriteUint8(1)
} else {
return w.WriteUint8(0)
}
}
func (w *Writer) WriteUint32(v uint32) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint32=%d", v)
}
w.b[0] = byte(v >> 24)
w.b[1] = byte(v >> 16)
w.b[2] = byte(v >> 8)
w.b[3] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:4])
w.tot += l
return l, w.err
}
func (w *Writer) WriteUint64(v uint64) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint64=%d", v)
}
w.b[0] = byte(v >> 56)
w.b[1] = byte(v >> 48)
w.b[2] = byte(v >> 40)
w.b[3] = byte(v >> 32)
w.b[4] = byte(v >> 24)
w.b[5] = byte(v >> 16)
w.b[6] = byte(v >> 8)
w.b[7] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:8])
w.tot += l
return l, w.err
}
func (w *Writer) Tot() int {
return w.tot
}
func (w *Writer) Error() error {
if w.err == nil {
return nil
}
return XDRError{"write", w.err}
}
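
As a usage note: `WriteBytes` emits a four-byte length prefix, then the data, then zero padding to the next four-byte boundary, and errors are checked once at the end via `Error()`. A minimal sketch using the `AppendWriter` helper from this file:

```go
package main

import (
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var out xdr.AppendWriter
	w := xdr.NewWriter(&out)
	w.WriteBytes([]byte("abcde")) // 4-byte length + 5 data bytes + 3 pad bytes
	if err := w.Error(); err != nil {
		panic(err)
	}
	fmt.Println(w.Tot())     // 12
	fmt.Printf("% x\n", out) // 00 00 00 05 61 62 63 64 65 00 00 00
}
```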

View File

@@ -0,0 +1,41 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build ipdr
package xdr
func (w *Writer) WriteUint8(v uint8) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint8=%d", v)
}
w.b[0] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:1])
w.tot += l
return l, w.err
}
func (w *Writer) WriteUint16(v uint16) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint8=%d", v)
}
w.b[0] = byte(v >> 8)
w.b[1] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:2])
w.tot += l
return l, w.err
}

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build !ipdr
package xdr
func (w *Writer) WriteUint8(v uint8) (int, error) {
return w.WriteUint32(uint32(v))
}
func (w *Writer) WriteUint16(v uint16) (int, error) {
return w.WriteUint32(uint32(v))
}

Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"bytes"
"strings"
"testing"
"testing/quick"
)
func TestBytesNil(t *testing.T) {
fn := func(bs []byte) bool {
var b = new(bytes.Buffer)
var w = NewWriter(b)
var r = NewReader(b)
w.WriteBytes(bs)
w.WriteBytes(bs)
r.ReadBytes()
res := r.ReadBytes()
return bytes.Compare(bs, res) == 0
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}
func TestBytesGiven(t *testing.T) {
fn := func(bs []byte) bool {
var b = new(bytes.Buffer)
var w = NewWriter(b)
var r = NewReader(b)
w.WriteBytes(bs)
w.WriteBytes(bs)
res := make([]byte, 12)
res = r.ReadBytesInto(res)
res = r.ReadBytesInto(res)
return bytes.Compare(bs, res) == 0
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}
func TestReadBytesMaxInto(t *testing.T) {
var max = 64
for tot := 32; tot < 128; tot++ {
for diff := -32; diff <= 32; diff++ {
var b = new(bytes.Buffer)
var r = NewReader(b)
var w = NewWriter(b)
var toWrite = make([]byte, tot)
w.WriteBytes(toWrite)
var buf = make([]byte, tot+diff)
var bs = r.ReadBytesMaxInto(max, buf)
if tot <= max {
if read := len(bs); read != tot {
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
}
} else if !strings.Contains(r.err.Error(), "exceeds size") {
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
}
}
}
}
func TestReadStringMax(t *testing.T) {
for tot := 42; tot < 72; tot++ {
for max := 0; max < 128; max++ {
var b = new(bytes.Buffer)
var r = NewReader(b)
var w = NewWriter(b)
var toWrite = make([]byte, tot)
w.WriteBytes(toWrite)
var str = r.ReadStringMax(max)
var read = len(str)
if max == 0 || tot <= max {
if read != tot {
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
}
} else if !strings.Contains(r.err.Error(), "exceeds size") {
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
}
}
}
}

View File

@@ -1,9 +1,3 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3

View File

@@ -20,7 +20,7 @@ token in the bucket represents one byte.
```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
Writer returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
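
For context, a brief sketch of how the token-bucket `Writer` is used; it wraps an `io.Writer`, despite the doc typo visible in this hunk. The import path is assumed from upstream:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/juju/ratelimit"
)

func main() {
	// 100 tokens (bytes) per second, with a burst capacity of 100.
	bucket := ratelimit.NewBucketWithRate(100, 100)
	w := ratelimit.Writer(os.Stdout, bucket)
	io.Copy(w, strings.NewReader("rate limited output\n"))
}
```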
#### type Bucket

View File

@@ -2,13 +2,11 @@
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
// The ratelimit package provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// The ratelimit package provides an efficient token bucket implementation.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit
import (
"math"
"strconv"
"sync"
"time"
@@ -57,7 +55,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
continue
}
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
}
@@ -171,30 +169,6 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
return count
}
// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the buffer will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
return tb.available(time.Now())
}
// available is the internal version of available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
return tb.avail
}
// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
return tb.capacity
}
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
@@ -243,3 +217,10 @@ func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
tb.availTick = currentTick
return
}
func abs(f float64) float64 {
if f < 0 {
return -f
}
return f
}

View File

@@ -5,11 +5,10 @@
package ratelimit
import (
"math"
gc "launchpad.net/gocheck"
"testing"
"time"
gc "gopkg.in/check.v1"
)
func TestPackage(t *testing.T) {
@@ -126,49 +125,6 @@ var takeTests = []struct {
}},
}}
var availTests = []struct {
about string
capacity int64
fillInterval time.Duration
take int64
sleep time.Duration
expectCountAfterTake int64
expectCountAfterSleep int64
}{{
about: "should fill tokens after interval",
capacity: 5,
fillInterval: time.Second,
take: 5,
sleep: time.Second,
expectCountAfterTake: 0,
expectCountAfterSleep: 1,
}, {
about: "should fill tokens plus existing count",
capacity: 2,
fillInterval: time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}, {
about: "shouldn't fill before interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 1,
}, {
about: "should fill only once after 1*interval before 2*interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: 3 * time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}}
func (rateLimitSuite) TestTake(c *gc.C) {
for i, test := range takeTests {
tb := NewBucket(test.fillInterval, test.capacity)
@@ -305,7 +261,7 @@ func (rateLimitSuite) TestPanics(c *gc.C) {
}
func isCloseTo(x, y, tolerance float64) bool {
return math.Abs(x-y)/y < tolerance
return abs(x-y)/y < tolerance
}
func (rateLimitSuite) TestRate(c *gc.C) {
@@ -364,23 +320,6 @@ func (rateLimitSuite) TestNewWithRate(c *gc.C) {
}
}
func TestAvailable(t *testing.T) {
for i, tt := range availTests {
tb := NewBucket(tt.fillInterval, tt.capacity)
if c := tb.takeAvailable(tb.startTime, tt.take); c != tt.take {
t.Fatalf("#%d: %s, take = %d, want = %d", i, tt.about, c, tt.take)
}
if c := tb.available(tb.startTime); c != tt.expectCountAfterTake {
t.Fatalf("#%d: %s, after take, available = %d, want = %d", i, tt.about, c, tt.expectCountAfterTake)
}
if c := tb.available(tb.startTime.Add(tt.sleep)); c != tt.expectCountAfterSleep {
t.Fatalf("#%d: %s, after some time it should fill in new tokens, available = %d, want = %d",
i, tt.about, c, tt.expectCountAfterSleep)
}
}
}
func BenchmarkWait(b *testing.B) {
tb := NewBucket(1, 16*1024)
for i := b.N - 1; i >= 0; i-- {

View File

@@ -4,9 +4,7 @@
There is sometimes utility in finding the current executable file
that is running. This can be used for upgrading the current executable
or finding resources located relative to the executable file. Both
working directory and the os.Args[0] value are arbitrary and cannot
be relied on; os.Args[0] can be "faked".
or finding resources located relative to the executable file.
Multi-platform and supports:
* Linux

View File

@@ -3,31 +3,25 @@
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext // import "github.com/kardianos/osext"
package osext
import "path/filepath"
var cx, ce = executableClean()
func executableClean() (string, error) {
p, err := executable()
return filepath.Clean(p), err
}
// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
return cx, ce
p, err := executable()
return filepath.Clean(p), err
}
// Returns same path as Executable, returns just the folder
// path. Excludes the executable name and any trailing slash.
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
return filepath.Dir(p), nil
folder, _ := filepath.Split(p)
return folder, nil
}
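
A short usage sketch of the two exported functions, with the import path assumed from the vendor directory:

```go
package main

import (
	"fmt"

	"github.com/kardianos/osext"
)

func main() {
	exe, err := osext.Executable() // absolute path of the running binary
	if err != nil {
		panic(err)
	}
	folder, err := osext.ExecutableFolder() // its containing directory
	if err != nil {
		panic(err)
	}
	fmt.Println(exe, folder)
}
```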

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux netbsd solaris dragonfly
// +build linux netbsd openbsd solaris dragonfly
package osext
@@ -11,23 +11,15 @@ import (
"fmt"
"os"
"runtime"
"strings"
)
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
const deletedTag = " (deleted)"
execpath, err := os.Readlink("/proc/self/exe")
if err != nil {
return execpath, err
}
execpath = strings.TrimSuffix(execpath, deletedTag)
execpath = strings.TrimPrefix(execpath, deletedTag)
return execpath, nil
return os.Readlink("/proc/self/exe")
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "dragonfly":
case "openbsd", "dragonfly":
return os.Readlink("/proc/curproc/file")
case "solaris":
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))

View File

@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd openbsd
// +build darwin freebsd
package osext
import (
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
@@ -24,8 +23,6 @@ func executable() (string, error) {
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
case "darwin":
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
case "openbsd":
mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */}
}
n := uintptr(0)
@@ -45,58 +42,14 @@ func executable() (string, error) {
if n == 0 { // This shouldn't happen.
return "", nil
}
var execPath string
switch runtime.GOOS {
case "openbsd":
// buf now contains **argv, with pointers to each of the C-style
// NULL terminated arguments.
var args []string
argv := uintptr(unsafe.Pointer(&buf[0]))
Loop:
for {
argp := *(**[1 << 20]byte)(unsafe.Pointer(argv))
if argp == nil {
break
}
for i := 0; uintptr(i) < n; i++ {
// we don't want the full arguments list
if string(argp[i]) == " " {
break Loop
}
if argp[i] != 0 {
continue
}
args = append(args, string(argp[:i]))
n -= uintptr(i)
break
}
if n < unsafe.Sizeof(argv) {
break
}
argv += unsafe.Sizeof(argv)
n -= unsafe.Sizeof(argv)
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
execPath = args[0]
// There is no canonical way to get an executable path on
// OpenBSD, so check PATH in case we are called directly
if execPath[0] != '/' && execPath[0] != '.' {
execIsInPath, err := exec.LookPath(execPath)
if err == nil {
execPath = execIsInPath
}
}
default:
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
}
execPath = string(buf)
}
var err error
execPath := string(buf)
// execPath will not be empty due to above checks.
// Try to get the absolute path if the execPath is not rooted.
if execPath[0] != '/' {

View File

@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows
package osext
import (
"fmt"
"os"
oexec "os/exec"
"path/filepath"
"runtime"
"testing"
)
const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
func TestExecPath(t *testing.T) {
ep, err := Executable()
if err != nil {
t.Fatalf("ExecPath failed: %v", err)
}
// we want fn to be of the form "dir/prog"
dir := filepath.Dir(filepath.Dir(ep))
fn, err := filepath.Rel(dir, ep)
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
cmd := &oexec.Cmd{}
// make child start with a relative program path
cmd.Dir = dir
cmd.Path = fn
// forge argv[0] for the child, so that we can verify that we correctly
// get the real path of the executable without being influenced by argv[0].
cmd.Args = []string{"-", "-test.run=XXXX"}
cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("exec(self) failed: %v", err)
}
outs := string(out)
if !filepath.IsAbs(outs) {
t.Fatalf("Child returned %q, want an absolute path", out)
}
if !sameFile(outs, ep) {
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
}
}
func sameFile(fn1, fn2 string) bool {
fi1, err := os.Stat(fn1)
if err != nil {
return false
}
fi2, err := os.Stat(fn2)
if err != nil {
return false
}
return os.SameFile(fi1, fi2)
}
func init() {
if e := os.Getenv(execPath_EnvVar); e != "" {
// first chdir to another path
dir := "/"
if runtime.GOOS == "windows" {
dir = filepath.VolumeName(".")
}
os.Chdir(dir)
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
os.Exit(0)
}
}

View File

@@ -0,0 +1,4 @@
# This is the official list of Protocol Authors for copyright purposes.
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Jakob Borg <jakob@nym.se>

View File

@@ -0,0 +1,76 @@
## Reporting Bugs
Please file bugs in the [Github Issue
Tracker](https://github.com/syncthing/protocol/issues).
## Contributing Code
Every contribution is welcome. Following the points below will make this
a smoother process.
Individuals making significant and valuable contributions are given
commit-access to the project. If you make a significant contribution and
are not considered for commit-access, please contact any of the
Syncthing core team members.
All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.
### Authorship
All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit:
$ git config --global user.name "Jane Doe"
$ git config --global user.email janedoe@example.com
You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.
## Coding Style
- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
as much as makes sense.
- All text files use Unix line endings.
- Each commit should be `go fmt` clean.
- The commit message subject should be a single short sentence
describing the change, starting with a capital letter.
- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject.
- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line.
- A contribution solving a single issue or introducing a single new
feature should probably be a single commit based on the current
`master` branch. You may be asked to "rebase" or "squash" your pull
request to make sure this is the case, especially if there have been
amendments during review.
## Licensing
All contributions are made under the same MIT license as the rest of the
project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.
## Tests
Yes please!
## License
MIT

View File

@@ -0,0 +1,13 @@
The BEPv1 Protocol
==================
[![Latest Build](http://img.shields.io/jenkins/s/http/build.syncthing.net/protocol.svg?style=flat-square)](http://build.syncthing.net/job/protocol/lastBuild/)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](http://godoc.org/github.com/syncthing/protocol)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)](http://opensource.org/licenses/MIT)
This is the protocol implementation used by Syncthing.
License
=======
MIT

View File

@@ -0,0 +1,74 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"io"
"time"
)
type TestModel struct {
data []byte
folder string
name string
offset int64
size int
closedCh chan bool
}
func newTestModel() *TestModel {
return &TestModel{
closedCh: make(chan bool),
}
}
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
return t.data, nil
}
func (t *TestModel) Close(deviceID DeviceID, err error) {
close(t.closedCh)
}
func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}
func (t *TestModel) isClosed() bool {
select {
case <-t.closedCh:
return true
case <-time.After(1 * time.Second):
return false // Timeout
}
}
type ErrPipe struct {
io.PipeWriter
written int
max int
err error
closed bool
}
func (e *ErrPipe) Write(data []byte) (int, error) {
if e.closed {
return 0, e.err
}
if e.written+len(data) > e.max {
n, _ := e.PipeWriter.Write(data[:e.max-e.written])
e.PipeWriter.CloseWithError(e.err)
e.closed = true
return n, e.err
}
return e.PipeWriter.Write(data)
}

View File

@@ -4,7 +4,13 @@ package protocol
import "fmt"
type Compression int
const (
CompressMetadata Compression = iota // the zero value is the default, which should be "metadata"
CompressNever
CompressAlways
compressionThreshold = 128 // don't bother compressing messages smaller than this many bytes
)
@@ -25,6 +31,14 @@ var compressionUnmarshal = map[string]Compression{
"always": CompressAlways,
}
func (c Compression) String() string {
s, ok := compressionMarshal[c]
if !ok {
return fmt.Sprintf("unknown:%d", c)
}
return s
}
func (c Compression) GoString() string {
return fmt.Sprintf("%q", c.String())
}

View File

@@ -0,0 +1,15 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"os"
"strings"
"github.com/calmh/logger"
)
var (
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)

View File

@@ -6,7 +6,6 @@ import (
"bytes"
"crypto/sha256"
"encoding/base32"
"encoding/binary"
"errors"
"fmt"
"regexp"
@@ -16,7 +15,6 @@ import (
)
type DeviceID [32]byte
type ShortID uint64
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -66,24 +64,13 @@ func (n DeviceID) Compare(other DeviceID) int {
}
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Equal(n[:], other[:])
}
// Short returns an integer representing bits 0-63 of the device ID.
func (n DeviceID) Short() ShortID {
return ShortID(binary.BigEndian.Uint64(n[:]))
return bytes.Compare(n[:], other[:]) == 0
}
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (s ShortID) String() string {
var bs [8]byte
binary.BigEndian.PutUint64(bs[:], uint64(s))
return base32.StdEncoding.EncodeToString(bs[:])[:7]
}
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")

View File

@@ -74,25 +74,3 @@ func TestMarshallingDeviceID(t *testing.T) {
t.Error("Compare error")
}
}
func TestShortIDString(t *testing.T) {
id, _ := DeviceIDFromString(formatted)
sid := id.Short().String()
if len(sid) != 7 {
t.Errorf("Wrong length for short ID: got %d, want 7", len(sid))
}
want := formatted[:len(sid)]
if sid != want {
t.Errorf("Wrong short ID: got %q, want %q", sid, want)
}
}
func TestDeviceIDFromBytes(t *testing.T) {
id0, _ := DeviceIDFromString(formatted)
id1 := DeviceIDFromBytes(id0[:])
if id1.String() != formatted {
t.Errorf("Wrong device ID, got %q, want %q", id1, formatted)
}
}

View File

@@ -0,0 +1,43 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import "github.com/calmh/xdr"
type header struct {
version int
msgID int
msgType int
compression bool
}
func (h header) encodeXDR(xw *xdr.Writer) (int, error) {
u := encodeHeader(h)
return xw.WriteUint32(u)
}
func (h *header) decodeXDR(xr *xdr.Reader) error {
u := xr.ReadUint32()
*h = decodeHeader(u)
return xr.Error()
}
func encodeHeader(h header) uint32 {
var isComp uint32
if h.compression {
isComp = 1 << 0 // the zeroth bit is the compression bit
}
return uint32(h.version&0xf)<<28 +
uint32(h.msgID&0xfff)<<16 +
uint32(h.msgType&0xff)<<8 +
isComp
}
func decodeHeader(u uint32) header {
return header{
version: int(u>>28) & 0xf,
msgID: int(u>>16) & 0xfff,
msgType: int(u>>8) & 0xff,
compression: u&1 == 1,
}
}
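
A worked example of the bit layout (version in bits 28-31, message ID in bits 16-27, type in bits 8-15, compression in bit 0), as it would run inside the package since the helpers are unexported:

```go
// Inside package protocol; msgType 2 is messageTypeRequest.
h := header{version: 0, msgID: 42, msgType: 2, compression: true}
u := encodeHeader(h)
fmt.Printf("%08x\n", u) // 002a0201: msgID 42<<16 | msgType 2<<8 | compression bit
if decodeHeader(u) != h {
	panic("header round trip failed")
}
```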

View File

@@ -0,0 +1,122 @@
// Copyright (C) 2014 The Protocol Authors.
//go:generate genxdr -o message_xdr.go message.go
package protocol
import "fmt"
type IndexMessage struct {
Folder string
Files []FileInfo
Flags uint32
Options []Option // max:64
}
type FileInfo struct {
Name string // max:8192
Flags uint32
Modified int64
Version int64
LocalVersion int64
Blocks []BlockInfo
}
func (f FileInfo) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, Blocks:%v}",
f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
}
func (f FileInfo) Size() (bytes int64) {
if f.IsDeleted() || f.IsDirectory() {
return 128
}
for _, b := range f.Blocks {
bytes += int64(b.Size)
}
return
}
func (f FileInfo) IsDeleted() bool {
return f.Flags&FlagDeleted != 0
}
func (f FileInfo) IsInvalid() bool {
return f.Flags&FlagInvalid != 0
}
func (f FileInfo) IsDirectory() bool {
return f.Flags&FlagDirectory != 0
}
func (f FileInfo) IsSymlink() bool {
return f.Flags&FlagSymlink != 0
}
func (f FileInfo) HasPermissionBits() bool {
return f.Flags&FlagNoPermBits == 0
}
type BlockInfo struct {
Offset int64 // noencode (cache only)
Size int32
Hash []byte // max:64
}
func (b BlockInfo) String() string {
return fmt.Sprintf("Block{%d/%d/%x}", b.Offset, b.Size, b.Hash)
}
type RequestMessage struct {
Folder string // max:64
Name string // max:8192
Offset int64
Size int32
Hash []byte // max:64
Flags uint32
Options []Option // max:64
}
type ResponseMessage struct {
Data []byte
Error int32
}
type ClusterConfigMessage struct {
ClientName string // max:64
ClientVersion string // max:64
Folders []Folder
Options []Option // max:64
}
func (o *ClusterConfigMessage) GetOption(key string) string {
for _, option := range o.Options {
if option.Key == key {
return option.Value
}
}
return ""
}
type Folder struct {
ID string // max:64
Devices []Device
}
type Device struct {
ID []byte // max:32
Flags uint32
MaxLocalVersion int64
}
type Option struct {
Key string // max:64
Value string // max:1024
}
type CloseMessage struct {
Reason string // max:1024
Code int32
}
type EmptyMessage struct{}
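
A small illustration of `GetOption` as defined above: the linear scan returns the value of the first matching key, and the empty string when the key is absent. Import path assumed from this repository:

```go
package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	cc := protocol.ClusterConfigMessage{
		Options: []protocol.Option{{Key: "name", Value: "node-a"}},
	}
	fmt.Println(cc.GetOption("name")) // "node-a"
	fmt.Println(cc.GetOption("nope")) // "" for a missing key
}
```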

View File

File diff suppressed because it is too large

View File

@@ -9,24 +9,32 @@ package protocol
import "golang.org/x/text/unicode/norm"
type nativeModel struct {
Model
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.Model.Index(deviceID, folder, files)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.Model.IndexUpdate(deviceID, folder, files)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = norm.NFD.String(name)
return m.Model.Request(deviceID, folder, name, offset, hash, fromTemporary, buf)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}
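
For context, the NFD/NFC distinction this wrapper handles: Darwin file systems store names decomposed (NFD) while the wire format is composed (NFC). A minimal sketch using the same normalization package:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	name := "café" // NFC: 'é' is a single code point, 5 bytes total
	nfd := norm.NFD.String(name)
	fmt.Println(len(name), len(nfd))          // 5 6 ('e' plus a combining accent)
	fmt.Println(norm.NFC.String(nfd) == name) // true: round trip restores NFC
}
```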

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2014 The Protocol Authors.
// +build !windows,!darwin
package protocol
// Normal Unixes use NFC and slashes, which is the wire format.
type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -0,0 +1,70 @@
// Copyright (C) 2014 The Protocol Authors.
// +build windows
package protocol
// Windows uses backslashes as the file separator and disallows a number
// of characters in file names
import (
"path/filepath"
"strings"
)
var disallowedCharacters = string([]rune{
'<', '>', ':', '"', '|', '?', '*',
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31,
})
type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Flags |= FlagInvalid
l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
}
files[i].Name = filepath.FromSlash(f.Name)
}
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Flags |= FlagInvalid
l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
}
files[i].Name = filepath.FromSlash(files[i].Name)
}
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = filepath.FromSlash(name)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -0,0 +1,725 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"sync"
"time"
lz4 "github.com/bkaradzic/go-lz4"
)
const (
BlockSize = 128 * 1024
)
const (
messageTypeClusterConfig = 0
messageTypeIndex = 1
messageTypeRequest = 2
messageTypeResponse = 3
messageTypePing = 4
messageTypePong = 5
messageTypeIndexUpdate = 6
messageTypeClose = 7
)
const (
stateInitial = iota
stateCCRcvd
stateIdxRcvd
)
const (
FlagDeleted uint32 = 1 << 12
FlagInvalid = 1 << 13
FlagDirectory = 1 << 14
FlagNoPermBits = 1 << 15
FlagSymlink = 1 << 16
FlagSymlinkMissingTarget = 1 << 17
FlagsAll = (1 << 18) - 1
SymlinkTypeMask = FlagDirectory | FlagSymlinkMissingTarget
)
const (
FlagShareTrusted uint32 = 1 << 0
FlagShareReadOnly = 1 << 1
FlagIntroducer = 1 << 2
FlagShareBits = 0x000000ff
)
var (
ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
ErrClosed = errors.New("connection closed")
)
// Specific variants of empty messages...
type pingMessage struct{ EmptyMessage }
type pongMessage struct{ EmptyMessage }
type Model interface {
// An index was received from the peer device
Index(deviceID DeviceID, folder string, files []FileInfo)
// An index update was received from the peer device
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
// A request was made by the peer device
Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
// A cluster configuration message was received
ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
// The peer device closed the connection
Close(deviceID DeviceID, err error)
}
type Connection interface {
ID() DeviceID
Name() string
Index(folder string, files []FileInfo) error
IndexUpdate(folder string, files []FileInfo) error
Request(folder string, name string, offset int64, size int) ([]byte, error)
ClusterConfig(config ClusterConfigMessage)
Statistics() Statistics
}
type rawConnection struct {
id DeviceID
name string
receiver Model
state int
cr *countingReader
cw *countingWriter
awaiting [4096]chan asyncResult
awaitingMut sync.Mutex
idxMut sync.Mutex // ensures serialization of Index calls
nextID chan int
outbox chan hdrMsg
closed chan struct{}
once sync.Once
compression Compression
rdbuf0 []byte // used & reused by readMessage
rdbuf1 []byte // used & reused by readMessage
}
type asyncResult struct {
val []byte
err error
}
type hdrMsg struct {
hdr header
msg encodable
}
type encodable interface {
AppendXDR([]byte) ([]byte, error)
}
type isEofer interface {
IsEOF() bool
}
const (
pingTimeout = 30 * time.Second
pingIdleTime = 60 * time.Second
)
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
cr := &countingReader{Reader: reader}
cw := &countingWriter{Writer: writer}
c := rawConnection{
id: deviceID,
name: name,
receiver: nativeModel{receiver},
state: stateInitial,
cr: cr,
cw: cw,
outbox: make(chan hdrMsg),
nextID: make(chan int),
closed: make(chan struct{}),
compression: compress,
}
go c.readerLoop()
go c.writerLoop()
go c.pingerLoop()
go c.idGenerator()
return wireFormatConnection{&c}
}
func (c *rawConnection) ID() DeviceID {
return c.id
}
func (c *rawConnection) Name() string {
return c.name
}
// Index writes the list of file information to the connected peer device
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndex, IndexMessage{
Folder: folder,
Files: idx,
})
c.idxMut.Unlock()
return nil
}
// IndexUpdate writes the list of file information to the connected peer device as an update
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndexUpdate, IndexMessage{
Folder: folder,
Files: idx,
})
c.idxMut.Unlock()
return nil
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return nil, ErrClosed
}
c.awaitingMut.Lock()
if ch := c.awaiting[id]; ch != nil {
panic("id taken")
}
rc := make(chan asyncResult, 1)
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypeRequest, RequestMessage{
Folder: folder,
Name: name,
Offset: offset,
Size: int32(size),
})
if !ok {
return nil, ErrClosed
}
res, ok := <-rc
if !ok {
return nil, ErrClosed
}
return res.val, res.err
}
// ClusterConfig sends the cluster configuration message to the peer
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
c.send(-1, messageTypeClusterConfig, config)
}
func (c *rawConnection) ping() bool {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return false
}
rc := make(chan asyncResult, 1)
c.awaitingMut.Lock()
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypePing, nil)
if !ok {
return false
}
res, ok := <-rc
return ok && res.err == nil
}
func (c *rawConnection) readerLoop() (err error) {
defer func() {
c.close(err)
}()
for {
select {
case <-c.closed:
return ErrClosed
default:
}
hdr, msg, err := c.readMessage()
if err != nil {
return err
}
switch msg := msg.(type) {
case IndexMessage:
if msg.Flags != 0 {
// We don't currently support or expect any flags.
return fmt.Errorf("protocol error: unknown flags 0x%x in Index(Update) message", msg.Flags)
}
switch hdr.msgType {
case messageTypeIndex:
if c.state < stateCCRcvd {
return fmt.Errorf("protocol error: index message in state %d", c.state)
}
c.handleIndex(msg)
c.state = stateIdxRcvd
case messageTypeIndexUpdate:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: index update message in state %d", c.state)
}
c.handleIndexUpdate(msg)
}
case RequestMessage:
if msg.Flags != 0 {
// We don't currently support or expect any flags.
return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", msg.Flags)
}
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: request message in state %d", c.state)
}
// Requests are handled asynchronously
go c.handleRequest(hdr.msgID, msg)
case ResponseMessage:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: response message in state %d", c.state)
}
c.handleResponse(hdr.msgID, msg)
case pingMessage:
c.send(hdr.msgID, messageTypePong, pongMessage{})
case pongMessage:
c.handlePong(hdr.msgID)
case ClusterConfigMessage:
if c.state != stateInitial {
return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
}
go c.receiver.ClusterConfig(c.id, msg)
c.state = stateCCRcvd
case CloseMessage:
return errors.New(msg.Reason)
default:
return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
}
}
}
func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
if cap(c.rdbuf0) < 8 {
c.rdbuf0 = make([]byte, 8)
} else {
c.rdbuf0 = c.rdbuf0[:8]
}
_, err = io.ReadFull(c.cr, c.rdbuf0)
if err != nil {
return
}
hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4]))
msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8]))
if debug {
l.Debugf("read header %v (msglen=%d)", hdr, msglen)
}
if hdr.version != 0 {
err = fmt.Errorf("unknown protocol version 0x%x", hdr.version)
return
}
if cap(c.rdbuf0) < msglen {
c.rdbuf0 = make([]byte, msglen)
} else {
c.rdbuf0 = c.rdbuf0[:msglen]
}
_, err = io.ReadFull(c.cr, c.rdbuf0)
if err != nil {
return
}
if debug {
l.Debugf("read %d bytes", len(c.rdbuf0))
}
msgBuf := c.rdbuf0
if hdr.compression {
c.rdbuf1 = c.rdbuf1[:cap(c.rdbuf1)]
c.rdbuf1, err = lz4.Decode(c.rdbuf1, c.rdbuf0)
if err != nil {
return
}
msgBuf = c.rdbuf1
if debug {
l.Debugf("decompressed to %d bytes", len(msgBuf))
}
}
if debug {
if len(msgBuf) > 1024 {
l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
} else {
l.Debugf("message data:\n%s", hex.Dump(msgBuf))
}
}
// We check each returned error for the XDRError.IsEOF() method.
// IsEOF()==true here means that the message contained fewer fields than
// expected. It does not signify an EOF on the socket, because we've
// successfully read a size value and that many bytes already. New fields
// we expected but the other peer didn't send should be interpreted as
// zero/nil, and if that's not valid we'll verify it somewhere else.
switch hdr.msgType {
case messageTypeIndex, messageTypeIndexUpdate:
var idx IndexMessage
err = idx.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = idx
case messageTypeRequest:
var req RequestMessage
err = req.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = req
case messageTypeResponse:
var resp ResponseMessage
err = resp.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = resp
case messageTypePing:
msg = pingMessage{}
case messageTypePong:
msg = pongMessage{}
case messageTypeClusterConfig:
var cc ClusterConfigMessage
err = cc.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = cc
case messageTypeClose:
var cm CloseMessage
err = cm.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = cm
default:
err = fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
}
return
}
func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}
func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
var out []FileInfo
for i, f := range fs {
switch f.Name {
case "", ".", "..", "/": // A few obviously invalid filenames
l.Infof("Dropping invalid filename %q from incoming index", f.Name)
if out == nil {
// Most incoming updates won't contain anything invalid, so we
// delay the allocation and copy to output slice until we
// really need to do it, then copy all the so far valid files
// to it.
out = make([]FileInfo, i, len(fs)-1)
copy(out, fs)
}
default:
if out != nil {
out = append(out, f)
}
}
}
if out != nil {
return out
}
return fs
}
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
c.send(msgID, messageTypeResponse, ResponseMessage{
Data: data,
})
}
func (c *rawConnection) handleResponse(msgID int, resp ResponseMessage) {
c.awaitingMut.Lock()
if rc := c.awaiting[msgID]; rc != nil {
c.awaiting[msgID] = nil
rc <- asyncResult{resp.Data, nil}
close(rc)
}
c.awaitingMut.Unlock()
}
func (c *rawConnection) handlePong(msgID int) {
c.awaitingMut.Lock()
if rc := c.awaiting[msgID]; rc != nil {
c.awaiting[msgID] = nil
rc <- asyncResult{}
close(rc)
}
c.awaitingMut.Unlock()
}
func (c *rawConnection) send(msgID int, msgType int, msg encodable) bool {
if msgID < 0 {
select {
case id := <-c.nextID:
msgID = id
case <-c.closed:
return false
}
}
hdr := header{
version: 0,
msgID: msgID,
msgType: msgType,
}
select {
case c.outbox <- hdrMsg{hdr, msg}:
return true
case <-c.closed:
return false
}
}
func (c *rawConnection) writerLoop() {
var msgBuf = make([]byte, 8) // buffer for wire format message, kept and reused
var uncBuf []byte // buffer for uncompressed message, kept and reused
for {
var tempBuf []byte
var err error
select {
case hm := <-c.outbox:
if hm.msg != nil {
// Uncompressed message in uncBuf
uncBuf, err = hm.msg.AppendXDR(uncBuf[:0])
if err != nil {
c.close(err)
return
}
compress := false
switch c.compression {
case CompressAlways:
compress = true
case CompressMetadata:
compress = hm.hdr.msgType != messageTypeResponse
}
if compress && len(uncBuf) >= compressionThreshold {
// Use compression for large messages
hm.hdr.compression = true
// Make sure we have enough space for the compressed message plus header in msgBuf
msgBuf = msgBuf[:cap(msgBuf)]
if maxLen := lz4.CompressBound(len(uncBuf)) + 8; maxLen > len(msgBuf) {
msgBuf = make([]byte, maxLen)
}
// Compressed data is written to msgBuf; we keep tempBuf for the length only
tempBuf, err = lz4.Encode(msgBuf[8:], uncBuf)
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf)))
msgBuf = msgBuf[0 : len(tempBuf)+8]
if debug {
l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
}
} else {
// No point in compressing very short messages
hm.hdr.compression = false
msgBuf = msgBuf[:cap(msgBuf)]
if l := len(uncBuf) + 8; l > len(msgBuf) {
msgBuf = make([]byte, l)
}
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(uncBuf)))
msgBuf = msgBuf[0 : len(uncBuf)+8]
copy(msgBuf[8:], uncBuf)
if debug {
l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
}
}
} else {
if debug {
l.Debugf("write empty message; %v", hm.hdr)
}
binary.BigEndian.PutUint32(msgBuf[4:8], 0)
msgBuf = msgBuf[:8]
}
binary.BigEndian.PutUint32(msgBuf[0:4], encodeHeader(hm.hdr))
if err == nil {
var n int
n, err = c.cw.Write(msgBuf)
if debug {
l.Debugf("wrote %d bytes on the wire", n)
}
}
if err != nil {
c.close(err)
return
}
case <-c.closed:
return
}
}
}
func (c *rawConnection) close(err error) {
c.once.Do(func() {
close(c.closed)
c.awaitingMut.Lock()
for i, ch := range c.awaiting {
if ch != nil {
close(ch)
c.awaiting[i] = nil
}
}
c.awaitingMut.Unlock()
go c.receiver.Close(c.id, err)
})
}
func (c *rawConnection) idGenerator() {
nextID := 0
for {
nextID = (nextID + 1) & 0xfff
select {
case c.nextID <- nextID:
case <-c.closed:
return
}
}
}
func (c *rawConnection) pingerLoop() {
var rc = make(chan bool, 1)
ticker := time.Tick(pingIdleTime / 2)
for {
select {
case <-ticker:
if d := time.Since(c.cr.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after rd", d)
}
continue
}
if d := time.Since(c.cw.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after wr", d)
}
continue
}
go func() {
if debug {
l.Debugln(c.id, "ping ->")
}
rc <- c.ping()
}()
select {
case ok := <-rc:
if debug {
l.Debugln(c.id, "<- pong")
}
if !ok {
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(pingTimeout):
c.close(fmt.Errorf("ping timeout"))
case <-c.closed:
return
}
case <-c.closed:
return
}
}
}
type Statistics struct {
At time.Time
InBytesTotal int64
OutBytesTotal int64
}
func (c *rawConnection) Statistics() Statistics {
return Statistics{
At: time.Now(),
InBytesTotal: c.cr.Tot(),
OutBytesTotal: c.cw.Tot(),
}
}
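
To summarize the framing used by `readMessage` and `writerLoop`: each message is a four-byte header word (see header.go) followed by a four-byte big-endian length and the XDR-encoded, optionally LZ4-compressed body. A sketch of building one frame, as it would run inside the package:

```go
// Inside package protocol; encodeHeader and the type constants are unexported.
payload := []byte{} // a ping carries no body
frame := make([]byte, 8+len(payload))
binary.BigEndian.PutUint32(frame[0:4], encodeHeader(header{
	version: 0,
	msgID:   1,
	msgType: messageTypePing,
}))
binary.BigEndian.PutUint32(frame[4:8], uint32(len(payload)))
copy(frame[8:], payload)
```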

View File

@@ -0,0 +1,382 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
"testing/quick"
"github.com/calmh/xdr"
)
var (
c0ID = NewDeviceID([]byte{1})
c1ID = NewDeviceID([]byte{2})
)
func TestHeaderFunctions(t *testing.T) {
f := func(ver, id, typ int) bool {
ver = int(uint(ver) % 16)
id = int(uint(id) % 4096)
typ = int(uint(typ) % 256)
h0 := header{version: ver, msgID: id, msgType: typ}
h1 := decodeHeader(encodeHeader(h0))
return h0 == h1
}
if err := quick.Check(f, nil); err != nil {
t.Error(err)
}
}
func TestHeaderLayout(t *testing.T) {
var e, a uint32
// Version are the first four bits
e = 0xf0000000
a = encodeHeader(header{version: 0xf})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Message ID are the following 12 bits
e = 0x0fff0000
a = encodeHeader(header{msgID: 0xfff})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Type are the last 8 bits before reserved
e = 0x0000ff00
a = encodeHeader(header{msgType: 0xff})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
}
func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
}
if ok := c1.ping(); !ok {
t.Error("c1 ping failed")
}
}
func TestPingErr(t *testing.T) {
e := errors.New("something broke")
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
res := c0.ping()
if (i < 8 || j < 8) && res {
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 12 && j >= 12) && !res {
t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
}
}
}
}
// func TestRequestResponseErr(t *testing.T) {
// e := errors.New("something broke")
// var pass bool
// for i := 0; i < 48; i++ {
// for j := 0; j < 38; j++ {
// m0 := newTestModel()
// m0.data = []byte("response data")
// m1 := newTestModel()
// ar, aw := io.Pipe()
// br, bw := io.Pipe()
// eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
// ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
// NewConnection(c0ID, ar, ebw, m0, nil)
// c1 := NewConnection(c1ID, br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection)
// d, err := c1.Request("default", "tn", 1234, 5678)
// if err == e || err == ErrClosed {
// t.Logf("Error at %d+%d bytes", i, j)
// if !m1.isClosed() {
// t.Fatal("c1 not closed")
// }
// if !m0.isClosed() {
// t.Fatal("c0 not closed")
// }
// continue
// }
// if err != nil {
// t.Fatal(err)
// }
// if string(d) != "response data" {
// t.Fatalf("Incorrect response data %q", string(d))
// }
// if m0.folder != "default" {
// t.Fatalf("Incorrect folder %q", m0.folder)
// }
// if m0.name != "tn" {
// t.Fatalf("Incorrect name %q", m0.name)
// }
// if m0.offset != 1234 {
// t.Fatalf("Incorrect offset %d", m0.offset)
// }
// if m0.size != 5678 {
// t.Fatalf("Incorrect size %d", m0.size)
// }
// t.Logf("Pass at %d+%d bytes", i, j)
// pass = true
// }
// }
// if !pass {
// t.Fatal("Never passed")
// }
// }
func TestVersionErr(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
version: 2,
msgID: 0,
msgType: 0,
}))
w.WriteUint32(0) // Avoids reader closing due to EOF
if !m1.isClosed() {
t.Error("Connection should close due to unknown version")
}
}
func TestTypeErr(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
version: 0,
msgID: 0,
msgType: 42,
}))
w.WriteUint32(0) // Avoids reader closing due to EOF
if !m1.isClosed() {
t.Error("Connection should close due to unknown message type")
}
}
func TestClose(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c0.close(nil)
<-c0.closed
if !m0.isClosed() {
t.Fatal("Connection should be closed")
}
// None of these should panic, some should return an error
if c0.ping() {
t.Error("Ping should not return true")
}
c0.Index("default", nil)
c0.Index("default", nil)
if _, err := c0.Request("default", "foo", 0, 0); err == nil {
t.Error("Request should return an error")
}
}
func TestElementSizeExceededNested(t *testing.T) {
m := ClusterConfigMessage{
Folders: []Folder{
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
},
}
_, err := m.EncodeXDR(ioutil.Discard)
if err == nil {
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
}
}
func TestMarshalIndexMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 IndexMessage) bool {
for _, f := range m1.Files {
for i := range f.Blocks {
f.Blocks[i].Offset = 0
if len(f.Blocks[i].Hash) == 0 {
f.Blocks[i].Hash = nil
}
}
}
return testMarshal(t, "index", &m1, &IndexMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalRequestMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 RequestMessage) bool {
return testMarshal(t, "request", &m1, &RequestMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalResponseMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 ResponseMessage) bool {
if len(m1.Data) == 0 {
m1.Data = nil
}
return testMarshal(t, "response", &m1, &ResponseMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalClusterConfigMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 ClusterConfigMessage) bool {
return testMarshal(t, "clusterconfig", &m1, &ClusterConfigMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalCloseMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 CloseMessage) bool {
return testMarshal(t, "close", &m1, &CloseMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
type message interface {
EncodeXDR(io.Writer) (int, error)
DecodeXDR(io.Reader) error
}
func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
var buf bytes.Buffer
failed := func(bc []byte) {
bs, _ := json.MarshalIndent(m1, "", " ")
ioutil.WriteFile(prefix+"-1.txt", bs, 0644)
bs, _ = json.MarshalIndent(m2, "", " ")
ioutil.WriteFile(prefix+"-2.txt", bs, 0644)
if len(bc) > 0 {
f, _ := os.Create(prefix + "-data.txt")
fmt.Fprint(f, hex.Dump(bc))
f.Close()
}
}
_, err := m1.EncodeXDR(&buf)
if err != nil && strings.Contains(err.Error(), "exceeds size") {
return true
}
if err != nil {
failed(nil)
t.Fatal(err)
}
bc := make([]byte, len(buf.Bytes()))
copy(bc, buf.Bytes())
err = m2.DecodeXDR(&buf)
if err != nil {
failed(bc)
t.Fatal(err)
}
ok := reflect.DeepEqual(m1, m2)
if !ok {
failed(bc)
}
return ok
}

View File

@@ -9,7 +9,15 @@ import (
)
type wireFormatConnection struct {
Connection
next Connection
}
func (c wireFormatConnection) ID() DeviceID {
return c.next.ID()
}
func (c wireFormatConnection) Name() string {
return c.next.Name()
}
func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
@@ -20,7 +28,7 @@ func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.Connection.Index(folder, myFs)
return c.next.Index(folder, myFs)
}
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
@@ -31,10 +39,18 @@ func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.Connection.IndexUpdate(folder, myFs)
return c.next.IndexUpdate(folder, myFs)
}
func (c wireFormatConnection) Request(folder, name string, offset int64, size int, hash []byte, fromTemporary bool) ([]byte, error) {
func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
name = norm.NFC.String(filepath.ToSlash(name))
return c.Connection.Request(folder, name, offset, size, hash, fromTemporary)
return c.next.Request(folder, name, offset, size)
}
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
c.next.ClusterConfig(config)
}
func (c wireFormatConnection) Statistics() Statistics {
return c.next.Statistics()
}

View File

@@ -12,10 +12,8 @@ import (
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrBatchCorrupted records reason of batch corruption.
type ErrBatchCorrupted struct {
Reason string
}
@@ -25,7 +23,7 @@ func (e *ErrBatchCorrupted) Error() string {
}
func newErrBatchCorrupted(reason string) error {
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
}
const (
@@ -33,7 +31,6 @@ const (
batchGrowRec = 3000
)
// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
@@ -70,20 +67,20 @@ func (b *Batch) grow(n int) {
}
}
func (b *Batch) appendRec(kt keyType, key, value []byte) {
func (b *Batch) appendRec(kt kType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
if kt == keyTypeVal {
if kt == ktVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
off := len(b.data)
data := b.data[:off+n]
data[off] = byte(kt)
off++
off += 1
off += binary.PutUvarint(data[off:], uint64(len(key)))
copy(data[off:], key)
off += len(key)
if kt == keyTypeVal {
if kt == ktVal {
off += binary.PutUvarint(data[off:], uint64(len(value)))
copy(data[off:], value)
off += len(value)
@@ -97,13 +94,13 @@ func (b *Batch) appendRec(kt keyType, key, value []byte) {
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
b.appendRec(ktVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
b.appendRec(ktDel, key, nil)
}
// Dump dumps batch contents. The returned slice can be loaded into the
@@ -124,14 +121,13 @@ func (b *Batch) Load(data []byte) error {
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
return b.decodeRec(func(i int, kt kType, key, value []byte) {
switch kt {
case keyTypeVal:
case ktVal:
r.Put(key, value)
case keyTypeDel:
case ktDel:
r.Delete(key)
}
return nil
})
}
@@ -158,7 +154,6 @@ func (b *Batch) append(p *Batch) {
b.grow(len(p.data) - batchHdrLen)
b.data = append(b.data, p.data[batchHdrLen:]...)
b.rLen += p.rLen
b.bLen += p.bLen
}
if p.sync {
b.sync = true
@@ -198,19 +193,18 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
return nil
}
func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
off := batchHdrLen
for i := 0; i < b.rLen; i++ {
if off >= len(b.data) {
return newErrBatchCorrupted("invalid records length")
}
kt := keyType(b.data[off])
if kt > keyTypeVal {
panic(kt)
kt := kType(b.data[off])
if kt > ktVal {
return newErrBatchCorrupted("bad record: invalid type")
}
off++
off += 1
x, n := binary.Uvarint(b.data[off:])
off += n
@@ -220,7 +214,7 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
key := b.data[off : off+int(x)]
off += int(x)
var value []byte
if kt == keyTypeVal {
if kt == ktVal {
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
@@ -230,19 +224,16 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
off += int(x)
}
if err := f(i, kt, key, value); err != nil {
return err
}
f(i, kt, key, value)
}
return nil
}
func (b *Batch) memReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Put(ikScratch, value)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Put(ikey, value)
})
}
@@ -254,9 +245,8 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
}
func (b *Batch) revertMemReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Delete(ikScratch)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Delete(ikey)
})
}
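
For reference, the record layout that appendRec and decodeRec agree on is: one kind byte, a uvarint key length, the key bytes, and, for puts only, a uvarint value length followed by the value bytes. A self-contained sketch of that encoding (recDel/recVal stand in for the package's ktDel/ktVal; this is an illustration, not the package's own code):

package main

import (
	"encoding/binary"
	"fmt"
)

// Record kinds: a delete carries only a key, a put carries key and value.
const (
	recDel byte = iota
	recVal
)

// appendUvarint writes x as a uvarint onto buf via a scratch buffer,
// mirroring the PutUvarint calls in appendRec above.
func appendUvarint(buf []byte, x uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], x)
	return append(buf, tmp[:n]...)
}

// appendRec lays out one batch record: kind byte, uvarint key length,
// key bytes, then (for puts) uvarint value length and value bytes.
func appendRec(buf []byte, kind byte, key, value []byte) []byte {
	buf = append(buf, kind)
	buf = appendUvarint(buf, uint64(len(key)))
	buf = append(buf, key...)
	if kind == recVal {
		buf = appendUvarint(buf, uint64(len(value)))
		buf = append(buf, value...)
	}
	return buf
}

func main() {
	rec := appendRec(nil, recVal, []byte("k"), []byte("v"))
	fmt.Printf("% x\n", rec) // 01 01 6b 01 76
}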

View File

@@ -15,7 +15,7 @@ import (
)
type tbRec struct {
kt keyType
kt kType
key, value []byte
}
@@ -24,11 +24,11 @@ type testBatch struct {
}
func (p *testBatch) Put(key, value []byte) {
p.rec = append(p.rec, &tbRec{keyTypeVal, key, value})
p.rec = append(p.rec, &tbRec{ktVal, key, value})
}
func (p *testBatch) Delete(key []byte) {
p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil})
p.rec = append(p.rec, &tbRec{ktDel, key, nil})
}
func compareBatch(t *testing.T, b1, b2 *Batch) {
@@ -55,7 +55,7 @@ func compareBatch(t *testing.T, b1, b2 *Batch) {
if !bytes.Equal(r1.key, r2.key) {
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
}
if r1.kt == keyTypeVal {
if r1.kt == ktVal {
if !bytes.Equal(r1.value, r2.value) {
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
}
@@ -100,9 +100,6 @@ func TestBatch_Append(t *testing.T) {
b2b.Put([]byte("bar"), []byte("barvalue"))
b2a.append(b2b)
compareBatch(t, b1, b2a)
if b1.size() != b2a.size() {
t.Fatalf("invalid batch size want %d, got %d", b1.size(), b2a.size())
}
}
func TestBatch_Size(t *testing.T) {
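
testBatch above is simply an implementation of the exported BatchReplay interface. A hedged usage sketch of the public API (the printReplay type is illustrative):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay satisfies leveldb.BatchReplay and just logs each record.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("foo"), []byte("bar"))
	b.Delete([]byte("baz"))
	// Replay decodes the batch and feeds each record back through
	// the BatchReplay interface, in insertion order.
	if err := b.Replay(printReplay{}); err != nil {
		fmt.Println("replay:", err)
	}
}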

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package leveldb
import (
"sync/atomic"
"testing"
)
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}
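
These benchmarks rely on b.RunParallel, which splits b.N iterations across GOMAXPROCS goroutines; each worker owns its own iterator and pulls iterations via pb.Next(). The shape in isolation (package and names illustrative):

package demo

import "testing"

// Each RunParallel worker receives its own *testing.PB; pb.Next reports
// whether the worker should run another iteration, so the b.N total is
// divided among the goroutines.
func BenchmarkParallel(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		// per-goroutine state (e.g. one iterator) is set up here, once
		n := 0
		for pb.Next() {
			n++ // one unit of work per iteration
		}
		_ = n
	})
}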

View File

@@ -13,7 +13,6 @@ import (
"os"
"path/filepath"
"runtime"
"sync/atomic"
"testing"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -91,7 +90,7 @@ func openDBBench(b *testing.B, noCompress bool) *dbBench {
ro: &opt.ReadOptions{},
wo: &opt.WriteOptions{},
}
p.stor, err = storage.OpenFile(benchDB, false)
p.stor, err = storage.OpenFile(benchDB)
if err != nil {
b.Fatal("cannot open stor: ", err)
}
@@ -463,47 +462,3 @@ func BenchmarkDBGetRandom(b *testing.B) {
p.gets()
p.close()
}
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}

View File

@@ -4,12 +4,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package cache
import (
"math/rand"
"testing"
"time"
)
func BenchmarkLRUCache(b *testing.B) {

View File

@@ -47,21 +47,17 @@ type Cacher interface {
// so that the Release method will be called once the object is released.
type Value interface{}
// NamespaceGetter provides convenient wrapper for namespace.
type NamespaceGetter struct {
type CacheGetter struct {
Cache *Cache
NS uint64
}
// Get simply calls Cache.Get() method.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash tables implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
// Kunlong Zhang, and Michael Spear.
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
mInitialSize = 1 << 4
@@ -614,12 +610,10 @@ func (n *Node) unrefLocked() {
}
}
// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}
// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
@@ -628,8 +622,6 @@ func (h *Handle) Value() Value {
return nil
}
// Release releases this 'cache handle'.
// It is safe to call release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
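
Usage of the cache package looks the same on both sides of the rename; only the namespace getter's type name changes. A sketch, assuming the package's NewCache and NewLRU constructors (namespace and key values here are arbitrary):

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(16 * 1024)) // 16 KiB LRU budget

	// Get returns the cached node or invokes setFunc to fill it; the
	// returned size is what counts against the LRU capacity.
	h := c.Get(1, 42, func() (size int, value cache.Value) {
		return len("hello"), "hello"
	})
	fmt.Println(h.Value().(string)) // "hello"
	h.Release()                     // per the comment above, safe to call twice
}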

View File

@@ -45,8 +45,9 @@ func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle
return c.Get(ns, key, func() (int, Value) {
if relf != nil {
return charge, releaserFunc{relf, value}
} else {
return charge, value
}
return charge, value
})
}

View File

@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
}
func (icmp *iComparer) Compare(a, b []byte) int {
x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
if x == 0 {
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
if m, n := iKey(a).num(), iKey(b).num(); m > n {
x = -1
} else if m < n {
x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
}
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
ua, ub := iKey(a).ukey(), iKey(b).ukey()
dst = icmp.ucmp.Separator(dst, ua, ub)
if dst == nil {
return nil
}
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Does not rule out that n may be longer than len(ub).
dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
}
func (icmp *iComparer) Successor(dst, b []byte) []byte {
ub := internalKey(b).ukey()
ub := iKey(b).ukey()
dst = icmp.ucmp.Successor(dst, ub)
if dst == nil {
return nil
}
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Does not rule out that n may be longer than len(ub).
dst = append(dst, b[len(b)-8:]...)
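
The comparer works on internal keys: a user key followed by eight trailing bytes packing (sequence<<8 | kind), compared user-key-ascending and then number-descending, so the newest version of a key sorts first. An illustrative re-implementation of that ordering (bytes.Compare stands in for the configured user comparer):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// compareInternal: user keys ascending, then the trailing 8-byte
// (sequence<<8 | kind) field descending.
func compareInternal(a, b []byte) int {
	if x := bytes.Compare(a[:len(a)-8], b[:len(b)-8]); x != 0 {
		return x
	}
	an := binary.LittleEndian.Uint64(a[len(a)-8:])
	bn := binary.LittleEndian.Uint64(b[len(b)-8:])
	switch {
	case an > bn:
		return -1 // higher sequence number sorts first
	case an < bn:
		return 1
	}
	return 0
}

func makeKey(ukey []byte, seq uint64, kind byte) []byte {
	k := append([]byte(nil), ukey...)
	var num [8]byte
	binary.LittleEndian.PutUint64(num[:], seq<<8|uint64(kind))
	return append(k, num[:]...)
}

func main() {
	newer := makeKey([]byte("a"), 9, 1)
	older := makeKey([]byte("a"), 5, 1)
	fmt.Println(compareInternal(newer, older)) // -1: the newer entry sorts first
}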

View File

@@ -9,13 +9,12 @@ package leveldb
import (
"bytes"
"fmt"
"io"
"math/rand"
"testing"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"io"
"math/rand"
"testing"
)
const ctValSize = 1000
@@ -100,17 +99,19 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
p := &h.dbHarness
t := p.t
fds, _ := p.stor.List(ft)
sortFds(fds)
ff, _ := p.stor.GetFiles(ft)
sff := files(ff)
sff.sort()
if fi < 0 {
fi = len(fds) - 1
fi = len(sff) - 1
}
if fi >= len(fds) {
if fi >= len(sff) {
t.Fatalf("no such file with type %q with index %d", ft, fi)
}
fd := fds[fi]
r, err := h.stor.Open(fd)
file := sff[fi]
r, err := file.Open()
if err != nil {
t.Fatal("cannot open file: ", err)
}
@@ -148,11 +149,11 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
buf[offset+i] ^= 0x80
}
err = h.stor.Remove(fd)
err = file.Remove()
if err != nil {
t.Fatal("cannot remove old file: ", err)
}
w, err := h.stor.Create(fd)
w, err := file.Create()
if err != nil {
t.Fatal("cannot create new file: ", err)
}
@@ -164,37 +165,25 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
}
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
fds, err := h.stor.List(ft)
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
for _, fd := range fds {
if err := h.stor.Remove(fd); err != nil {
h.t.Error("remove file: ", err)
}
}
}
func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) {
fds, err := h.stor.List(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
for _, fd := range fds {
if err := h.stor.ForceRemove(fd); err != nil {
for _, f := range ff {
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
}
func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
fds, err := h.stor.List(ft)
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
fd := fds[rand.Intn(len(fds))]
h.t.Logf("removing file @%d", fd.Num)
if err := h.stor.Remove(fd); err != nil {
f := ff[rand.Intn(len(ff))]
h.t.Logf("removing file @%d", f.Num())
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
@@ -232,7 +221,6 @@ func (h *dbCorruptHarness) check(min, max int) {
func TestCorruptDB_Journal(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(100)
h.check(100, 100)
@@ -242,11 +230,12 @@ func TestCorruptDB_Journal(t *testing.T) {
h.openDB()
h.check(36, 36)
h.close()
}
func TestCorruptDB_Table(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(100)
h.compactMem()
@@ -257,11 +246,12 @@ func TestCorruptDB_Table(t *testing.T) {
h.openDB()
h.check(99, 99)
h.close()
}
func TestCorruptDB_TableIndex(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10000)
h.compactMem()
@@ -270,6 +260,8 @@ func TestCorruptDB_TableIndex(t *testing.T) {
h.openDB()
h.check(5000, 9999)
h.close()
}
func TestCorruptDB_MissingManifest(t *testing.T) {
@@ -279,7 +271,6 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
Strict: opt.StrictJournalChecksum,
WriteBuffer: 1000 * 60,
})
defer h.close()
h.build(1000)
h.compactMem()
@@ -295,8 +286,10 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
h.compactMem()
h.closeDB()
h.forceRemoveAll(storage.TypeManifest)
h.stor.SetIgnoreOpenErr(storage.TypeManifest)
h.removeAll(storage.TypeManifest)
h.openAssert(false)
h.stor.SetIgnoreOpenErr(0)
h.recover()
h.check(1000, 1000)
@@ -307,11 +300,12 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
h.recover()
h.check(1000, 1000)
h.close()
}
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("foo", "v2")
@@ -327,11 +321,12 @@ func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("foo", "v2")
@@ -349,11 +344,12 @@ func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_CorruptedManifest(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "hello")
h.compactMem()
@@ -364,11 +360,12 @@ func TestCorruptDB_CorruptedManifest(t *testing.T) {
h.recover()
h.getVal("foo", "hello")
h.close()
}
func TestCorruptDB_CompactionInputError(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10)
h.compactMem()
@@ -380,11 +377,12 @@ func TestCorruptDB_CompactionInputError(t *testing.T) {
h.build(10000)
h.check(10000, 10000)
h.close()
}
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10)
h.compactMem()
@@ -396,11 +394,12 @@ func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.compactMem()
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.close()
}
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -422,11 +421,12 @@ func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -448,11 +448,12 @@ func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_MissingTableFiles(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -466,6 +467,8 @@ func TestCorruptDB_MissingTableFiles(t *testing.T) {
h.removeOne(storage.TypeTable)
h.openAssert(false)
h.close()
}
func TestCorruptDB_RecoverTable(t *testing.T) {
@@ -474,7 +477,6 @@ func TestCorruptDB_RecoverTable(t *testing.T) {
CompactionTableSize: 90 * opt.KiB,
Filter: filter.NewBloomFilter(10),
})
defer h.close()
h.build(1000)
h.compactMem()
@@ -493,4 +495,6 @@ func TestCorruptDB_RecoverTable(t *testing.T) {
t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
}
h.check(985, 985)
h.close()
}

View File

@@ -36,14 +36,14 @@ type DB struct {
s *session
// MemDB.
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFd storage.FileDesc
frozenJournalFd storage.FileDesc
frozenSeq uint64
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFile storage.File
frozenJournalFile storage.File
frozenSeq uint64
// Snapshot.
snapsMu sync.Mutex
@@ -61,19 +61,15 @@ type DB struct {
writeDelayN int
journalC chan *Batch
journalAckC chan error
tr *Transaction
// Compaction.
compCommitLk sync.Mutex
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compWriteLocking bool
compStats cStats
memdbMaxLevel int // For testing.
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compStats []cStats
// Close.
closeW sync.WaitGroup
@@ -107,48 +103,33 @@ func openDB(s *session) (*DB, error) {
compErrC: make(chan error),
compPerErrC: make(chan error),
compErrSetC: make(chan error),
compStats: make([]cStats, s.o.GetNumLevel()),
// Close
closeC: make(chan struct{}),
}
// Read-only mode.
readOnly := s.o.GetReadOnly()
if err := db.recoverJournal(); err != nil {
return nil, err
}
if readOnly {
// Recover journals (read-only mode).
if err := db.recoverJournalRO(); err != nil {
return nil, err
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
} else {
// Recover journals.
if err := db.recoverJournal(); err != nil {
return nil, err
}
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
return nil, err
}
return nil, err
}
// Doesn't need to be included in the wait group.
go db.compactionError()
go db.mpoolDrain()
if readOnly {
db.SetReadOnly()
} else {
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
}
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
s.logf("db@open done T·%v", time.Since(start))
@@ -211,7 +192,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, o.GetReadOnly())
stor, err := storage.OpenFile(path)
if err != nil {
return
}
@@ -261,7 +242,7 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, false)
stor, err := storage.OpenFile(path)
if err != nil {
return
}
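
OpenFile and RecoverFile differ only in how the manifest is obtained: Recover rebuilds it by scanning the table files, much as recoverTable does below. A sketch of the public entry points (simplified; real code should only fall back on corruption errors):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/demo.db", nil) // nil means default options
	if err != nil {
		// Fallback: rebuild the manifest from the table files. In real
		// code, gate this on errors.IsCorrupted(err) rather than any error.
		db, err = leveldb.RecoverFile("/tmp/demo.db", nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}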
@@ -280,11 +261,12 @@ func recoverTable(s *session, o *opt.Options) error {
o.Strict &= ^opt.StrictReader
// Get all tables and sort it by file number.
fds, err := s.stor.List(storage.TypeTable)
tableFiles_, err := s.getFiles(storage.TypeTable)
if err != nil {
return err
}
sortFds(fds)
tableFiles := files(tableFiles_)
tableFiles.sort()
var (
maxSeq uint64
@@ -292,22 +274,21 @@ func recoverTable(s *session, o *opt.Options) error {
// We will drop corrupted table.
strict = o.GetStrict(opt.StrictRecovery)
noSync = o.GetNoSync()
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: o.GetNumLevel()}
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
)
buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
tmpFd = s.newTemp()
writer, err := s.stor.Create(tmpFd)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp()
writer, err := tmp.Create()
if err != nil {
return
}
defer func() {
writer.Close()
if err != nil {
s.stor.Remove(tmpFd)
tmpFd = storage.FileDesc{}
tmp.Remove()
tmp = nil
}
}()
@@ -315,7 +296,7 @@ func recoverTable(s *session, o *opt.Options) error {
tw := table.NewWriter(writer, o)
for iter.Next() {
key := iter.Key()
if validInternalKey(key) {
if validIkey(key) {
err = tw.Append(key, iter.Value())
if err != nil {
return
@@ -330,18 +311,16 @@ func recoverTable(s *session, o *opt.Options) error {
if err != nil {
return
}
if !noSync {
err = writer.Sync()
if err != nil {
return
}
err = writer.Sync()
if err != nil {
return
}
size = int64(tw.BytesLen())
return
}
recoverTable := func(fd storage.FileDesc) error {
s.logf("table@recovery recovering @%d", fd.Num)
reader, err := s.stor.Open(fd)
recoverTable := func(file storage.File) error {
s.logf("table@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
@@ -363,7 +342,7 @@ func recoverTable(s *session, o *opt.Options) error {
tgoodKey, tcorruptedKey, tcorruptedBlock int
imin, imax []byte
)
tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
if err != nil {
return err
}
@@ -371,7 +350,7 @@ func recoverTable(s *session, o *opt.Options) error {
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
itererr.SetErrorCallback(func(err error) {
if errors.IsCorrupted(err) {
s.logf("table@recovery block corruption @%d %q", fd.Num, err)
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
tcorruptedBlock++
}
})
@@ -380,7 +359,7 @@ func recoverTable(s *session, o *opt.Options) error {
// Scan the table.
for iter.Next() {
key := iter.Key()
_, seq, _, kerr := parseInternalKey(key)
_, seq, _, kerr := parseIkey(key)
if kerr != nil {
tcorruptedKey++
continue
@@ -406,23 +385,23 @@ func recoverTable(s *session, o *opt.Options) error {
if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
droppedTable++
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
return nil
}
if tgoodKey > 0 {
if tcorruptedKey > 0 || tcorruptedBlock > 0 {
// Rebuild the table.
s.logf("table@recovery rebuilding @%d", fd.Num)
s.logf("table@recovery rebuilding @%d", file.Num())
iter := tr.NewIterator(nil, nil)
tmpFd, newSize, err := buildTable(iter)
tmp, newSize, err := buildTable(iter)
iter.Release()
if err != nil {
return err
}
closed = true
reader.Close()
if err := s.stor.Rename(tmpFd, fd); err != nil {
if err := file.Replace(tmp); err != nil {
return err
}
size = newSize
@@ -432,30 +411,30 @@ func recoverTable(s *session, o *opt.Options) error {
}
recoveredKey += tgoodKey
// Add table to level 0.
rec.addTable(0, fd.Num, size, imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
rec.addTable(0, file.Num(), uint64(size), imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
} else {
droppedTable++
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
}
return nil
}
// Recover all tables.
if len(fds) > 0 {
s.logf("table@recovery F·%d", len(fds))
if len(tableFiles) > 0 {
s.logf("table@recovery F·%d", len(tableFiles))
// Mark file number as used.
s.markFileNum(fds[len(fds)-1].Num)
s.markFileNum(tableFiles[len(tableFiles)-1].Num())
for _, fd := range fds {
if err := recoverTable(fd); err != nil {
for _, file := range tableFiles {
if err := recoverTable(file); err != nil {
return err
}
}
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
}
// Set sequence number.
@@ -471,136 +450,132 @@ func recoverTable(s *session, o *opt.Options) error {
}
func (db *DB) recoverJournal() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
// Get all tables and sort it by file number.
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
journalFiles := files(journalFiles_)
journalFiles.sort()
// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
// Discard older journal.
prev := -1
for i, file := range journalFiles {
if file.Num() >= db.s.stJournalNum {
if prev >= 0 {
i--
journalFiles[i] = journalFiles[prev]
}
journalFiles = journalFiles[i:]
break
} else if file.Num() == db.s.stPrevJournalNum {
prev = i
}
}
var (
ofd storage.FileDesc // Obsolete file.
rec = &sessionRecord{}
)
var jr *journal.Reader
var of storage.File
var mem *memdb.DB
batch := new(Batch)
cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
strict := db.s.o.GetStrict(opt.StrictJournal)
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery F·%d", len(fds))
// Mark file number as used.
db.s.markFileNum(fds[len(fds)-1].Num)
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batch = &Batch{}
)
for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
fr, err := db.s.stor.Open(fd)
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
}
// Flush memdb and remove obsolete journal file.
if !ofd.Nil() {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
}
rec.setJournalNum(fd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
fr.Close()
return err
}
rec.resetAddedTables()
db.s.stor.Remove(ofd)
ofd = storage.FileDesc{}
}
// Replay journal to memdb.
mdb.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFd(err, fd)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is the error returned due to corruption when strict == false.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mdb.Size() >= writeBuffer {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
mdb.Reset()
}
}
fr.Close()
ofd = fd
// Create/reset journal reader instance.
if jr == nil {
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
// Flush the last memdb.
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
// Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
// Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
return errors.SetFile(err, file)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is the error returned due to corruption when strict == false.
continue
} else {
return errors.SetFile(err, file)
}
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
if strict || !errors.IsCorrupted(err) {
return errors.SetFile(err, file)
} else {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mem.Size() >= writeBuffer {
if err := cm.flush(mem, 0); err != nil {
return err
}
mem.Reset()
}
}
of = file
return nil
}
// Recover all journals.
if len(journalFiles) > 0 {
db.logf("journal@recovery F·%d", len(journalFiles))
// Mark file number as used.
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
mem = memdb.New(db.s.icmp, writeBuffer)
for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
return err
}
}
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
@@ -612,10 +587,8 @@ func (db *DB) recoverJournal() error {
}
// Commit.
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
// Close journal on error.
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -624,139 +597,15 @@ func (db *DB) recoverJournal() error {
}
// Remove the last obsolete journal file.
if !ofd.Nil() {
db.s.stor.Remove(ofd)
if of != nil {
of.Remove()
}
return nil
}
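
Both sides of this diff drive the same journal.Reader loop: Next yields one record at a time, each record is a serialized write batch, and the batch is replayed into a memdb that is flushed to a table once it outgrows the write buffer. A standalone sketch of the reading side (the file name and dropper type are hypothetical):

package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

// logDropper satisfies the reader's Dropper interface; it is called for
// each chunk dropped in non-strict mode.
type logDropper struct{}

func (logDropper) Drop(err error) { log.Println("journal: dropped chunk:", err) }

func main() {
	f, err := os.Open("000123.log") // hypothetical journal file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	jr := journal.NewReader(f, logDropper{}, false /* strict */, true /* checksum */)
	var buf bytes.Buffer
	for {
		r, err := jr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		buf.Reset()
		if _, err := buf.ReadFrom(r); err != nil {
			log.Fatal(err)
		}
		// buf now holds one serialized write batch, ready for replay.
		log.Printf("record: %d bytes", buf.Len())
	}
}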
func (db *DB) recoverJournalRO() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
}
}
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
mdb = memdb.New(db.s.icmp, writeBuffer)
)
// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(fds))
var (
jr *journal.Reader
buf = &util.Buffer{}
batch = &Batch{}
)
for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
fr, err := db.s.stor.Open(fd)
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
}
// Replay journal to memdb.
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFd(err, fd)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
}
fr.Close()
}
}
// Set memDB.
db.mem = &memDB{db: db, DB: mdb, ref: 1}
return nil
}
func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
mk, mv, err := mdb.Find(ikey)
if err == nil {
ukey, _, kt, kerr := parseInternalKey(mk)
if kerr != nil {
// Shouldn't have happened.
panic(kerr)
}
if icmp.uCompare(ukey, ikey.ukey()) == 0 {
if kt == keyTypeDel {
return true, nil, ErrNotFound
}
return true, mv, nil
}
} else if err != ErrNotFound {
return true, nil, err
}
return
}
func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
}
}
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIkey(key, seq, ktSeek)
em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
@@ -765,36 +614,36 @@ func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R
}
defer m.decref()
if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
mk, mv, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have happened.
panic(kerr)
}
if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return nil, ErrNotFound
}
return append([]byte{}, mv...), nil
}
} else if me != ErrNotFound {
return nil, me
}
}
v := db.s.version()
value, cSched, err := v.get(auxt, ikey, ro, false)
value, cSched, err := v.get(ikey, ro, false)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
return
}
func nilIfNotFound(err error) error {
if err == ErrNotFound {
return nil
}
return err
}
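
From the caller's perspective, the tombstone handling in both variants collapses to a single sentinel: a key whose newest record is a delete reports leveldb.ErrNotFound, exactly like a key that never existed. A short sketch (function name illustrative):

package demo

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func lookup(db *leveldb.DB, key []byte) {
	v, err := db.Get(key, nil)
	switch err {
	case nil:
		fmt.Printf("%s=%s\n", key, v)
	case leveldb.ErrNotFound:
		// absent, or the newest record for this key is a delete tombstone
		fmt.Printf("%s: not found\n", key)
	default:
		log.Fatal(err)
	}
}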
func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
}
}
func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := newIkey(key, seq, ktSeek)
em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
@@ -803,17 +652,30 @@ func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R
}
defer m.decref()
if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
mk, _, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have happened.
panic(kerr)
}
if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return false, nil
}
return true, nil
}
} else if me != ErrNotFound {
return false, me
}
}
v := db.s.version()
_, cSched, err := v.get(auxt, ikey, ro, true)
_, cSched, err := v.get(ikey, ro, true)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
if err == nil {
ret = true
@@ -837,7 +699,7 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.get(nil, nil, key, se.seq, ro)
return db.get(key, se.seq, ro)
}
// Has returns true if the DB does contains the given key.
@@ -851,11 +713,11 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.has(nil, nil, key, se.seq, ro)
return db.has(key, se.seq, ro)
}
// NewIterator returns an iterator for the latest snapshot of the
// underlying DB.
// uderlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
@@ -879,7 +741,7 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera
defer db.releaseSnapshot(se)
// Iterator holds 'version' lock, 'version' is immutable so snapshot
// can be released after iterator created.
return db.newIterator(nil, nil, se.seq, slice, ro)
return db.newIterator(se.seq, slice, ro)
}
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
@@ -922,7 +784,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
const prefix = "leveldb."
if !strings.HasPrefix(name, prefix) {
return "", ErrNotFound
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
}
p := name[len(prefix):]
@@ -935,8 +797,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
var level uint
var rest string
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
if n != 1 {
err = ErrNotFound
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = errors.New("leveldb: GetProperty: invalid property: " + name)
} else {
value = fmt.Sprint(v.tLen(int(level)))
}
@@ -944,8 +806,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
for level, tables := range v.levels {
duration, read, write := db.compStats.getStat(level)
for level, tables := range v.tables {
duration, read, write := db.compStats[level].get()
if len(tables) == 0 && duration == 0 {
continue
}
@@ -954,10 +816,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "sstables":
for level, tables := range v.levels {
for level, tables := range v.tables {
value += fmt.Sprintf("--- level %d ---\n", level)
for _, t := range tables {
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
case p == "blockpool":
@@ -975,7 +837,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = ErrNotFound
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}
return
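
GetProperty is the public window onto these counters. A usage sketch; the property names assume the usual "leveldb."-prefixed set handled above (the exact num-files spelling comes from numFilesPrefix, which is not shown in this hunk):

package demo

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

func dumpProps(db *leveldb.DB) {
	props := []string{
		"leveldb.stats",
		"leveldb.sstables",
		"leveldb.num-files-at-level0", // assumed spelling of numFilesPrefix + level
	}
	for _, p := range props {
		v, err := db.GetProperty(p)
		if err != nil {
			fmt.Println(p, "->", err)
			continue
		}
		fmt.Println(p, "->", v)
	}
}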
@@ -997,8 +859,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
imin := newIkey(r.Start, kMaxSeq, ktSeek)
imax := newIkey(r.Limit, kMaxSeq, ktSeek)
start, err := v.offsetOf(imin)
if err != nil {
return nil, err
@@ -1007,7 +869,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
if err != nil {
return nil, err
}
var size int64
var size uint64
if limit >= start {
size = limit - start
}
@@ -1017,8 +879,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
return sizes, nil
}
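
SizeOf approximates the on-disk span of each requested range by diffing the table offsets at the two internal seek keys built above. Usage (function name illustrative):

package demo

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func approxSize(db *leveldb.DB) error {
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("n")},
		{Start: []byte("n"), Limit: []byte("z")},
	})
	if err != nil {
		return err
	}
	// one entry per requested range; Sum totals them
	fmt.Println("approx on-disk bytes:", sizes.Sum())
	return nil
}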
// Close closes the DB. This will also releases any outstanding snapshot,
// abort any in-flight compaction and discard open transaction.
// Close closes the DB. This will also releases any outstanding snapshot and
// abort any in-flight compaction.
//
// It is not safe to close a DB until all outstanding iterators are released.
// It is valid to call Close multiple times. Other methods should not be
@@ -1038,27 +900,17 @@ func (db *DB) Close() error {
var err error
select {
case err = <-db.compErrC:
if err == ErrReadOnly {
err = nil
}
default:
}
// Signal all goroutines.
close(db.closeC)
// Discard open transaction.
if db.tr != nil {
db.tr.Discard()
}
// Acquire writer lock.
db.writeLockC <- struct{}{}
// Wait for all goroutines to exit.
db.closeW.Wait()
// Closes journal.
// Lock writer and closes journal.
db.writeLockC <- struct{}{}
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -1085,6 +937,8 @@ func (db *DB) Close() error {
db.frozenMem = nil
db.journal = nil
db.journalWriter = nil
db.journalFile = nil
db.frozenJournalFile = nil
db.closer = nil
return err

View File

@@ -11,79 +11,109 @@ import (
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)
var (
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
type cStat struct {
type cStats struct {
sync.Mutex
duration time.Duration
read int64
write int64
read uint64
write uint64
}
func (p *cStat) add(n *cStatStaging) {
func (p *cStats) add(n *cStatsStaging) {
p.Lock()
p.duration += n.duration
p.read += n.read
p.write += n.write
p.Unlock()
}
func (p *cStat) get() (duration time.Duration, read, write int64) {
func (p *cStats) get() (duration time.Duration, read, write uint64) {
p.Lock()
defer p.Unlock()
return p.duration, p.read, p.write
}
type cStatStaging struct {
type cStatsStaging struct {
start time.Time
duration time.Duration
on bool
read int64
write int64
read uint64
write uint64
}
func (p *cStatStaging) startTimer() {
func (p *cStatsStaging) startTimer() {
if !p.on {
p.start = time.Now()
p.on = true
}
}
func (p *cStatStaging) stopTimer() {
func (p *cStatsStaging) stopTimer() {
if p.on {
p.duration += time.Since(p.start)
p.on = false
}
}
type cStats struct {
lk sync.Mutex
stats []cStat
type cMem struct {
s *session
level int
rec *sessionRecord
}
func (p *cStats) addStat(level int, n *cStatStaging) {
p.lk.Lock()
if level >= len(p.stats) {
newStats := make([]cStat, level+1)
copy(newStats, p.stats)
p.stats = newStats
}
p.stats[level].add(n)
p.lk.Unlock()
func newCMem(s *session) *cMem {
return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}
func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
p.lk.Lock()
defer p.lk.Unlock()
if level < len(p.stats) {
return p.stats[level].get()
func (c *cMem) flush(mem *memdb.DB, level int) error {
s := c.s
// Write memdb to table.
iter := mem.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return err
}
return
// Pick level.
if level < 0 {
v := s.version()
level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
v.release()
}
c.rec.addTableFile(level, t)
s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
c.level = level
return nil
}
func (c *cMem) reset() {
c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}
func (c *cMem) commit(journal, seq uint64) error {
c.rec.setJournalNum(journal)
c.rec.setSeqNum(seq)
// Commit changes.
return c.s.commit(c.rec)
}
func (db *DB) compactionError() {
var err error
var (
err error
wlocked bool
)
noerr:
// No error.
for {
@@ -91,7 +121,7 @@ noerr:
case err = <-db.compErrSetC:
switch {
case err == nil:
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
goto haserr
@@ -109,7 +139,7 @@ haserr:
switch {
case err == nil:
goto noerr
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
}
@@ -125,9 +155,9 @@ hasperr:
case db.compPerErrC <- err:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
db.compWriteLocking = true
wlocked = true
case _, _ = <-db.closeC:
if db.compWriteLocking {
if wlocked {
// We should release the lock or Close will hang.
<-db.writeLockC
}
@@ -172,7 +202,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
disableBackoff = db.s.o.GetDisableCompactionBackoff()
)
for n := 0; ; n++ {
// Check whether the DB is closed.
// Check wether the DB is closed.
if db.isClosed() {
db.logf("%s exiting", name)
db.compactionExitTransact()
@@ -256,27 +286,22 @@ func (db *DB) compactionExitTransact() {
panic(errCompactionTransactExiting)
}
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
db.compCommitLk.Lock()
defer db.compCommitLk.Unlock() // Defer is necessary.
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
return db.s.commit(rec)
}, nil)
}
func (db *DB) memCompaction() {
mdb := db.getFrozenMem()
if mdb == nil {
mem := db.getFrozenMem()
if mem == nil {
return
}
defer mdb.decref()
defer mem.decref()
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
if mdb.Len() == 0 {
db.logf("memdb@flush skipping")
// drop frozen memdb
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
db.dropFrozenMem()
return
}
@@ -292,44 +317,35 @@ func (db *DB) memCompaction() {
return
}
var (
rec = &sessionRecord{}
stats = &cStatStaging{}
flushLevel int
)
// Generate tables.
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
stats.stopTimer()
return
defer stats.stopTimer()
return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
for _, r := range c.rec.addedTables {
db.logf("mem@flush revert @%d", r.num)
f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
}
}
return nil
})
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.frozenSeq)
db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.commit(db.journalFile.Num(), db.frozenSeq)
}, nil)
// Commit.
stats.startTimer()
db.compactionCommit("memdb", rec)
stats.stopTimer()
db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
for _, r := range rec.addedTables {
for _, r := range c.rec.addedTables {
stats.write += r.size
}
db.compStats.addStat(flushLevel, stats)
db.compStats[c.level].add(stats)
// Drop frozen memdb.
// Drop frozen mem.
db.dropFrozenMem()
// Resume table compaction.
@@ -343,7 +359,7 @@ func (db *DB) memCompaction() {
}
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
type tableCompactionBuilder struct {
@@ -351,7 +367,7 @@ type tableCompactionBuilder struct {
s *session
c *compaction
rec *sessionRecord
stat0, stat1 *cStatStaging
stat0, stat1 *cStatsStaging
snapHasLastUkey bool
snapLastUkey []byte
@@ -405,9 +421,9 @@ func (b *tableCompactionBuilder) flush() error {
if err != nil {
return err
}
b.rec.addTableFile(b.c.sourceLevel+1, t)
b.rec.addTableFile(b.c.level+1, t)
b.stat1.write += t.size
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.tw = nil
return nil
}
@@ -452,7 +468,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
}
ikey := iter.Key()
ukey, seq, kt, kerr := parseInternalKey(ikey)
ukey, seq, kt, kerr := parseIkey(ikey)
if kerr == nil {
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +494,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
hasLastUkey = true
lastUkey = append(lastUkey[:0], ukey...)
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
}
switch {
case lastSeq <= b.minSeq:
// Dropped because newer entry for same user key exist
fallthrough // (A)
case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger seq numbers
@@ -507,7 +523,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
// Don't drop corrupted keys.
hasLastUkey = false
lastUkey = lastUkey[:0]
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
b.kerrCnt++
}
@@ -530,7 +546,8 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
func (b *tableCompactionBuilder) revert() error {
for _, at := range b.rec.addedTables {
b.s.logf("table@build revert @%d", at.num)
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
f := b.s.getTableFile(at.num)
if err := f.Remove(); err != nil {
return err
}
}
@@ -540,29 +557,31 @@ func (b *tableCompactionBuilder) revert() error {
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()
rec := &sessionRecord{}
rec.addCompPtr(c.sourceLevel, c.imax)
rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
rec.addCompPtr(c.level, c.imax)
if !noTrivial && c.trivial() {
t := c.levels[0][0]
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
rec.delTable(c.sourceLevel, t.fd.Num)
rec.addTableFile(c.sourceLevel+1, t)
db.compactionCommit("table-move", rec)
t := c.tables[0][0]
db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
rec.delTable(c.level, t.file.Num())
rec.addTableFile(c.level+1, t)
db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
return db.s.commit(rec)
}, nil)
return
}
var stats [2]cStatStaging
for i, tables := range c.levels {
var stats [2]cStatsStaging
for i, tables := range c.tables {
for _, t := range tables {
stats[i].read += t.size
// Insert deleted tables into record
rec.delTable(c.sourceLevel+i, t.fd.Num)
rec.delTable(c.level+i, t.file.Num())
}
}
sourceSize := int(stats[0].read + stats[1].read)
minSeq := db.minSeq()
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
b := &tableCompactionBuilder{
db: db,
@@ -572,60 +591,49 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
stat1: &stats[1],
minSeq: minSeq,
strict: db.s.o.GetStrict(opt.StrictCompaction),
tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
}
db.compactionTransact("table@build", b)
// Commit.
stats[1].startTimer()
db.compactionCommit("table", rec)
stats[1].stopTimer()
// Commit changes
db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
stats[1].startTimer()
defer stats[1].stopTimer()
return db.s.commit(rec)
}, nil)
resultSize := int(stats[1].write)
db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
// Save compaction stats
for i := range stats {
db.compStats.addStat(c.sourceLevel+1, &stats[i])
db.compStats[c.level+1].add(&stats[i])
}
}
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
db.logf("table@compaction range L%d %q:%q", level, umin, umax)
if level >= 0 {
if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
} else {
// Retry until nothing to compact.
for {
compacted := false
// Scan for maximum level with overlapped tables.
v := db.s.version()
m := 1
for i := m; i < len(v.levels); i++ {
tables := v.levels[i]
if tables.overlaps(db.s.icmp, umin, umax, false) {
m = i
}
v := db.s.version()
m := 1
for i, t := range v.tables[1:] {
if t.overlaps(db.s.icmp, umin, umax, false) {
m = i + 1
}
v.release()
}
v.release()
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
db.tableCompaction(c, true)
compacted = true
}
}
if !compacted {
break
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
}
}
return nil
}
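
tableRangeCompaction backs the public CompactRange method; a zero-valued util.Range (nil Start and Limit) means the whole key space, which triggers the scan-every-level branch above. Usage (function name illustrative):

package demo

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func compactAll(db *leveldb.DB) {
	// zero Range: nil Start and Limit are unbounded on both ends
	if err := db.CompactRange(util.Range{}); err != nil {
		log.Println("compact:", err)
	}
}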
func (db *DB) tableAutoCompaction() {
@@ -652,11 +660,11 @@ type cCmd interface {
ack(err error)
}
type cAuto struct {
type cIdle struct {
ackC chan<- error
}
func (r cAuto) ack(err error) {
func (r cIdle) ack(err error) {
if r.ackC != nil {
defer func() {
recover()
@@ -680,21 +688,13 @@ func (r cRange) ack(err error) {
}
}
// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
select {
case compC <- cAuto{}:
default:
}
}
// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
// This will trigger auto compation and/or wait for all compaction to be done.
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cAuto{ch}:
case compC <- cIdle{ch}:
case err = <-db.compErrC:
return
case _, _ = <-db.closeC:
@@ -710,8 +710,16 @@ func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
return err
}
// This will trigger auto compaction but will not wait for it.
func (db *DB) compSendTrigger(compC chan<- cCmd) {
select {
case compC <- cIdle{}:
default:
}
}
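
compSendTrigger (compTrigger on the other side of this diff) is the classic non-blocking notify: if a command is already queued, the duplicate wake-up is simply dropped, because the compaction goroutine will see the pending work anyway. The idiom in isolation (names illustrative):

package demo

// tryWake sends a wake-up without ever blocking the caller. A false
// return means the receiver is busy; dropping the duplicate trigger is
// safe because it will process outstanding work regardless.
func tryWake(ch chan<- struct{}) bool {
	select {
	case ch <- struct{}{}:
		return true
	default:
		return false
	}
}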
// Send range compaction request.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
@@ -751,7 +759,7 @@ func (db *DB) mCompaction() {
select {
case x = <-db.mcompCmdC:
switch x.(type) {
case cAuto:
case cIdle:
db.memCompaction()
x.ack(nil)
x = nil
@@ -812,10 +820,11 @@ func (db *DB) tCompaction() {
}
if x != nil {
switch cmd := x.(type) {
case cAuto:
case cIdle:
ackQ = append(ackQ, x)
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
x.ack(nil)
default:
panic("leveldb: unknown command")
}

View File

@@ -8,7 +8,6 @@ package leveldb
import (
"errors"
"math/rand"
"runtime"
"sync"
"sync/atomic"
@@ -19,7 +18,7 @@ import (
)
var (
errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
)
type memdbReleaser struct {
@@ -33,50 +32,40 @@ func (mr *memdbReleaser) Release() {
})
}
func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
em, fm := db.getMems()
v := db.s.version()
tableIts := v.getIterators(slice, ro)
n := len(tableIts) + len(auxt) + 3
its := make([]iterator.Iterator, 0, n)
if auxm != nil {
ami := auxm.NewIterator(slice)
ami.SetReleaser(&memdbReleaser{m: auxm})
its = append(its, ami)
}
for _, t := range auxt {
its = append(its, v.s.tops.newIterator(t, slice, ro))
}
emi := em.NewIterator(slice)
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
emi := em.mdb.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
its = append(its, emi)
i = append(i, emi)
if fm != nil {
fmi := fm.NewIterator(slice)
fmi := fm.mdb.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
its = append(its, fmi)
i = append(i, fmi)
}
its = append(its, tableIts...)
mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
i = append(i, ti...)
strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
mi.SetReleaser(&versionReleaser{v: v})
return mi
}
func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
var islice *util.Range
if slice != nil {
islice = &util.Range{}
if slice.Start != nil {
islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
}
if slice.Limit != nil {
islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
}
}
rawIter := db.newRawIterator(auxm, auxt, islice, ro)
rawIter := db.newRawIterator(islice, ro)
iter := &dbIter{
db: db,
icmp: db.s.icmp,
@@ -91,10 +80,6 @@ func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Rang
return iter
}
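
Callers reach these bounds through NewIterator: Start is inclusive, Limit exclusive, and both are translated to internal seek keys with the maximum sequence number, as the hunk above shows. A usage sketch (function name illustrative):

package demo

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func scanRange(db *leveldb.DB) error {
	// Start is inclusive, Limit exclusive; nil means unbounded.
	iter := db.NewIterator(&util.Range{Start: []byte("a"), Limit: []byte("m")}, nil)
	defer iter.Release()
	for iter.Next() {
		fmt.Printf("%s=%s\n", iter.Key(), iter.Value())
	}
	return iter.Error() // surfaces any corruption hit during the scan
}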
func (db *DB) iterSamplingRate() int {
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}
type dir int
const (
@@ -113,21 +98,11 @@ type dbIter struct {
seq uint64
strict bool
smaplingGap int
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) sampleSeek() {
ikey := i.iter.Key()
i.smaplingGap -= len(ikey) + len(i.iter.Value())
for i.smaplingGap < 0 {
i.smaplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) setErr(err error) {
@@ -187,7 +162,7 @@ func (i *dbIter) Seek(key []byte) bool {
return false
}
ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
ikey := newIkey(key, i.seq, ktSeek)
if i.iter.Seek(ikey) {
i.dir = dirSOI
return i.next()
@@ -199,15 +174,14 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
if seq <= i.seq {
switch kt {
case keyTypeDel:
case ktDel:
// Skip deleted key.
i.key = append(i.key[:0], ukey...)
i.dir = dirForward
case keyTypeVal:
case ktVal:
if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
@@ -250,13 +224,12 @@ func (i *dbIter) prev() bool {
del := true
if i.iter.Valid() {
for {
if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
}
del = (kt == keyTypeDel)
del = (kt == ktDel)
if !del {
i.key = append(i.key[:0], ukey...)
i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,8 +265,7 @@ func (i *dbIter) Prev() bool {
return i.Last()
case dirForward:
for i.iter.Prev() {
if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}

View File

@@ -110,7 +110,7 @@ func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err er
err = ErrSnapshotReleased
return
}
return snap.db.get(nil, nil, key, snap.elem.seq, ro)
return snap.db.get(key, snap.elem.seq, ro)
}
// Has returns true if the DB does contains the given key.
@@ -127,10 +127,10 @@ func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error)
err = ErrSnapshotReleased
return
}
return snap.db.has(nil, nil, key, snap.elem.seq, ro)
return snap.db.has(key, snap.elem.seq, ro)
}
// NewIterator returns an iterator for the snapshot of the underlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
@@ -158,7 +158,7 @@ func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterat
}
// Since the iterator already holds a version ref, it doesn't need to
// hold a snapshot ref.
return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
return snap.db.newIterator(snap.elem.seq, slice, ro)
}
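
As a usage note, the public snapshot API wraps this path: GetSnapshot pins a sequence number, and reads through the snapshot ignore later writes. A minimal sketch (path and keys illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/snap-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v1"), nil); err != nil {
		log.Fatal(err)
	}

	// Pin the current sequence number.
	snap, err := db.GetSnapshot()
	if err != nil {
		log.Fatal(err)
	}
	defer snap.Release() // snapshots must be released when done

	// Writes after the snapshot are invisible through it.
	_ = db.Put([]byte("k"), []byte("v2"), nil)

	v, err := snap.Get([]byte("k"), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", v) // prints v1
}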
// Release releases the snapshot. This will not release any returned


@@ -12,19 +12,14 @@ import (
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
type memDB struct {
db *DB
*memdb.DB
db *DB
mdb *memdb.DB
ref int32
}
func (m *memDB) getref() int32 {
return atomic.LoadInt32(&m.ref)
}
func (m *memDB) incref() {
atomic.AddInt32(&m.ref, 1)
}
@@ -32,12 +27,12 @@ func (m *memDB) incref() {
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back a memdb with the standard capacity.
if m.Capacity() == m.db.s.o.GetWriteBuffer() {
m.Reset()
m.db.mpoolPut(m.DB)
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
}
m.db = nil
m.DB = nil
m.mdb = nil
} else if ref < 0 {
panic("negative memdb ref")
}
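
The memDB wrapper above is a plain atomic reference count around a pooled memdb: released exactly once, when the count reaches zero. A standalone sketch of the same pattern, with the release callback standing in for the pool return and field clearing (names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

// refCounted mirrors memDB's incref/decref: the resource is released
// exactly once, when the count reaches zero; going negative is a bug.
type refCounted struct {
	ref     int32
	release func() // stands in for mpoolPut and field clearing above
}

func (r *refCounted) incref() { atomic.AddInt32(&r.ref, 1) }

func (r *refCounted) decref() {
	switch ref := atomic.AddInt32(&r.ref, -1); {
	case ref == 0:
		r.release()
	case ref < 0:
		panic("negative ref")
	}
}

func main() {
	r := &refCounted{ref: 2, release: func() { fmt.Println("released") }}
	r.decref()
	r.decref() // prints "released"
}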
@@ -53,19 +48,6 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) setSeq(seq uint64) {
atomic.StoreUint64(&db.seq, seq)
}
func (db *DB) sampleSeek(ikey internalKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
}
v.release()
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()
@@ -76,18 +58,12 @@ func (db *DB) mpoolPut(mem *memdb.DB) {
}
}
func (db *DB) mpoolGet(n int) *memDB {
var mdb *memdb.DB
func (db *DB) mpoolGet() *memdb.DB {
select {
case mdb = <-db.memPool:
case mem := <-db.memPool:
return mem
default:
}
if mdb == nil || mdb.Capacity() < n {
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
}
return &memDB{
db: db,
DB: mdb,
return nil
}
}
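
mpoolGet and mpoolPut implement a free list as a buffered channel with non-blocking send and receive: an empty pool yields nil (the caller allocates), a full pool silently drops. A minimal standalone sketch of that pooling pattern:

package main

import "fmt"

// pool is a fixed-capacity free list backed by a buffered channel.
// Get and Put never block: an empty pool yields nil, a full pool drops.
type pool struct {
	c chan []byte
}

func newPool(size int) *pool { return &pool{c: make(chan []byte, size)} }

func (p *pool) Get() []byte {
	select {
	case b := <-p.c:
		return b
	default:
		return nil // caller allocates, as newMem does when the pool is empty
	}
}

func (p *pool) Put(b []byte) {
	select {
	case p.c <- b:
	default: // pool full; drop the buffer
	}
}

func main() {
	p := newPool(1)
	p.Put(make([]byte, 1<<20))
	fmt.Println(len(p.Get()))    // 1048576
	fmt.Println(p.Get() == nil)  // true: pool is empty again
}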
@@ -110,10 +86,11 @@ func (db *DB) mpoolDrain() {
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
w, err := db.s.stor.Create(fd)
num := db.s.allocFileNum()
file := db.s.getJournalFile(num)
w, err := file.Create()
if err != nil {
db.s.reuseFileNum(fd.Num)
db.s.reuseFileNum(num)
return
}
@@ -129,14 +106,20 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
} else {
db.journal.Reset(w)
db.journalWriter.Close()
db.frozenJournalFd = db.journalFd
db.frozenJournalFile = db.journalFile
}
db.journalWriter = w
db.journalFd = fd
db.journalFile = file
db.frozenMem = db.mem
mem = db.mpoolGet(n)
mem.incref() // for self
mem.incref() // for caller
mdb := db.mpoolGet()
if mdb == nil || mdb.Capacity() < n {
mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
}
mem = &memDB{
db: db,
mdb: mdb,
ref: 2,
}
db.mem = mem
// The seq is only incremented by the writer, and whoever calls newMem
// should hold the write lock, so no additional synchronization is needed here.
@@ -189,12 +172,12 @@ func (db *DB) getFrozenMem() *memDB {
// Drop frozen memdb; assume that frozen memdb isn't nil.
func (db *DB) dropFrozenMem() {
db.memMu.Lock()
if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
if err := db.frozenJournalFile.Remove(); err != nil {
db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
} else {
db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
}
db.frozenJournalFd = storage.FileDesc{}
db.frozenJournalFile = nil
db.frozenMem.decref()
db.frozenMem = nil
db.memMu.Unlock()


@@ -21,16 +21,14 @@ type Reader interface {
NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
}
// Sizes is a list of sizes.
type Sizes []int64
type Sizes []uint64
// Sum returns the sum of the sizes.
func (sizes Sizes) Sum() int64 {
var sum int64
for _, size := range sizes {
sum += size
func (p Sizes) Sum() (n uint64) {
for _, s := range p {
n += s
}
return sum
return n
}
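
Sizes is the type returned by the public DB.SizeOf, which reports the approximate on-disk size of each requested key range. A usage sketch (path and ranges illustrative; note the two sides of this diff disagree on int64 vs uint64 for the element type):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/sizeof-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Approximate sizes for two key ranges; one Sizes element per range.
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("m")},
		{Start: []byte("m"), Limit: []byte("z")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total ~bytes:", sizes.Sum())
}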
// Logging.
@@ -42,59 +40,59 @@ func (db *DB) checkAndCleanFiles() error {
v := db.s.version()
defer v.release()
tmap := make(map[int64]bool)
for _, tables := range v.levels {
tablesMap := make(map[uint64]bool)
for _, tables := range v.tables {
for _, t := range tables {
tmap[t.fd.Num] = false
tablesMap[t.file.Num()] = false
}
}
fds, err := db.s.stor.List(storage.TypeAll)
files, err := db.s.getFiles(storage.TypeAll)
if err != nil {
return err
}
var nt int
var rem []storage.FileDesc
for _, fd := range fds {
var nTables int
var rem []storage.File
for _, f := range files {
keep := true
switch fd.Type {
switch f.Type() {
case storage.TypeManifest:
keep = fd.Num >= db.s.manifestFd.Num
keep = f.Num() >= db.s.manifestFile.Num()
case storage.TypeJournal:
if !db.frozenJournalFd.Nil() {
keep = fd.Num >= db.frozenJournalFd.Num
if db.frozenJournalFile != nil {
keep = f.Num() >= db.frozenJournalFile.Num()
} else {
keep = fd.Num >= db.journalFd.Num
keep = f.Num() >= db.journalFile.Num()
}
case storage.TypeTable:
_, keep = tmap[fd.Num]
_, keep = tablesMap[f.Num()]
if keep {
tmap[fd.Num] = true
nt++
tablesMap[f.Num()] = true
nTables++
}
}
if !keep {
rem = append(rem, fd)
rem = append(rem, f)
}
}
if nt != len(tmap) {
var mfds []storage.FileDesc
for num, present := range tmap {
if nTables != len(tablesMap) {
var missing []*storage.FileInfo
for num, present := range tablesMap {
if !present {
mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
db.logf("db@janitor table missing @%d", num)
}
}
return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
}
db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
for _, fd := range rem {
db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
if err := db.s.stor.Remove(fd); err != nil {
db.logf("db@janitor F·%d G·%d", len(files), len(rem))
for _, f := range rem {
db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
if err := f.Remove(); err != nil {
return err
}
}


@@ -45,9 +45,9 @@ func (db *DB) jWriter() {
}
}
func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
func (db *DB) rotateMem(n int) (mem *memDB, err error) {
// Wait for pending memdb compaction.
err = db.compTriggerWait(db.mcompCmdC)
err = db.compSendIdle(db.mcompCmdC)
if err != nil {
return
}
@@ -59,50 +59,46 @@ func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
}
// Schedule memdb compaction.
if wait {
err = db.compTriggerWait(db.mcompCmdC)
} else {
db.compTrigger(db.mcompCmdC)
}
db.compSendTrigger(db.mcompCmdC)
return
}
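
The flush path below throttles writers using the L0 slowdown and pause triggers; both, along with the memdb size, are tunable through opt.Options. A hedged configuration sketch (values are illustrative, not recommendations):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// WriteBuffer bounds each memdb; the two L0 triggers control when the
	// flush below delays writes (slowdown) or waits on table compaction
	// (pause).
	o := &opt.Options{
		WriteBuffer:            8 * opt.MiB,
		WriteL0SlowdownTrigger: 8,
		WriteL0PauseTrigger:    12,
	}
	db, err := leveldb.OpenFile("/tmp/tuned-db", o)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}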
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
delayed := false
flush := func() (retry bool) {
v := db.s.version()
defer v.release()
mdb = db.getEffectiveMem()
mem = db.getEffectiveMem()
defer func() {
if retry {
mdb.decref()
mdb = nil
mem.decref()
mem = nil
}
}()
mdbFree = mdb.Free()
nn = mem.mdb.Free()
switch {
case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
delayed = true
time.Sleep(time.Millisecond)
case mdbFree >= n:
case nn >= n:
return false
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
delayed = true
err = db.compTriggerWait(db.tcompCmdC)
err = db.compSendIdle(db.tcompCmdC)
if err != nil {
return false
}
default:
// Allow memdb to grow if it has no entries.
if mdb.Len() == 0 {
mdbFree = n
if mem.mdb.Len() == 0 {
nn = n
} else {
mdb.decref()
mdb, err = db.rotateMem(n, false)
mem.decref()
mem, err = db.rotateMem(n)
if err == nil {
mdbFree = mdb.Free()
nn = mem.mdb.Free()
} else {
mdbFree = 0
nn = 0
}
}
return false
@@ -133,20 +129,7 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
return
}
b.init(wo.GetSync() && !db.s.o.GetNoSync())
if b.size() > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
// Writes using transaction.
tr, err1 := db.OpenTransaction()
if err1 != nil {
return err1
}
if err1 := tr.Write(b, wo); err1 != nil {
tr.Discard()
return err1
}
return tr.Commit()
}
b.init(wo.GetSync())
// The write happens synchronously.
select {
@@ -154,8 +137,6 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
if <-db.writeMergedC {
return <-db.writeAckC
}
// Continue; the write lock was already acquired by the previous writer
// and handed out to us.
case db.writeLockC <- struct{}{}:
case err = <-db.compPerErrC:
return
@@ -166,29 +147,28 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
merged := 0
danglingMerge := false
defer func() {
for i := 0; i < merged; i++ {
db.writeAckC <- err
}
if danglingMerge {
// Only one dangling merge at most, so this is safe.
db.writeMergedC <- false
} else {
<-db.writeLockC
}
for i := 0; i < merged; i++ {
db.writeAckC <- err
}
}()
mdb, mdbFree, err := db.flush(b.size())
mem, memFree, err := db.flush(b.size())
if err != nil {
return
}
defer mdb.decref()
defer mem.decref()
// Calculate maximum size of the batch.
m := 1 << 20
if x := b.size(); x <= 128<<10 {
m = x + (128 << 10)
}
m = minInt(m, mdbFree)
m = minInt(m, memFree)
// Merge with other batch.
drain:
@@ -217,7 +197,7 @@ drain:
select {
case db.journalC <- b:
// Write into memdb
if berr := b.memReplay(mdb.DB); berr != nil {
if berr := b.memReplay(mem.mdb); berr != nil {
panic(berr)
}
case err = <-db.compPerErrC:
@@ -231,7 +211,7 @@ drain:
case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
if berr := b.revertMemReplay(mdb.DB); berr != nil {
if berr := b.revertMemReplay(mem.mdb); berr != nil {
panic(berr)
}
return
@@ -245,7 +225,7 @@ drain:
if err != nil {
return
}
if berr := b.memReplay(mdb.DB); berr != nil {
if berr := b.memReplay(mem.mdb); berr != nil {
panic(berr)
}
}
@@ -253,8 +233,8 @@ drain:
// Set last seq number.
db.addSeq(uint64(b.Len()))
if b.size() >= mdbFree {
db.rotateMem(0, false)
if b.size() >= memFree {
db.rotateMem(0)
}
return
}
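
From the caller's side, Write is usually driven through a Batch. A minimal sketch (path and keys illustrative):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/batch-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A batch is applied atomically: journal first, then memdb replay,
	// which is exactly the path shown in Write above.
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Put([]byte("k2"), []byte("v2"))
	b.Delete([]byte("k3"))

	if err := db.Write(b, &opt.WriteOptions{Sync: true}); err != nil {
		log.Fatal(err)
	}
}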
@@ -269,7 +249,8 @@ func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
return db.Write(b, wo)
}
// Delete deletes the value for the given key.
// Delete deletes the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
//
// It is safe to modify the contents of the arguments after Delete returns.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
@@ -281,8 +262,8 @@ func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
iter := mem.NewIterator(nil)
defer iter.Release()
return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
}
// CompactRange compacts the underlying DB for the given key range.
@@ -309,16 +290,16 @@ func (db *DB) CompactRange(r util.Range) error {
}
// Check for overlaps in memdb.
mdb := db.getEffectiveMem()
defer mdb.decref()
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
mem := db.getEffectiveMem()
defer mem.decref()
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0, false); err != nil {
if _, err := db.rotateMem(0); err != nil {
<-db.writeLockC
return err
}
<-db.writeLockC
if err := db.compTriggerWait(db.mcompCmdC); err != nil {
if err := db.compSendIdle(db.mcompCmdC); err != nil {
return err
}
} else {
@@ -326,33 +307,5 @@ func (db *DB) CompactRange(r util.Range) error {
}
// Table compaction.
return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
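
From the caller's side, CompactRange is driven with a util.Range; a zero Range covers the whole keyspace. A minimal sketch (path illustrative):

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/compact-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A zero Range means "whole keyspace": the memdb is rotated if it
	// overlaps, then table compaction runs, as in CompactRange above.
	if err := db.CompactRange(util.Range{}); err != nil {
		log.Fatal(err)
	}
}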
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
func (db *DB) SetReadOnly() error {
if err := db.ok(); err != nil {
return err
}
// Lock writer.
select {
case db.writeLockC <- struct{}{}:
db.compWriteLocking = true
case err := <-db.compPerErrC:
return err
case <-db.closeC:
return ErrClosed
}
// Set compaction read-only.
select {
case db.compErrSetC <- ErrReadOnly:
case perr := <-db.compPerErrC:
return perr
case <-db.closeC:
return ErrClosed
}
return nil
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
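
For completeness, the SetReadOnly method on the removed side is public API in goleveldb versions that include it: it holds the write lock and routes a persistent ErrReadOnly to compaction. A usage sketch (path illustrative; the exact error surfaced to later writers is an assumption):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/ro-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Locks the writer and poisons compaction with a read-only error.
	if err := db.SetReadOnly(); err != nil {
		log.Fatal(err)
	}

	// Subsequent writes are expected to fail until the DB is reopened.
	fmt.Println(db.Put([]byte("k"), []byte("v"), nil))
}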

Some files were not shown because too many files have changed in this diff.