Compare commits


1 Commit

Author | SHA1 | Message | Date
Jakob Borg | 24e691925c | Add Debian build stuff | 2015-05-11 22:09:38 +02:00
1957 changed files with 40915 additions and 817097 deletions

.gitattributes vendored (3 changed lines)

@@ -2,7 +2,8 @@
* text=auto
# Except the dependencies, which we leave alone
vendor/** -text=auto
Godeps/** -text=auto
# Diffs on these files are meaningless
gui.files.go -diff
*.svg -diff

.gitignore vendored (10 changed lines)

@@ -1,7 +1,5 @@
/syncthing
/stdiscosrv
./syncthing
syncthing.exe
stdiscosrv.exe
*.tar.gz
*.zip
*.asc
@@ -11,7 +9,7 @@ files/pidx
bin
perfstats*.csv
coverage.xml
syncthing.sig
!gui/scripts/syncthing
syncthing.md5
syncthing.exe.md5
RELEASE
deb
lib/auto/gui.files.go

AUTHORS (150 changed lines)

@@ -1,98 +1,56 @@
# This is the official list of Syncthing authors for copyright purposes.
# The format is:
#
# Name Name Name (nickname) <email1@example.com> <email2@example.com>
#
# The NICKS list is auto generated from this file.
Aaron Bieber (qbit) <qbit@deftly.net>
Adam Piggott (simplypeachy) <aD@simplypeachy.co.uk> <simplypeachy@users.noreply.github.com>
Alessandro G. (alessandro.g89) <alessandro.g89@gmail.com>
Alexander Graf (alex2108) <register-github@alex-graf.de>
Alexandre Viau (aviau) <alexandre@alexandreviau.net> <aviau@debian.org>
Anderson Mesquita (andersonvom) <andersonvom@gmail.com>
Andrew Dunham (andrew-d) <andrew@du.nham.ca>
Andrey D (scienmind) <scintertech@cryptolab.net>
Antony Male (canton7) <antony.male@gmail.com>
Arthur Axel fREW Schmidt (frioux) <frew@afoolishmanifesto.com> <frioux@gmail.com>
Audrius Butkevicius (AudriusButkevicius) <audrius.butkevicius@gmail.com>
Bart De Vries (mogwa1) <devriesb@gmail.com>
Ben Curthoys (bencurthoys) <ben@bencurthoys.com>
Ben Schulz (uok) <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom (bsidhom) <bsidhom@gmail.com>
Benny Ng (tpng) <benny.tpng@gmail.com>
Brandon Philips (philips) <brandon@ifup.org>
Brendan Long (brendanlong) <self@brendanlong.com>
Brian R. Becker (brbecker) <brbecker@gmail.com>
Caleb Callaway (cqcallaw) <enlightened.despot@gmail.com>
Carsten Hagemann (Moter8) <moter8@gmail.com>
Cathryne Linenweaver (Cathryne) <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Cedric Staniewski (xduugu) <cedric@gmx.ca>
Chris Howie (cdhowie) <me@chrishowie.com>
Chris Joel (cdata) <chris@scriptolo.gy>
Colin Kennedy (moshen) <moshen.colin@gmail.com>
Daniel Bergmann (brgmnn) <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Harte (norgeous) <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
Daniel Martí (mvdan) <mvdan@mvdan.cc>
David Rimmer (dinosore) <dinosore@dbrsoftware.co.uk>
Denis A. (dva) <denisva@gmail.com>
Dennis Wilson (snnd) <dw@risu.io>
Dominik Heidler (asdil12) <dominik@heidler.eu>
Elias Jarlebring (jarlebring) <jarlebring@gmail.com>
Emil Hessman (ceh) <emil@hessman.se>
Erik Meitner (WSGCSysadmin) <e.meitner@willystreet.coop>
Federico Castagnini (facastagnini) <federico.castagnini@gmail.com>
Felix Ableitner (Nutomic) <me@nutomic.com>
Felix Unterpaintner (bigbear2nd) <bigbear2nd@gmail.com>
Francois-Xavier Gsell (zukoo) <fxgsell@gmail.com>
Frank Isemann (fti7) <frank@isemann.name>
Gilli Sigurdsson (gillisig) <gilli@vx.is>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss (seehuhn) <voss@seehuhn.de>
Johan Vromans (sciurius) <jvromans@squirrel.nl>
Karol Różycki (krozycki) <rozycki.karol@gmail.com>
Kelong Cong (kc1212) <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
Ken'ichi Kamada (kamadak) <kamada@nanohz.org>
Kevin Allen (ironmig) <kma1660@gmail.com>
Lars K.W. Gohlke (lkwg82) <lkwg82@gmx.de>
Laurent Etiemble (letiemble) <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
Lode Hoste (Zillode) <zillode@zillode.be>
Lord Landon Agahnim (LordLandon) <lordlandon@gmail.com>
Majed Abdulaziz (majedev) <majed.alhajry@gmail.com>
Marc Laporte (marclaporte) <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol (kilburn) <kilburn@la3.org>
Marcin Dziadus (marcindziadus) <dziadus.marcin@gmail.com>
Mateusz Naściszewski (mateon1) <matin1111@wp.pl>
Matt Burke (burkemw3) <mburke@amplify.com> <burkemw3@gmail.com>
Max Schulze (kralo) <max.schulze@online.de> <kralo@users.noreply.github.com>
Michael Jephcote (Rewt0r) <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Ploujnikov (plouj) <ploujj@gmail.com>
Michael Tilli (pyfisch) <pyfisch@gmail.com>
Nate Morrison (nrm21) <natemorrison@gmail.com>
Pascal Jungblut (pascalj) <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg (peterhoeg) <peter@speartail.com>
Philippe Schommers (filoozoom) <philippe@schommers.be>
Phill Luby (pluby) <phill.luby@newredo.com>
Piotr Bejda (piobpl) <piotrb10@gmail.com>
Ryan Sullivan (KayoticSully) <kayoticsully@gmail.com>
Scott Klupfel (kluppy) <kluppy@going2blue.com>
Sergey Mishin (ralder) <ralder@yandex.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Tim Abell (timabell) <tim@timwise.co.uk>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>
Tyler Brazier (tylerbrazier) <tyler@tylerbrazier.com>
Veeti Paananen (veeti) <veeti.paananen@rojekti.fi>
Victor Buinsky (buinsky) <vix_booja@tut.by>
Vil Brekin (Vilbrekin) <vilbrekin@gmail.com>
William A. Kennington III (wkennington) <william@wkennington.com>
Wulf Weich (wweich) <wweich@users.noreply.github.com> <wweich@gmx.de>
Yannic A. (eipiminus1) <eipiminusone+github@gmail.com> <eipiminus1@users.noreply.github.com>
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Curthoys <ben@bencurthoys.com>
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Brendan Long <self@brendanlong.com>
Caleb Callaway <enlightened.despot@gmail.com>
Carsten Hagemann <moter8@gmail.com>
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Chris Joel <chris@scriptolo.gy>
Colin Kennedy <moshen.colin@gmail.com>
Daniel Martí <mvdan@mvdan.cc>
Dennis Wilson <dw@risu.io>
Dominik Heidler <dominik@heidler.eu>
Elias Jarlebring <jarlebring@gmail.com>
Emil Hessman <emil@hessman.se>
Federico Castagnini <federico.castagnini@gmail.com>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Francois-Xavier Gsell <fxgsell@gmail.com>
Gilli Sigurdsson <gilli@vx.is>
Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec <dzardacz@gmail.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Johan Vromans <jvromans@squirrel.nl>
Karol Różycki <rozycki.karol@gmail.com>
Ken'ichi Kamada <kamada@nanohz.org>
Lode Hoste <zillode@zillode.be>
Lord Landon Agahnim <lordlandon@gmail.com>
Marcin Dziadus <dziadus.marcin@gmail.com>
Marc Laporte <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol <kilburn@la3.org>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
Peter Hoeg <peter@speartail.com>
Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Sergey Mishin <ralder@yandex.ru>
Stefan Tatschner <stefan@sevenbyte.org>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>
Tomas Cerveny <kozec@kozec.com>
Tully Robinson <tully@tojr.org>
Veeti Paananen <veeti.paananen@rojekti.fi>
Vil Brekin <vilbrekin@gmail.com>


@@ -32,32 +32,109 @@ latest info on Transifex.
## Contributing Code
Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](http://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://github.com/syncthing/syncthing/wiki/FAQ#why-are-you-being-so-hard-on-my-pull-request);
it's all in the name of quality. :) Following the points below will make this
a smoother process.
## Contributing Documentation
Individuals making significant and valuable contributions are given
commit-access to the project. If you make a significant contribution and
are not considered for commit-access, please contact any of the
Syncthing core team members.
Updates to the [documentation site](http://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.
### Authorship
All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit:
$ git config --global user.name "Jane Doe"
$ git config --global user.email janedoe@example.com
You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.
### Core Team
The Syncthing core team currently consists of the following members:
- Jakob Borg (@calmh)
- Audrius Butkevicius (@AudriusButkevicius)
## Coding Style
- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
as much as makes sense.
- All text files use Unix line endings.
- Each commit should be `go fmt` clean.
- The commit message subject should be a single short sentence
describing the change, starting with a capital letter.
- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject.
- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line (see the
sketch after this list).
- A contribution solving a single issue or introducing a single new
feature should probably be a single commit based on the current
`master` branch. You may be asked to "rebase" or "squash" your pull
request to make sure this is the case, especially if there have been
amendments during review.
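For instance, a minimal sketch (not taken from the repository) of an
import block grouped this way:

package example

import (
	"fmt"      // standard library first
	"net/http"

	"github.com/calmh/xdr" // third-party packages after a blank line
)

// describe uses each import so the fragment compiles on its own.
func describe(w *xdr.Writer) string {
	return fmt.Sprintf("wrote %d bytes (HTTP %d)", w.Tot(), http.StatusOK)
}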
## Licensing
All contributions are made available under the same license as the already
existing material being contributed to. For most of the project and unless
otherwise stated this means MPLv2, but there are exceptions:
All contributions are made under the same MPLv2 license as the rest of
the project, except documentation, user interface text and translation
strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
- Certain commands (under cmd/...) may have a separate license, indicated by
the presence of a LICENSE file in the corresponding directory.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.
- The documentation (man/...) is licensed under the Creative Commons
Attribution 4.0 International License.
## Building
- Projects under vendor/... are copyright by and licensed from their
respective original authors. Contributions should be made to the original
project, not here.
[See the documentation](https://github.com/syncthing/syncthing/wiki/Building)
on how to get started with a build environment.
Regardless of the license in effect, you retain the copyright to your
contribution.
## Branches
- `master` is the main branch containing good code that will end up in
the next release. You should base your work on it. It won't ever be
rebased or force-pushed to.
- `vx.y` branches exist to make patch releases on otherwise obsolete
minor releases. Should only contain fixes cherry picked from master.
Don't base any work on them.
- Other branches are probably topic branches and may be subject to
rebasing. Don't base any work on them unless you specifically know
otherwise.
## Tags
All releases are tagged semver style as `vx.y.z`. Release tags are
signed by GPG key BCE524C7.
## Tests
Yes please!
## Documentation
[Over here!](https://github.com/syncthing/syncthing/wiki)
## License
MPLv2

Godeps/Godeps.json generated Normal file (77 changed lines)

@@ -0,0 +1,77 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.4",
"Packages": [
"./cmd/..."
],
"Deps": [
{
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
},
{
"ImportPath": "github.com/calmh/logger",
"Rev": "4d4e2801954c5581e4c2a80a3d3beb3b3645fd04"
},
{
"ImportPath": "github.com/calmh/luhn",
"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
},
{
"ImportPath": "github.com/juju/ratelimit",
"Rev": "c5abe513796336ee2869745bff0638508450e9c5"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
},
{
"ImportPath": "github.com/syncthing/protocol",
"Rev": "e7db2648034fb71b051902a02bc25d4468ed492e"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "87e4e645d80ae9c537e8f2dee52b28036a5dd75e"
},
{
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
},
{
"ImportPath": "github.com/thejerf/suture",
"Rev": "ff19fb384c3fe30f42717967eaa69da91e5f317c"
},
{
"ImportPath": "github.com/vitrun/qart/coding",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/gf256",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/qr",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "c57d4a71915a248dbad846d60825145062b4c18e"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2076e9cab4147459c82bc81169e46c139d358547"
}
]
}

Godeps/Readme generated Normal file (5 changed lines)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

Godeps/_workspace/.gitignore generated vendored Normal file (2 changed lines)

@@ -0,0 +1,2 @@
/pkg
/bin


@@ -0,0 +1 @@
/lz4-example/lz4-example


@@ -0,0 +1,7 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- tip


@@ -4,7 +4,7 @@ go-lz4
go-lz4 is a port of the LZ4 lossless compression algorithm to Go. The original C code
is located at:
https://github.com/Cyan4973/lz4
https://code.google.com/p/lz4/
Status
------


@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
length += ln
}
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
if int(d.spos+length) > len(d.src) {
return nil, ErrCorrupt
}
@@ -179,12 +179,7 @@ func Decode(dst, src []byte) ([]byte, error) {
}
literal := d.dpos - d.ref
if literal < 4 {
if int(d.dpos+4) > len(d.dst) {
return nil, ErrCorrupt
}
d.cp(4, decr[literal])
} else {
length += 4


@@ -25,10 +25,8 @@
package lz4
import (
"encoding/binary"
"errors"
)
import "encoding/binary"
import "errors"
const (
minMatch = 4


@@ -0,0 +1,15 @@
logger
======
A small wrapper around `log` to provide log levels.
Documentation
-------------
http://godoc.org/github.com/calmh/logger
License
-------
MIT
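A hypothetical usage sketch (not part of this change set; everything
except the package API shown in logger.go below is made up):

package main

import "github.com/calmh/logger"

func main() {
	l := logger.New()

	// Register a handler that is called for each message logged at LevelWarn.
	l.AddHandler(logger.LevelWarn, func(level logger.LogLevel, msg string) {
		// e.g. forward warnings to a GUI event queue
		_ = msg
	})

	l.Infoln("starting up")                // printed with an INFO: prefix
	l.Warnf("%d folders misconfigured", 2) // printed with a WARNING: prefix and passed to the handler
}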

Godeps/_workspace/src/github.com/calmh/logger/logger.go generated vendored Normal file (179 changed lines)

@@ -0,0 +1,179 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package logger implements a standardized logger with callback functionality
package logger
import (
"fmt"
"log"
"os"
"strings"
"sync"
)
type LogLevel int
const (
LevelDebug LogLevel = iota
LevelVerbose
LevelInfo
LevelOK
LevelWarn
LevelFatal
NumLevels
)
// A MessageHandler is called with the log level and message text.
type MessageHandler func(l LogLevel, msg string)
type Logger struct {
logger *log.Logger
handlers [NumLevels][]MessageHandler
mut sync.Mutex
}
// The default logger logs to standard output with a time prefix.
var DefaultLogger = New()
func New() *Logger {
return &Logger{
logger: log.New(os.Stdout, "", log.Ltime),
}
}
// AddHandler registers a new MessageHandler to receive messages with the
// specified log level or above.
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
l.mut.Lock()
defer l.mut.Unlock()
l.handlers[level] = append(l.handlers[level], h)
}
// See log.SetFlags
func (l *Logger) SetFlags(flag int) {
l.logger.SetFlags(flag)
}
// See log.SetPrefix
func (l *Logger) SetPrefix(prefix string) {
l.logger.SetPrefix(prefix)
}
func (l *Logger) callHandlers(level LogLevel, s string) {
for _, h := range l.handlers[level] {
h(level, strings.TrimSpace(s))
}
}
// Debugln logs a line with a DEBUG prefix.
func (l *Logger) Debugln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Debugf logs a formatted line with a DEBUG prefix.
func (l *Logger) Debugf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Verboseln logs a line with a VERBOSE prefix.
func (l *Logger) Verboseln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Verbosef logs a formatted line with a VERBOSE prefix.
func (l *Logger) Verbosef(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Infoln logs a line with an INFO prefix.
func (l *Logger) Infoln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Infof logs a formatted line with an INFO prefix.
func (l *Logger) Infof(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Okln logs a line with an OK prefix.
func (l *Logger) Okln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Okf logs a formatted line with an OK prefix.
func (l *Logger) Okf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Warnln logs a line with a WARNING prefix.
func (l *Logger) Warnln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Warnf logs a formatted line with a WARNING prefix.
func (l *Logger) Warnf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Fatalln logs a line with a FATAL prefix and exits the process with exit
// code 1.
func (l *Logger) Fatalln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
// exit code 1.
func (l *Logger) Fatalf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}


@@ -0,0 +1,58 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package logger
import (
"strings"
"testing"
)
func TestAPI(t *testing.T) {
l := New()
l.SetFlags(0)
l.SetPrefix("testing")
debug := 0
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
info := 0
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
ok := 0
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
l.Debugf("test %d", 0)
l.Debugln("test", 0)
l.Infof("test %d", 1)
l.Infoln("test", 1)
l.Warnf("test %d", 2)
l.Warnln("test", 2)
l.Okf("test %d", 3)
l.Okln("test", 3)
if debug != 2 {
t.Errorf("Debug handler called %d != 2 times", debug)
}
if info != 2 {
t.Errorf("Info handler called %d != 2 times", info)
}
if warn != 2 {
t.Errorf("Warn handler called %d != 2 times", warn)
}
if ok != 2 {
t.Errorf("Ok handler called %d != 2 times", ok)
}
}
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
return func(l LogLevel, msg string) {
*counter++
if l != expectl {
t.Errorf("Incorrect message level %d != %d", l, expectl)
}
if !strings.HasSuffix(msg, expectmsg) {
t.Errorf("%q does not end with %q", msg, expectmsg)
}
}
}


@@ -0,0 +1 @@
coverage.out

Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml generated vendored Normal file (19 changed lines)

@@ -0,0 +1,19 @@
language: go
go:
- tip
install:
- export PATH=$PATH:$HOME/gopath/bin
- go get code.google.com/p/go.tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- ./generate.sh
- go test -coverprofile=coverage.out
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
env:
global:
secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=


@@ -1,10 +1,12 @@
xdr
===
[![Build Status](https://img.shields.io/circleci/project/calmh/xdr.svg?style=flat-square)](https://circleci.com/gh/calmh/xdr)
[![Build Status](https://img.shields.io/travis/calmh/xdr.svg?style=flat)](https://travis-ci.org/calmh/xdr)
[![Coverage Status](https://img.shields.io/coveralls/calmh/xdr.svg?style=flat)](https://coveralls.io/r/calmh/xdr?branch=master)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/calmh/xdr)
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT)
This is an XDR marshalling/unmarshalling library. It uses code generation and
not reflection.
This is an XDR encoding/decoding library. It uses code generation and
not reflection. It supports the IPDR bastardized XDR format when built
with `-tags ipdr`.
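The generated codecs elsewhere in this change are built on the Writer
and Reader types; as a small standalone sketch (not part of this
commit), they can also be driven directly:

package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf bytes.Buffer

	// Encode a length-prefixed, padded byte field followed by a uint32.
	w := xdr.NewWriter(&buf)
	w.WriteBytes([]byte("hello"))
	w.WriteUint32(42)
	if err := w.Error(); err != nil {
		panic(err)
	}

	// Decode the fields back in the same order.
	r := xdr.NewReader(&buf)
	bs := r.ReadBytes()
	n := r.ReadUint32()
	fmt.Println(string(bs), n, r.Error())
}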


@@ -0,0 +1,117 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import (
"io"
"io/ioutil"
"testing"
"github.com/calmh/xdr"
)
type XDRBenchStruct struct {
I1 uint64
I2 uint32
I3 uint16
I4 uint8
Bs0 []byte // max:128
Bs1 []byte
S0 string // max:128
S1 string
}
var res []byte // not to be optimized away
var s = XDRBenchStruct{
I1: 42,
I2: 43,
I3: 44,
I4: 45,
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
S0: "Hello World! String one.",
S1: "Hello World! String two.",
}
var e []byte
func init() {
e, _ = s.MarshalXDR()
}
func BenchmarkThisMarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
res, _ = s.MarshalXDR()
}
}
func BenchmarkThisUnmarshal(b *testing.B) {
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.UnmarshalXDR(e)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkThisEncode(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := s.EncodeXDR(ioutil.Discard)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkThisEncoder(b *testing.B) {
w := xdr.NewWriter(ioutil.Discard)
for i := 0; i < b.N; i++ {
_, err := s.EncodeXDRInto(w)
if err != nil {
b.Fatal(err)
}
}
}
type repeatReader struct {
data []byte
}
func (r *repeatReader) Read(bs []byte) (n int, err error) {
if len(bs) > len(r.data) {
err = io.EOF
}
n = copy(bs, r.data)
r.data = r.data[n:]
return n, err
}
func (r *repeatReader) Reset(bs []byte) {
r.data = bs
}
func BenchmarkThisDecode(b *testing.B) {
rr := &repeatReader{e}
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.DecodeXDR(rr)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}
func BenchmarkThisDecoder(b *testing.B) {
rr := &repeatReader{e}
r := xdr.NewReader(rr)
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.DecodeXDRFrom(r)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}


@@ -0,0 +1,201 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
/*
XDRBenchStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I1 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I2 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | I3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ uint8 Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Bs0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs0 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Bs1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs1 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S0 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S1 (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct XDRBenchStruct {
unsigned hyper I1;
unsigned int I2;
unsigned int I3;
uint8 I4;
opaque Bs0<128>;
opaque Bs1<>;
string S0<128>;
string S1<>;
}
*/
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
}
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o XDRBenchStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}
func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteUint64(o.I1)
xw.WriteUint32(o.I2)
xw.WriteUint16(o.I3)
xw.WriteUint8(o.I4)
if l := len(o.Bs0); l > 128 {
return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
}
xw.WriteBytes(o.Bs0)
xw.WriteBytes(o.Bs1)
if l := len(o.S0); l > 128 {
return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
}
xw.WriteString(o.S0)
xw.WriteString(o.S1)
return xw.Tot(), xw.Error()
}
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
}
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
}
func (o *XDRBenchStruct) DecodeXDRFrom(xr *xdr.Reader) error {
o.I1 = xr.ReadUint64()
o.I2 = xr.ReadUint32()
o.I3 = xr.ReadUint16()
o.I4 = xr.ReadUint8()
o.Bs0 = xr.ReadBytesMax(128)
o.Bs1 = xr.ReadBytes()
o.S0 = xr.ReadStringMax(128)
o.S1 = xr.ReadString()
return xr.Error()
}
/*
repeatReader Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of data |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ data (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct repeatReader {
opaque data<>;
}
*/
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
}
func (o repeatReader) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o repeatReader) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}
func (o repeatReader) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteBytes(o.data)
return xw.Tot(), xw.Error()
}
func (o *repeatReader) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
}
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
}
func (o *repeatReader) DecodeXDRFrom(xr *xdr.Reader) error {
o.data = xr.ReadBytes()
return xr.Error()
}


@@ -28,7 +28,6 @@ type fieldInfo struct {
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
Convert string // what to convert to when encoding, i.e. "uint64"
Max int // max size for slices and strings
Submax int // max size for strings inside slices
}
type structInfo struct {
@@ -36,73 +35,31 @@ type structInfo struct {
Fields []fieldInfo
}
func (i structInfo) SizeExpr() string {
var xdrSizes = map[string]int{
"int8": 4,
"uint8": 4,
"int16": 4,
"uint16": 4,
"int32": 4,
"uint32": 4,
"int64": 8,
"uint64": 8,
"int": 8,
"bool": 4,
}
var terms []string
nl := ""
for _, f := range i.Fields {
if size := xdrSizes[f.FieldType]; size > 0 {
if f.IsSlice {
terms = append(terms, nl+"4+len(o."+f.Name+")*"+strconv.Itoa(size))
} else {
terms = append(terms, strconv.Itoa(size))
}
} else {
switch f.FieldType {
case "string", "[]byte":
if f.IsSlice {
terms = append(terms, nl+"4+xdr.SizeOfSlice(o."+f.Name+")")
} else {
terms = append(terms, nl+"4+len(o."+f.Name+")+xdr.Padding(len(o."+f.Name+"))")
}
default:
if f.IsSlice {
terms = append(terms, nl+"4+xdr.SizeOfSlice(o."+f.Name+")")
} else {
terms = append(terms, nl+"o."+f.Name+".XDRSize()")
}
}
}
nl = "\n"
}
return strings.Join(terms, "+")
}
var headerData = `// ************************************************************
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package {{.Package}}
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
`
`))
var encoderData = `
func (o {{.Name}}) XDRSize() int {
return {{.SizeExpr}}
var encodeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
}//+n
func (o {{.Name}}) MarshalXDR() ([]byte, error) {
buf:= make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}//+n
func (o {{.Name}}) MustMarshalXDR() []byte {
func (o {{.TypeName}}) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
@@ -110,157 +67,106 @@ func (o {{.Name}}) MustMarshalXDR() []byte {
return bs
}//+n
func (o {{.Name}}) MarshalXDRInto(m *xdr.Marshaller) error {
{{range $fi := .Fields}}
{{if $fi.IsSlice}}
{{template "marshalSlice" $fi}}
{{else}}
{{template "marshalValue" $fi}}
{{end}}
{{end}}
return m.Error
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}//+n
{{define "marshalValue"}}
{{if ne .Convert ""}}
m.Marshal{{.Encoder}}({{.Convert}}(o.{{.Name}}))
{{else if .IsBasic}}
{{if ge .Max 1}}
if l := len(o.{{.Name}}); l > {{.Max}} {
return xdr.ElementSizeExceeded("{{.Name}}", l, {{.Max}})
func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
}
{{end}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
{{else}}
_, err := o.{{$fieldInfo.Name}}.EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
{{end}}
{{else}}
{{if ge $fieldInfo.Max 1}}
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
}
{{end}}
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
{{else if $fieldInfo.IsBasic}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
{{else}}
_, err := o.{{$fieldInfo.Name}}[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
{{end}}
}
{{end}}
m.Marshal{{.Encoder}}(o.{{.Name}})
{{else}}
if err := o.{{.Name}}.MarshalXDRInto(m); err != nil {
return err
}
{{end}}
{{end}}
{{define "marshalSlice"}}
{{if ge .Max 1}}
if l := len(o.{{.Name}}); l > {{.Max}} {
return xdr.ElementSizeExceeded("{{.Name}}", l, {{.Max}})
}
{{end}}
m.MarshalUint32(uint32(len(o.{{.Name}})))
for i := range o.{{.Name}} {
{{if ne .Convert ""}}
m.Marshal{{.Encoder}}({{.Convert}}(o.{{.Name}}[i]))
{{else if .IsBasic}}
m.Marshal{{.Encoder}}(o.{{.Name}}[i])
{{else}}
if err := o.{{.Name}}[i].MarshalXDRInto(m); err != nil {
return err
}
{{end}}
}
{{end}}
func (o *{{.Name}}) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *{{.Name}}) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
{{range $fi := .Fields}}
{{if $fi.IsSlice}}
{{template "unmarshalSlice" $fi}}
{{else}}
{{template "unmarshalValue" $fi}}
{{end}}
{{end}}
return u.Error
return xw.Tot(), xw.Error()
}//+n
{{define "unmarshalValue"}}
{{if ne .Convert ""}}
o.{{.Name}} = {{.FieldType}}(u.Unmarshal{{.Encoder}}())
{{else if .IsBasic}}
{{if ge .Max 1}}
o.{{.Name}} = u.Unmarshal{{.Encoder}}Max({{.Max}})
{{else}}
o.{{.Name}} = u.Unmarshal{{.Encoder}}()
{{end}}
{{else}}
(&o.{{.Name}}).UnmarshalXDRFrom(u)
{{end}}
{{end}}
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
}//+n
{{define "unmarshalSlice"}}
_{{.Name}}Size := int(u.UnmarshalUint32())
if _{{.Name}}Size < 0 {
return xdr.ElementSizeExceeded("{{.Name}}", _{{.Name}}Size, {{.Max}})
} else if _{{.Name}}Size == 0 {
o.{{.Name}} = nil
} else {
{{if ge .Max 1}}
if _{{.Name}}Size > {{.Max}} {
return xdr.ElementSizeExceeded("{{.Name}}", _{{.Name}}Size, {{.Max}})
}
{{end}}
if _{{.Name}}Size <= len(o.{{.Name}}) {
{{if eq .FieldType "string"}}
for i := _{{.Name}}Size; i < len(o.{{.Name}}); i++ { o.{{.Name}}[i] = "" }
{{end}}
{{if eq .FieldType "[]byte"}}
for i := _{{.Name}}Size; i < len(o.{{.Name}}); i++ { o.{{.Name}}[i] = nil }
{{end}}
o.{{.Name}} = o.{{.Name}}[:_{{.Name}}Size]
} else {
o.{{.Name}} = make([]{{.FieldType}}, _{{.Name}}Size)
}
for i := range o.{{.Name}} {
{{if ne .Convert ""}}
o.{{.Name}}[i] = {{.FieldType}}(u.Unmarshal{{.Encoder}}())
{{else if .IsBasic}}
{{if ge .Submax 1}}
o.{{.Name}}[i] = u.Unmarshal{{.Encoder}}Max({{.Submax}})
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
}//+n
func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
{{if ge $fieldInfo.Max 1}}
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
{{else}}
o.{{.Name}}[i] = u.Unmarshal{{.Encoder}}()
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
{{end}}
{{else}}
(&o.{{.Name}}[i]).UnmarshalXDRFrom(u)
(&o.{{$fieldInfo.Name}}).DecodeXDRFrom(xr)
{{end}}
}
}
{{end}}
`
{{else}}
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
if _{{$fieldInfo.Name}}Size < 0 {
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
}
{{if ge $fieldInfo.Max 1}}
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
}
{{end}}
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
for i := range o.{{$fieldInfo.Name}} {
{{if ne $fieldInfo.Convert ""}}
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
{{else if $fieldInfo.IsBasic}}
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
{{else}}
(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
{{end}}
}
{{end}}
{{end}}
return xr.Error()
}`))
var (
encodeTpl = template.Must(template.New("encoder").Parse(encoderData))
headerTpl = template.Must(template.New("header").Parse(headerData))
)
var emptyTypeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.Name}}) XDRSize() int {
return 0
}
func (o {{.Name}}) MarshalXDR() ([]byte, error) {
return nil, nil
}//+n
func (o {{.Name}}) MustMarshalXDR() []byte {
return nil
}//+n
func (o {{.Name}}) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}//+n
func (o *{{.Name}}) UnmarshalXDR(bs []byte) error {
return nil
}//+n
func (o *{{.Name}}) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}//+n
`))
var maxRe = regexp.MustCompile(`(?:\Wmax:)(\d+)(?:\s*,\s*(\d+))?`)
var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
type typeSet struct {
Type string
@@ -292,15 +198,11 @@ func handleStruct(t *ast.StructType) []fieldInfo {
}
fn := sf.Names[0].Name
var max1, max2 int
var max = 0
if sf.Comment != nil {
c := sf.Comment.List[0].Text
m := maxRe.FindStringSubmatch(c)
if len(m) >= 2 {
max1, _ = strconv.Atoi(m[1])
}
if len(m) >= 3 {
max2, _ = strconv.Atoi(m[2])
if m := maxRe.FindStringSubmatch(c); m != nil {
max, _ = strconv.Atoi(m[1])
}
if strings.Contains(c, "noencode") {
continue
@@ -318,16 +220,14 @@ func handleStruct(t *ast.StructType) []fieldInfo {
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max1,
Submax: max2,
Max: max,
}
} else {
f = fieldInfo{
Name: fn,
IsBasic: false,
FieldType: tn,
Max: max1,
Submax: max2,
Max: max,
}
}
@@ -342,11 +242,10 @@ func handleStruct(t *ast.StructType) []fieldInfo {
f = fieldInfo{
Name: fn,
IsBasic: true,
FieldType: "[]" + tn,
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max1,
Submax: max2,
Max: max,
}
} else if enc, ok := xdrEncoders[tn]; ok {
f = fieldInfo{
@@ -356,16 +255,14 @@ func handleStruct(t *ast.StructType) []fieldInfo {
FieldType: tn,
Encoder: enc.Encoder,
Convert: enc.Type,
Max: max1,
Submax: max2,
Max: max,
}
} else {
f = fieldInfo{
Name: fn,
IsSlice: true,
FieldType: tn,
Max: max1,
Submax: max2,
Max: max,
}
}
@@ -373,8 +270,7 @@ func handleStruct(t *ast.StructType) []fieldInfo {
f = fieldInfo{
Name: fn,
FieldType: ft.Sel.Name,
Max: max1,
Submax: max2,
Max: max,
}
}
@@ -385,22 +281,23 @@ func handleStruct(t *ast.StructType) []fieldInfo {
}
func generateCode(output io.Writer, s structInfo) {
name := s.Name
fs := s.Fields
var buf bytes.Buffer
var err error
if len(s.Fields) == 0 {
// This is an empty type. We can create a quite simple codec for it.
err = emptyTypeTpl.Execute(&buf, s)
} else {
// Generate with the default template.
err = encodeTpl.Execute(&buf, s)
}
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
if err != nil {
panic(err)
}
bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
output.Write(bs)
bs, err = format.Source(bs)
if err != nil {
panic(err)
}
fmt.Fprintln(output, string(bs))
}
func uncamelize(s string) string {
@@ -414,14 +311,6 @@ func generateDiagram(output io.Writer, s structInfo) {
fs := s.Fields
fmt.Fprintln(output, sn+" Structure:")
if len(fs) == 0 {
fmt.Fprintln(output, "(contains no fields)")
fmt.Fprintln(output)
fmt.Fprintln(output)
return
}
fmt.Fprintln(output)
fmt.Fprintln(output, " 0 1 2 3")
fmt.Fprintln(output, " 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
@@ -432,46 +321,46 @@ func generateDiagram(output io.Writer, s structInfo) {
tn := f.FieldType
name := uncamelize(f.Name)
suffix := ""
if f.IsSlice {
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
fmt.Fprintln(output, line)
suffix = " (n items)"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
}
switch tn {
case "bool":
fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
fmt.Fprintln(output, line)
case "int16", "uint16":
fmt.Fprintf(output, "| %s | %s |\n", center("16 zero bits", 29), center(name, 29))
case "int8", "uint8":
fmt.Fprintf(output, "| %s | %s |\n", center("24 zero bits", 45), center(name, 13))
fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
fmt.Fprintln(output, line)
case "int32", "uint32":
fmt.Fprintf(output, "| %s |\n", center(name+suffix, 61))
fmt.Fprintf(output, "| %s |\n", center(name, 61))
fmt.Fprintln(output, line)
case "int64", "uint64":
fmt.Fprintf(output, "| %-61s |\n", "")
fmt.Fprintf(output, "+ %s +\n", center(name+" (64 bits)", 61))
fmt.Fprintf(output, "| %-61s |\n", "")
case "string", "[]byte":
fmt.Fprintln(output, line)
case "string", "byte": // XXX We assume slice of byte!
fmt.Fprintf(output, "| %s |\n", center("Length of "+name, 61))
fmt.Fprintln(output, line)
fmt.Fprintf(output, "/ %61s /\n", "")
fmt.Fprintf(output, "\\ %s \\\n", center(name+" (length + padded data)", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(name+" (variable length)", 61))
fmt.Fprintf(output, "/ %61s /\n", "")
fmt.Fprintln(output, line)
default:
if f.IsSlice {
tn = "Zero or more " + tn + " Structures"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
fmt.Fprintf(output, "/ %s /\n", center("", 61))
} else {
tn = tn + " Structure"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
fmt.Fprintf(output, "/ %s /\n", center("", 61))
}
fmt.Fprintln(output, line)
}
if f.IsSlice {
fmt.Fprintf(output, "/ %s /\n", center("", 61))
}
fmt.Fprintln(output, line)
}
fmt.Fprintln(output)
fmt.Fprintln(output)
@@ -496,9 +385,9 @@ func generateXdr(output io.Writer, s structInfo) {
}
switch tn {
case "int8", "int16", "int32":
case "int16", "int32":
fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
case "uint8", "uint16", "uint32":
case "uint16", "uint32":
fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
case "int64":
fmt.Fprintf(output, "\thyper %s%s;\n", fn, suf)
@@ -506,7 +395,7 @@ func generateXdr(output io.Writer, s structInfo) {
fmt.Fprintf(output, "\tunsigned hyper %s%s;\n", fn, suf)
case "string":
fmt.Fprintf(output, "\tstring %s<%s>;\n", fn, l)
case "[]byte":
case "byte":
fmt.Fprintf(output, "\topaque %s<%s>;\n", fn, l)
default:
fmt.Fprintf(output, "\t%s %s%s;\n", tn, fn, suf)
@@ -558,22 +447,6 @@ func main() {
i := inspector(&structs)
ast.Inspect(f, i)
buf := new(bytes.Buffer)
headerTpl.Execute(buf, map[string]string{"Package": f.Name.Name})
for _, s := range structs {
fmt.Fprintf(buf, "\n/*\n\n")
generateDiagram(buf, s)
generateXdr(buf, s)
fmt.Fprintf(buf, "*/\n")
generateCode(buf, s)
}
bs, err := format.Source(buf.Bytes())
if err != nil {
log.Print(buf.String())
log.Fatal(err)
}
var output io.Writer = os.Stdout
if *outputFile != "" {
fd, err := os.Create(*outputFile)
@@ -582,5 +455,13 @@ func main() {
}
output = fd
}
output.Write(bs)
headerTpl.Execute(output, map[string]string{"Package": f.Name.Name})
for _, s := range structs {
fmt.Fprintf(output, "\n/*\n\n")
generateDiagram(output, s)
generateXdr(output, s)
fmt.Fprintf(output, "*/\n")
generateCode(output, s)
}
}

Godeps/_workspace/src/github.com/calmh/xdr/debug.go generated vendored Normal file (16 changed lines)

@@ -0,0 +1,16 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"log"
"os"
)
var (
debug = len(os.Getenv("XDRTRACE")) > 0
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
)
const maxDebugBytes = 32


@@ -1,5 +1,5 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package xdr implements an XDR (RFC 4506) marshaller/unmarshaller.
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
package xdr


@@ -0,0 +1,79 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import (
"bytes"
"math/rand"
"reflect"
"testing"
"testing/quick"
"github.com/calmh/xdr"
)
// Contains all supported types
type TestStruct struct {
I int
I8 int8
UI8 uint8
I16 int16
UI16 uint16
I32 int32
UI32 uint32
I64 int64
UI64 uint64
BS []byte // max:1024
S string // max:1024
C Opaque
SS []string // max:1024
}
type Opaque [32]byte
func (u *Opaque) EncodeXDRInto(w *xdr.Writer) (int, error) {
return w.WriteRaw(u[:])
}
func (u *Opaque) DecodeXDRFrom(r *xdr.Reader) (int, error) {
return r.ReadRaw(u[:])
}
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
var u Opaque
for i := range u[:] {
u[i] = byte(rand.Int())
}
return reflect.ValueOf(u)
}
func TestEncDec(t *testing.T) {
fn := func(t0 TestStruct) bool {
bs, err := t0.MarshalXDR()
if err != nil {
t.Fatal(err)
}
var t1 TestStruct
err = t1.UnmarshalXDR(bs)
if err != nil {
t.Fatal(err)
}
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
if t0.I != t1.I ||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
bytes.Compare(t0.BS, t1.BS) != 0 ||
t0.S != t1.S || t0.C != t1.C {
t.Logf("%#v", t0)
t.Logf("%#v", t1)
return false
}
return true
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}


@@ -0,0 +1,185 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"bytes"
"io"
"github.com/calmh/xdr"
)
/*
TestStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ int Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ int8 Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ uint8 Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | I16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | UI16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| UI32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ UI64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of BS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ BS (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of S |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Opaque Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ SS (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct TestStruct {
int I;
int8 I8;
uint8 UI8;
int I16;
unsigned int UI16;
int I32;
unsigned int UI32;
hyper I64;
unsigned hyper UI64;
opaque BS<1024>;
string S<1024>;
Opaque C;
string SS<1024>;
}
*/
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
}
func (o TestStruct) MarshalXDR() ([]byte, error) {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o TestStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}
func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteUint64(uint64(o.I))
xw.WriteUint8(uint8(o.I8))
xw.WriteUint8(o.UI8)
xw.WriteUint16(uint16(o.I16))
xw.WriteUint16(o.UI16)
xw.WriteUint32(uint32(o.I32))
xw.WriteUint32(o.UI32)
xw.WriteUint64(uint64(o.I64))
xw.WriteUint64(o.UI64)
if l := len(o.BS); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
}
xw.WriteBytes(o.BS)
if l := len(o.S); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
}
xw.WriteString(o.S)
_, err := o.C.EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
if l := len(o.SS); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
}
xw.WriteUint32(uint32(len(o.SS)))
for i := range o.SS {
xw.WriteString(o.SS[i])
}
return xw.Tot(), xw.Error()
}
func (o *TestStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
}
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
}
func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
o.I = int(xr.ReadUint64())
o.I8 = int8(xr.ReadUint8())
o.UI8 = xr.ReadUint8()
o.I16 = int16(xr.ReadUint16())
o.UI16 = xr.ReadUint16()
o.I32 = int32(xr.ReadUint32())
o.UI32 = xr.ReadUint32()
o.I64 = int64(xr.ReadUint64())
o.UI64 = xr.ReadUint64()
o.BS = xr.ReadBytesMax(1024)
o.S = xr.ReadStringMax(1024)
(&o.C).DecodeXDRFrom(xr)
_SSSize := int(xr.ReadUint32())
if _SSSize < 0 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}
if _SSSize > 1024 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}
o.SS = make([]string, _SSSize)
for i := range o.SS {
o.SS[i] = xr.ReadString()
}
return xr.Error()
}

Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go generated vendored Normal file (10 changed lines)

@@ -0,0 +1,10 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build ipdr
package xdr
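// pad always returns 0 under the ipdr build tag: the IPDR variant of the
// format, as implemented here, does not pad elements to four-byte boundaries.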
func pad(l int) int {
return 0
}

Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go generated vendored Normal file (14 changed lines)

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build !ipdr
package xdr
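// pad returns the number of zero bytes needed to round an l-byte element up
// to the next four-byte boundary, as XDR (RFC 4506) requires.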
func pad(l int) int {
d := l % 4
if d == 0 {
return 0
}
return 4 - d
}

Godeps/_workspace/src/github.com/calmh/xdr/reader.go generated vendored Normal file (171 changed lines)

@@ -0,0 +1,171 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package xdr
import (
"fmt"
"io"
"reflect"
"unsafe"
)
type Reader struct {
r io.Reader
err error
b [8]byte
}
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
}
}
func (r *Reader) ReadRaw(bs []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
var n int
n, r.err = io.ReadFull(r.r, bs)
return n, r.err
}
func (r *Reader) ReadString() string {
return r.ReadStringMax(0)
}
func (r *Reader) ReadStringMax(max int) string {
buf := r.ReadBytesMaxInto(max, nil)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh := reflect.StringHeader{
Data: bh.Data,
Len: bh.Len,
}
return *((*string)(unsafe.Pointer(&sh)))
}
func (r *Reader) ReadBytes() []byte {
return r.ReadBytesInto(nil)
}
func (r *Reader) ReadBytesMax(max int) []byte {
return r.ReadBytesMaxInto(max, nil)
}
func (r *Reader) ReadBytesInto(dst []byte) []byte {
return r.ReadBytesMaxInto(0, dst)
}
func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
if r.err != nil {
return nil
}
l := int(r.ReadUint32())
if r.err != nil {
return nil
}
if l < 0 || max > 0 && l > max {
// l may be negative on 32 bit builds
r.err = ElementSizeExceeded("bytes field", l, max)
return nil
}
if fullLen := l + pad(l); fullLen > len(dst) {
dst = make([]byte, fullLen)
} else {
dst = dst[:fullLen]
}
var n int
n, r.err = io.ReadFull(r.r, dst)
if r.err != nil {
if debug {
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
}
return nil
}
if debug {
if n > maxDebugBytes {
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
} else {
dl.Printf("rd bytes (%d): %x", len(dst), dst)
}
}
return dst[:l]
}
func (r *Reader) ReadBool() bool {
return r.ReadUint8() != 0
}
func (r *Reader) ReadUint32() uint32 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:4])
if r.err != nil {
if debug {
dl.Printf("rd uint32: %v", r.err)
}
return 0
}
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
if debug {
dl.Printf("rd uint32=%d (0x%08x)", v, v)
}
return v
}
func (r *Reader) ReadUint64() uint64 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:8])
if r.err != nil {
if debug {
dl.Printf("rd uint64: %v", r.err)
}
return 0
}
v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
if debug {
dl.Printf("rd uint64=%d (0x%016x)", v, v)
}
return v
}
type XDRError struct {
op string
err error
}
func (e XDRError) Error() string {
return "xdr " + e.op + ": " + e.err.Error()
}
func (e XDRError) IsEOF() bool {
return e.err == io.EOF
}
func (r *Reader) Error() error {
if r.err == nil {
return nil
}
return XDRError{"read", r.err}
}
func ElementSizeExceeded(field string, size, limit int) error {
return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
}


@@ -0,0 +1,49 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build ipdr
package xdr
import "io"
func (r *Reader) ReadUint8() uint8 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:1])
if r.err != nil {
if debug {
dl.Printf("rd uint8: %v", r.err)
}
return 0
}
if debug {
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
}
return r.b[0]
}
func (r *Reader) ReadUint16() uint16 {
if r.err != nil {
return 0
}
_, r.err = io.ReadFull(r.r, r.b[:2])
if r.err != nil {
if debug {
dl.Printf("rd uint16: %v", r.err)
}
return 0
}
v := uint16(r.b[1]) | uint16(r.b[0])<<8
if debug {
dl.Printf("rd uint16=%d (0x%04x)", v, v)
}
return v
}


@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !ipdr
package xdr
func (r *Reader) ReadUint8() uint8 {
return uint8(r.ReadUint32())
}
func (r *Reader) ReadUint16() uint16 {
return uint16(r.ReadUint32())
}


@@ -0,0 +1,44 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build refl
package xdr_test
import (
"bytes"
"testing"
refl "github.com/davecgh/go-xdr/xdr"
)
func TestCompareMarshals(t *testing.T) {
e0 := s.MarshalXDR()
e1, err := refl.Marshal(s)
if err != nil {
t.Fatal(err)
}
if bytes.Compare(e0, e1) != 0 {
t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
}
}
func BenchmarkReflMarshal(b *testing.B) {
var err error
for i := 0; i < b.N; i++ {
res, err = refl.Marshal(s)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkReflUnmarshal(b *testing.B) {
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
_, err := refl.Unmarshal(e, &t)
if err != nil {
b.Fatal(err)
}
}
}

146
Godeps/_workspace/src/github.com/calmh/xdr/writer.go generated vendored Normal file
View File

@@ -0,0 +1,146 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"io"
"reflect"
"unsafe"
)
var padBytes = []byte{0, 0, 0}
type Writer struct {
w io.Writer
tot int
err error
b [8]byte
}
type AppendWriter []byte
func (w *AppendWriter) Write(bs []byte) (int, error) {
*w = append(*w, bs...)
return len(bs), nil
}
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
}
}
func (w *Writer) WriteRaw(bs []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
var n int
n, w.err = w.w.Write(bs)
return n, w.err
}
func (w *Writer) WriteString(s string) (int, error) {
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}
func (w *Writer) WriteBytes(bs []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
w.WriteUint32(uint32(len(bs)))
if w.err != nil {
return 0, w.err
}
if debug {
if len(bs) > maxDebugBytes {
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
} else {
dl.Printf("wr bytes (%d): %x", len(bs), bs)
}
}
var l, n int
n, w.err = w.w.Write(bs)
l += n
if p := pad(len(bs)); w.err == nil && p > 0 {
n, w.err = w.w.Write(padBytes[:p])
l += n
}
w.tot += l
return l, w.err
}
func (w *Writer) WriteBool(v bool) (int, error) {
if v {
return w.WriteUint8(1)
} else {
return w.WriteUint8(0)
}
}
func (w *Writer) WriteUint32(v uint32) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint32=%d", v)
}
w.b[0] = byte(v >> 24)
w.b[1] = byte(v >> 16)
w.b[2] = byte(v >> 8)
w.b[3] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:4])
w.tot += l
return l, w.err
}
func (w *Writer) WriteUint64(v uint64) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint64=%d", v)
}
w.b[0] = byte(v >> 56)
w.b[1] = byte(v >> 48)
w.b[2] = byte(v >> 40)
w.b[3] = byte(v >> 32)
w.b[4] = byte(v >> 24)
w.b[5] = byte(v >> 16)
w.b[6] = byte(v >> 8)
w.b[7] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:8])
w.tot += l
return l, w.err
}
func (w *Writer) Tot() int {
return w.tot
}
func (w *Writer) Error() error {
if w.err == nil {
return nil
}
return XDRError{"write", w.err}
}
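
Editor's note: a hedged round-trip sketch (not part of the vendored file) showing the AppendWriter helper and the padding applied by WriteBytes under the default build: each byte field is a 4-byte length prefix plus the payload rounded up to a multiple of four.

```go
package main

import (
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf xdr.AppendWriter
	w := xdr.NewWriter(&buf)

	w.WriteBytes([]byte("hello")) // 4 (length) + 5 (data) + 3 (padding) = 12 bytes
	w.WriteUint32(42)             // 4 more bytes

	fmt.Println(w.Tot(), len(buf)) // 16 16
	fmt.Println(w.Error())         // <nil>
}
```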

View File

@@ -0,0 +1,41 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build ipdr
package xdr
func (w *Writer) WriteUint8(v uint8) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint8=%d", v)
}
w.b[0] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:1])
w.tot += l
return l, w.err
}
func (w *Writer) WriteUint16(v uint16) (int, error) {
if w.err != nil {
return 0, w.err
}
if debug {
dl.Printf("wr uint8=%d", v)
}
w.b[0] = byte(v >> 8)
w.b[1] = byte(v)
var l int
l, w.err = w.w.Write(w.b[:2])
w.tot += l
return l, w.err
}

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// +build !ipdr
package xdr
func (w *Writer) WriteUint8(v uint8) (int, error) {
return w.WriteUint32(uint32(v))
}
func (w *Writer) WriteUint16(v uint16) (int, error) {
return w.WriteUint32(uint32(v))
}

93
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr
import (
"bytes"
"strings"
"testing"
"testing/quick"
)
func TestBytesNil(t *testing.T) {
fn := func(bs []byte) bool {
var b = new(bytes.Buffer)
var w = NewWriter(b)
var r = NewReader(b)
w.WriteBytes(bs)
w.WriteBytes(bs)
r.ReadBytes()
res := r.ReadBytes()
return bytes.Compare(bs, res) == 0
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}
func TestBytesGiven(t *testing.T) {
fn := func(bs []byte) bool {
var b = new(bytes.Buffer)
var w = NewWriter(b)
var r = NewReader(b)
w.WriteBytes(bs)
w.WriteBytes(bs)
res := make([]byte, 12)
res = r.ReadBytesInto(res)
res = r.ReadBytesInto(res)
return bytes.Compare(bs, res) == 0
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}
func TestReadBytesMaxInto(t *testing.T) {
var max = 64
for tot := 32; tot < 128; tot++ {
for diff := -32; diff <= 32; diff++ {
var b = new(bytes.Buffer)
var r = NewReader(b)
var w = NewWriter(b)
var toWrite = make([]byte, tot)
w.WriteBytes(toWrite)
var buf = make([]byte, tot+diff)
var bs = r.ReadBytesMaxInto(max, buf)
if tot <= max {
if read := len(bs); read != tot {
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
}
} else if !strings.Contains(r.err.Error(), "exceeds size") {
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
}
}
}
}
func TestReadStringMax(t *testing.T) {
for tot := 42; tot < 72; tot++ {
for max := 0; max < 128; max++ {
var b = new(bytes.Buffer)
var r = NewReader(b)
var w = NewWriter(b)
var toWrite = make([]byte, tot)
w.WriteBytes(toWrite)
var str = r.ReadStringMax(max)
var read = len(str)
if max == 0 || tot <= max {
if read != tot {
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
}
} else if !strings.Contains(r.err.Error(), "exceeds size") {
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
}
}
}
}

View File

@@ -1,8 +1,5 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
This package contains an efficient token-bucket-based rate limiter.
Copyright (C) 2015 Canonical Ltd.
This software is licensed under the LGPLv3, included below.

View File

@@ -20,7 +20,7 @@ token in the bucket represents one byte.
```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
Writer returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
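A hedged usage sketch of the wrapper documented above, in the juju/ratelimit-style API this README describes; `dst`, `src` and the chosen rate are placeholders, not values from this repository:
```go
// One token per byte: the copy below is throttled to roughly 1 MiB/s.
bucket := ratelimit.NewBucketWithRate(1024*1024, 1024*1024) // rate, burst capacity
limited := ratelimit.Writer(dst, bucket)                    // dst is an assumed io.Writer
if _, err := io.Copy(limited, src); err != nil {            // src is an assumed io.Reader
	log.Fatal(err)
}
```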
#### type Bucket

View File

@@ -2,13 +2,11 @@
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
// The ratelimit package provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// The ratelimit package provides an efficient token bucket implementation.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit
import (
"math"
"strconv"
"sync"
"time"
@@ -57,7 +55,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
continue
}
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
}
@@ -171,30 +169,6 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
return count
}
// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the buffer will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
return tb.available(time.Now())
}
// available is the internal version of available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
return tb.avail
}
// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
return tb.capacity
}
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
@@ -243,3 +217,10 @@ func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
tb.availTick = currentTick
return
}
func abs(f float64) float64 {
if f < 0 {
return -f
}
return f
}

View File

@@ -5,11 +5,10 @@
package ratelimit
import (
"math"
gc "launchpad.net/gocheck"
"testing"
"time"
gc "gopkg.in/check.v1"
)
func TestPackage(t *testing.T) {
@@ -126,49 +125,6 @@ var takeTests = []struct {
}},
}}
var availTests = []struct {
about string
capacity int64
fillInterval time.Duration
take int64
sleep time.Duration
expectCountAfterTake int64
expectCountAfterSleep int64
}{{
about: "should fill tokens after interval",
capacity: 5,
fillInterval: time.Second,
take: 5,
sleep: time.Second,
expectCountAfterTake: 0,
expectCountAfterSleep: 1,
}, {
about: "should fill tokens plus existing count",
capacity: 2,
fillInterval: time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}, {
about: "shouldn't fill before interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 1,
}, {
about: "should fill only once after 1*interval before 2*interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: 3 * time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}}
func (rateLimitSuite) TestTake(c *gc.C) {
for i, test := range takeTests {
tb := NewBucket(test.fillInterval, test.capacity)
@@ -305,7 +261,7 @@ func (rateLimitSuite) TestPanics(c *gc.C) {
}
func isCloseTo(x, y, tolerance float64) bool {
return math.Abs(x-y)/y < tolerance
return abs(x-y)/y < tolerance
}
func (rateLimitSuite) TestRate(c *gc.C) {
@@ -364,23 +320,6 @@ func (rateLimitSuite) TestNewWithRate(c *gc.C) {
}
}
func TestAvailable(t *testing.T) {
for i, tt := range availTests {
tb := NewBucket(tt.fillInterval, tt.capacity)
if c := tb.takeAvailable(tb.startTime, tt.take); c != tt.take {
t.Fatalf("#%d: %s, take = %d, want = %d", i, tt.about, c, tt.take)
}
if c := tb.available(tb.startTime); c != tt.expectCountAfterTake {
t.Fatalf("#%d: %s, after take, available = %d, want = %d", i, tt.about, c, tt.expectCountAfterTake)
}
if c := tb.available(tb.startTime.Add(tt.sleep)); c != tt.expectCountAfterSleep {
t.Fatalf("#%d: %s, after some time it should fill in new tokens, available = %d, want = %d",
i, tt.about, c, tt.expectCountAfterSleep)
}
}
}
func BenchmarkWait(b *testing.B) {
tb := NewBucket(1, 16*1024)
for i := b.N - 1; i >= 0; i-- {

View File

@@ -4,9 +4,7 @@
There is sometimes utility in finding the current executable file
that is running. This can be used for upgrading the current executable
or finding resources located relative to the executable file. Both
working directory and the os.Args[0] value are arbitrary and cannot
be relied on; os.Args[0] can be "faked".
or finding resources located relative to the executable file.
Multi-platform and supports:
* Linux

View File

@@ -3,31 +3,25 @@
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext // import "github.com/kardianos/osext"
package osext
import "path/filepath"
var cx, ce = executableClean()
func executableClean() (string, error) {
p, err := executable()
return filepath.Clean(p), err
}
// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
return cx, ce
p, err := executable()
return filepath.Clean(p), err
}
// Returns same path as Executable, returns just the folder
// path. Excludes the executable name and any trailing slash.
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
return filepath.Dir(p), nil
folder, _ := filepath.Split(p)
return folder, nil
}
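
Editor's note: a small sketch (not part of the vendored file) of calling the two exported functions above, assuming the import path `github.com/kardianos/osext` used elsewhere in this tree.

```go
package main

import (
	"fmt"

	"github.com/kardianos/osext"
)

func main() {
	exe, err := osext.Executable()
	if err != nil {
		panic(err)
	}
	dir, _ := osext.ExecutableFolder()
	fmt.Println("running", exe, "from", dir)
}
```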

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux netbsd solaris dragonfly
// +build linux netbsd openbsd solaris dragonfly
package osext
@@ -17,17 +17,15 @@ import (
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
const deletedTag = " (deleted)"
const deletedSuffix = " (deleted)"
execpath, err := os.Readlink("/proc/self/exe")
if err != nil {
return execpath, err
}
execpath = strings.TrimSuffix(execpath, deletedTag)
execpath = strings.TrimPrefix(execpath, deletedTag)
return execpath, nil
return strings.TrimSuffix(execpath, deletedSuffix), nil
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "dragonfly":
case "openbsd", "dragonfly":
return os.Readlink("/proc/curproc/file")
case "solaris":
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))

View File

@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd openbsd
// +build darwin freebsd
package osext
import (
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
@@ -24,8 +23,6 @@ func executable() (string, error) {
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
case "darwin":
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
case "openbsd":
mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */}
}
n := uintptr(0)
@@ -45,58 +42,14 @@ func executable() (string, error) {
if n == 0 { // This shouldn't happen.
return "", nil
}
var execPath string
switch runtime.GOOS {
case "openbsd":
// buf now contains **argv, with pointers to each of the C-style
// NULL terminated arguments.
var args []string
argv := uintptr(unsafe.Pointer(&buf[0]))
Loop:
for {
argp := *(**[1 << 20]byte)(unsafe.Pointer(argv))
if argp == nil {
break
}
for i := 0; uintptr(i) < n; i++ {
// we don't want the full arguments list
if string(argp[i]) == " " {
break Loop
}
if argp[i] != 0 {
continue
}
args = append(args, string(argp[:i]))
n -= uintptr(i)
break
}
if n < unsafe.Sizeof(argv) {
break
}
argv += unsafe.Sizeof(argv)
n -= unsafe.Sizeof(argv)
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
execPath = args[0]
// There is no canonical way to get an executable path on
// OpenBSD, so check PATH in case we are called directly
if execPath[0] != '/' && execPath[0] != '.' {
execIsInPath, err := exec.LookPath(execPath)
if err == nil {
execPath = execIsInPath
}
}
default:
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
}
execPath = string(buf)
}
var err error
execPath := string(buf)
// execPath will not be empty due to above checks.
// Try to get the absolute path if the execPath is not rooted.
if execPath[0] != '/' {

View File

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows openbsd
// +build darwin linux freebsd netbsd windows
package osext
@@ -24,29 +24,6 @@ const (
executableEnvValueDelete = "delete"
)
func TestPrintExecutable(t *testing.T) {
ef, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
t.Log("Executable:", ef)
}
func TestPrintExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
t.Log("Executable Folder:", ef)
}
func TestExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
if ef[len(ef)-1] == filepath.Separator {
t.Fatal("ExecutableFolder ends with a trailing slash.")
}
}
func TestExecutableMatch(t *testing.T) {
ep, err := Executable()
if err != nil {

View File

@@ -0,0 +1,80 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"io"
"time"
)
type TestModel struct {
data []byte
folder string
name string
offset int64
size int
hash []byte
flags uint32
options []Option
closedCh chan bool
}
func newTestModel() *TestModel {
return &TestModel{
closedCh: make(chan bool),
}
}
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
}
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
}
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
t.hash = hash
t.flags = flags
t.options = options
return t.data, nil
}
func (t *TestModel) Close(deviceID DeviceID, err error) {
close(t.closedCh)
}
func (t *TestModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
}
func (t *TestModel) isClosed() bool {
select {
case <-t.closedCh:
return true
case <-time.After(1 * time.Second):
return false // Timeout
}
}
type ErrPipe struct {
io.PipeWriter
written int
max int
err error
closed bool
}
func (e *ErrPipe) Write(data []byte) (int, error) {
if e.closed {
return 0, e.err
}
if e.written+len(data) > e.max {
n, _ := e.PipeWriter.Write(data[:e.max-e.written])
e.PipeWriter.CloseWithError(e.err)
e.closed = true
return n, e.err
}
return e.PipeWriter.Write(data)
}

View File

@@ -4,7 +4,13 @@ package protocol
import "fmt"
type Compression int
const (
CompressMetadata Compression = iota // the zero value is the default, and the default is to compress metadata
CompressNever
CompressAlways
compressionThreshold = 128 // don't bother compressing messages smaller than this many bytes
)
@@ -25,6 +31,14 @@ var compressionUnmarshal = map[string]Compression{
"always": CompressAlways,
}
func (c Compression) String() string {
s, ok := compressionMarshal[c]
if !ok {
return fmt.Sprintf("unknown:%d", c)
}
return s
}
func (c Compression) GoString() string {
return fmt.Sprintf("%q", c.String())
}

View File

@@ -0,0 +1,15 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"os"
"strings"
"github.com/calmh/logger"
)
var (
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)

View File

@@ -16,7 +16,6 @@ import (
)
type DeviceID [32]byte
type ShortID uint64
var LocalDeviceID = DeviceID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -66,24 +65,18 @@ func (n DeviceID) Compare(other DeviceID) int {
}
func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Equal(n[:], other[:])
return bytes.Compare(n[:], other[:]) == 0
}
// Short returns an integer representing bits 0-63 of the device ID.
func (n DeviceID) Short() ShortID {
return ShortID(binary.BigEndian.Uint64(n[:]))
func (n DeviceID) Short() uint64 {
return binary.BigEndian.Uint64(n[:])
}
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
func (s ShortID) String() string {
var bs [8]byte
binary.BigEndian.PutUint64(bs[:], uint64(s))
return base32.StdEncoding.EncodeToString(bs[:])[:7]
}
func (n *DeviceID) UnmarshalText(bs []byte) error {
id := string(bs)
id = strings.Trim(id, "=")

View File

@@ -74,25 +74,3 @@ func TestMarshallingDeviceID(t *testing.T) {
t.Error("Compare error")
}
}
func TestShortIDString(t *testing.T) {
id, _ := DeviceIDFromString(formatted)
sid := id.Short().String()
if len(sid) != 7 {
t.Errorf("Wrong length for short ID: got %d, want 7", len(sid))
}
want := formatted[:len(sid)]
if sid != want {
t.Errorf("Wrong short ID: got %q, want %q", sid, want)
}
}
func TestDeviceIDFromBytes(t *testing.T) {
id0, _ := DeviceIDFromString(formatted)
id1 := DeviceIDFromBytes(id0[:])
if id1.String() != formatted {
t.Errorf("Wrong device ID, got %q, want %q", id1, formatted)
}
}

View File

@@ -0,0 +1,51 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"errors"
)
const (
ecNoError int32 = iota
ecGeneric
ecNoSuchFile
ecInvalid
)
var (
ErrNoError error = nil
ErrGeneric = errors.New("generic error")
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
)
var lookupError = map[int32]error{
ecNoError: ErrNoError,
ecGeneric: ErrGeneric,
ecNoSuchFile: ErrNoSuchFile,
ecInvalid: ErrInvalid,
}
var lookupCode = map[error]int32{
ErrNoError: ecNoError,
ErrGeneric: ecGeneric,
ErrNoSuchFile: ecNoSuchFile,
ErrInvalid: ecInvalid,
}
func codeToError(errcode int32) error {
err, ok := lookupError[errcode]
if !ok {
return ErrGeneric
}
return err
}
func errorToCode(err error) int32 {
code, ok := lookupCode[err]
if !ok {
return ecGeneric
}
return code
}
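
Editor's note: a sketch of how the two lookup tables behave, written as it might appear in a `_test.go` file inside package protocol (the helpers are unexported, and `testing` would be imported there). Known errors round-trip through their wire codes; unknown codes collapse to ErrGeneric.

```go
func TestErrorCodeRoundTrip(t *testing.T) {
	if err := codeToError(errorToCode(ErrNoSuchFile)); err != ErrNoSuchFile {
		t.Errorf("round trip gave %v", err)
	}
	if err := codeToError(42); err != ErrGeneric { // 42 is not in lookupError
		t.Errorf("unknown code should map to ErrGeneric, got %v", err)
	}
}
```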

View File

@@ -0,0 +1,43 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import "github.com/calmh/xdr"
type header struct {
version int
msgID int
msgType int
compression bool
}
func (h header) encodeXDR(xw *xdr.Writer) (int, error) {
u := encodeHeader(h)
return xw.WriteUint32(u)
}
func (h *header) decodeXDR(xr *xdr.Reader) error {
u := xr.ReadUint32()
*h = decodeHeader(u)
return xr.Error()
}
func encodeHeader(h header) uint32 {
var isComp uint32
if h.compression {
isComp = 1 << 0 // the zeroth bit is the compression bit
}
return uint32(h.version&0xf)<<28 +
uint32(h.msgID&0xfff)<<16 +
uint32(h.msgType&0xff)<<8 +
isComp
}
func decodeHeader(u uint32) header {
return header{
version: int(u>>28) & 0xf,
msgID: int(u>>16) & 0xfff,
msgType: int(u>>8) & 0xff,
compression: u&1 == 1,
}
}
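
Editor's note: a worked example of the bit layout above, phrased as a hypothetical in-package test: version in the top 4 bits, message ID in the next 12, type in the next 8, and the compression flag in bit 0.

```go
func TestHeaderPackingExample(t *testing.T) {
	h := header{version: 0, msgID: 42, msgType: 2, compression: true}
	// 42<<16 | 2<<8 | 1 == 0x002a0201
	if got := encodeHeader(h); got != 0x002a0201 {
		t.Errorf("got 0x%08x", got)
	}
	if back := decodeHeader(0x002a0201); back != h {
		t.Errorf("got %+v", back)
	}
}
```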

View File

@@ -0,0 +1,126 @@
// Copyright (C) 2014 The Protocol Authors.
//go:generate -command genxdr go run ../syncthing/Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
//go:generate genxdr -o message_xdr.go message.go
package protocol
import "fmt"
type IndexMessage struct {
Folder string
Files []FileInfo
Flags uint32
Options []Option // max:64
}
type FileInfo struct {
Name string // max:8192
Flags uint32
Modified int64
Version Vector
LocalVersion int64
Blocks []BlockInfo
}
func (f FileInfo) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%v, Size:%d, Blocks:%v}",
f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
}
func (f FileInfo) Size() (bytes int64) {
if f.IsDeleted() || f.IsDirectory() {
return 128
}
for _, b := range f.Blocks {
bytes += int64(b.Size)
}
return
}
func (f FileInfo) IsDeleted() bool {
return f.Flags&FlagDeleted != 0
}
func (f FileInfo) IsInvalid() bool {
return f.Flags&FlagInvalid != 0
}
func (f FileInfo) IsDirectory() bool {
return f.Flags&FlagDirectory != 0
}
func (f FileInfo) IsSymlink() bool {
return f.Flags&FlagSymlink != 0
}
func (f FileInfo) HasPermissionBits() bool {
return f.Flags&FlagNoPermBits == 0
}
type BlockInfo struct {
Offset int64 // noencode (cache only)
Size int32
Hash []byte // max:64
}
func (b BlockInfo) String() string {
return fmt.Sprintf("Block{%d/%d/%x}", b.Offset, b.Size, b.Hash)
}
type RequestMessage struct {
Folder string // max:64
Name string // max:8192
Offset int64
Size int32
Hash []byte // max:64
Flags uint32
Options []Option // max:64
}
type ResponseMessage struct {
Data []byte
Code int32
}
type ClusterConfigMessage struct {
ClientName string // max:64
ClientVersion string // max:64
Folders []Folder
Options []Option // max:64
}
func (o *ClusterConfigMessage) GetOption(key string) string {
for _, option := range o.Options {
if option.Key == key {
return option.Value
}
}
return ""
}
type Folder struct {
ID string // max:64
Devices []Device
Flags uint32
Options []Option // max:64
}
type Device struct {
ID []byte // max:32
MaxLocalVersion int64
Flags uint32
Options []Option // max:64
}
type Option struct {
Key string // max:64
Value string // max:1024
}
type CloseMessage struct {
Reason string // max:1024
Code int32
}
type EmptyMessage struct{}

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,40 @@
// Copyright (C) 2014 The Protocol Authors.
// +build darwin
package protocol
// Darwin uses NFD normalization
import "golang.org/x/text/unicode/norm"
type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.Index(deviceID, folder, files, flags, options)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.IndexUpdate(deviceID, folder, files, flags, options)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
name = norm.NFD.String(name)
return m.next.Request(deviceID, folder, name, offset, size, hash, flags, options)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}
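
Editor's note: a rough illustration (not part of the file) of what the NFD rewrite above does to a file name, using the same golang.org/x/text/unicode/norm package; a precomposed "é" (U+00E9) becomes "e" followed by a combining acute accent (U+0301).

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	s := norm.NFD.String("caf\u00e9")      // precomposed é
	fmt.Println(s == "cafe\u0301", len(s)) // true 6
}
```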

View File

@@ -0,0 +1,31 @@
// Copyright (C) 2014 The Protocol Authors.
// +build !windows,!darwin
package protocol
// Normal Unixes use NFC and slashes, which is the wire format.
type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
m.next.Index(deviceID, folder, files, flags, options)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
m.next.IndexUpdate(deviceID, folder, files, flags, options)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
return m.next.Request(deviceID, folder, name, offset, size, hash, flags, options)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}

View File

@@ -21,34 +21,42 @@ var disallowedCharacters = string([]rune{
})
type nativeModel struct {
Model
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
fixupFiles(folder, files)
m.Model.Index(deviceID, folder, files)
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
fixupFiles(files)
m.next.Index(deviceID, folder, files, flags, options)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
fixupFiles(folder, files)
m.Model.IndexUpdate(deviceID, folder, files)
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
fixupFiles(files)
m.next.IndexUpdate(deviceID, folder, files, flags, options)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
name = filepath.FromSlash(name)
return m.Model.Request(deviceID, folder, name, offset, hash, fromTemporary, buf)
return m.next.Request(deviceID, folder, name, offset, size, hash, flags, options)
}
func fixupFiles(folder string, files []FileInfo) {
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
m.next.ClusterConfig(deviceID, config)
}
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}
func fixupFiles(files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) || strings.HasSuffix(f.Name, " ") {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Invalid = true
l.Warnf("File name %q (folder %q) contains invalid characters; marked as invalid.", f.Name, folder)
files[i].Flags |= FlagInvalid
l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
}
files[i].Name = filepath.FromSlash(files[i].Name)
}

View File

@@ -0,0 +1,736 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"sync"
"time"
lz4 "github.com/bkaradzic/go-lz4"
)
const (
BlockSize = 128 * 1024
)
const (
messageTypeClusterConfig = 0
messageTypeIndex = 1
messageTypeRequest = 2
messageTypeResponse = 3
messageTypePing = 4
messageTypePong = 5
messageTypeIndexUpdate = 6
messageTypeClose = 7
)
const (
stateInitial = iota
stateCCRcvd
stateIdxRcvd
)
// FileInfo flags
const (
FlagDeleted uint32 = 1 << 12
FlagInvalid = 1 << 13
FlagDirectory = 1 << 14
FlagNoPermBits = 1 << 15
FlagSymlink = 1 << 16
FlagSymlinkMissingTarget = 1 << 17
FlagsAll = (1 << 18) - 1
SymlinkTypeMask = FlagDirectory | FlagSymlinkMissingTarget
)
// IndexMessage message flags (for IndexUpdate)
const (
FlagIndexTemporary uint32 = 1 << iota
)
// Request message flags
const (
FlagRequestTemporary uint32 = 1 << iota
)
// ClusterConfigMessage.Folders.Devices flags
const (
FlagShareTrusted uint32 = 1 << 0
FlagShareReadOnly = 1 << 1
FlagIntroducer = 1 << 2
FlagShareBits = 0x000000ff
)
var (
ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
ErrClosed = errors.New("connection closed")
)
// Specific variants of empty messages...
type pingMessage struct{ EmptyMessage }
type pongMessage struct{ EmptyMessage }
type Model interface {
// An index was received from the peer device
Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option)
// An index update was received from the peer device
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option)
// A request was made by the peer device
Request(deviceID DeviceID, folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error)
// A cluster configuration message was received
ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
// The peer device closed the connection
Close(deviceID DeviceID, err error)
}
type Connection interface {
ID() DeviceID
Name() string
Index(folder string, files []FileInfo, flags uint32, options []Option) error
IndexUpdate(folder string, files []FileInfo, flags uint32, options []Option) error
Request(folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error)
ClusterConfig(config ClusterConfigMessage)
Statistics() Statistics
}
type rawConnection struct {
id DeviceID
name string
receiver Model
state int
cr *countingReader
cw *countingWriter
awaiting [4096]chan asyncResult
awaitingMut sync.Mutex
idxMut sync.Mutex // ensures serialization of Index calls
nextID chan int
outbox chan hdrMsg
closed chan struct{}
once sync.Once
compression Compression
rdbuf0 []byte // used & reused by readMessage
rdbuf1 []byte // used & reused by readMessage
}
type asyncResult struct {
val []byte
err error
}
type hdrMsg struct {
hdr header
msg encodable
}
type encodable interface {
AppendXDR([]byte) ([]byte, error)
}
type isEofer interface {
IsEOF() bool
}
const (
pingTimeout = 30 * time.Second
pingIdleTime = 60 * time.Second
)
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
cr := &countingReader{Reader: reader}
cw := &countingWriter{Writer: writer}
c := rawConnection{
id: deviceID,
name: name,
receiver: nativeModel{receiver},
state: stateInitial,
cr: cr,
cw: cw,
outbox: make(chan hdrMsg),
nextID: make(chan int),
closed: make(chan struct{}),
compression: compress,
}
go c.readerLoop()
go c.writerLoop()
go c.pingerLoop()
go c.idGenerator()
return wireFormatConnection{&c}
}
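// Editor's note: a hedged wiring sketch of the constructor above, as seen from
// another package. remoteID, conn and myModel are assumed values: an already
// known DeviceID, an established net.Conn, and some implementation of the
// Model interface.
```go
c := protocol.NewConnection(remoteID, conn, conn, myModel, "peer-name", protocol.CompressMetadata)
c.ClusterConfig(protocol.ClusterConfigMessage{ClientName: "example", ClientVersion: "v0.0"})
// c.Index, c.Request etc. can then be used to talk to the peer.
```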
func (c *rawConnection) ID() DeviceID {
return c.id
}
func (c *rawConnection) Name() string {
return c.name
}
// Index writes the list of file information to the connected peer device
func (c *rawConnection) Index(folder string, idx []FileInfo, flags uint32, options []Option) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndex, IndexMessage{
Folder: folder,
Files: idx,
Flags: flags,
Options: options,
})
c.idxMut.Unlock()
return nil
}
// IndexUpdate writes the list of file information to the connected peer device as an update
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo, flags uint32, options []Option) error {
select {
case <-c.closed:
return ErrClosed
default:
}
c.idxMut.Lock()
c.send(-1, messageTypeIndexUpdate, IndexMessage{
Folder: folder,
Files: idx,
Flags: flags,
Options: options,
})
c.idxMut.Unlock()
return nil
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return nil, ErrClosed
}
c.awaitingMut.Lock()
if ch := c.awaiting[id]; ch != nil {
panic("id taken")
}
rc := make(chan asyncResult, 1)
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypeRequest, RequestMessage{
Folder: folder,
Name: name,
Offset: offset,
Size: int32(size),
Hash: hash,
Flags: flags,
Options: options,
})
if !ok {
return nil, ErrClosed
}
res, ok := <-rc
if !ok {
return nil, ErrClosed
}
return res.val, res.err
}
// ClusterConfig sends the cluster configuration message to the peer
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
c.send(-1, messageTypeClusterConfig, config)
}
func (c *rawConnection) ping() bool {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return false
}
rc := make(chan asyncResult, 1)
c.awaitingMut.Lock()
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypePing, nil)
if !ok {
return false
}
res, ok := <-rc
return ok && res.err == nil
}
func (c *rawConnection) readerLoop() (err error) {
defer func() {
c.close(err)
}()
for {
select {
case <-c.closed:
return ErrClosed
default:
}
hdr, msg, err := c.readMessage()
if err != nil {
return err
}
switch msg := msg.(type) {
case IndexMessage:
switch hdr.msgType {
case messageTypeIndex:
if c.state < stateCCRcvd {
return fmt.Errorf("protocol error: index message in state %d", c.state)
}
c.handleIndex(msg)
c.state = stateIdxRcvd
case messageTypeIndexUpdate:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: index update message in state %d", c.state)
}
c.handleIndexUpdate(msg)
}
case RequestMessage:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: request message in state %d", c.state)
}
// Requests are handled asynchronously
go c.handleRequest(hdr.msgID, msg)
case ResponseMessage:
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: response message in state %d", c.state)
}
c.handleResponse(hdr.msgID, msg)
case pingMessage:
c.send(hdr.msgID, messageTypePong, pongMessage{})
case pongMessage:
c.handlePong(hdr.msgID)
case ClusterConfigMessage:
if c.state != stateInitial {
return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
}
go c.receiver.ClusterConfig(c.id, msg)
c.state = stateCCRcvd
case CloseMessage:
return errors.New(msg.Reason)
default:
return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
}
}
}
func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
if cap(c.rdbuf0) < 8 {
c.rdbuf0 = make([]byte, 8)
} else {
c.rdbuf0 = c.rdbuf0[:8]
}
_, err = io.ReadFull(c.cr, c.rdbuf0)
if err != nil {
return
}
hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4]))
msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8]))
if debug {
l.Debugf("read header %v (msglen=%d)", hdr, msglen)
}
if hdr.version != 0 {
err = fmt.Errorf("unknown protocol version 0x%x", hdr.version)
return
}
if cap(c.rdbuf0) < msglen {
c.rdbuf0 = make([]byte, msglen)
} else {
c.rdbuf0 = c.rdbuf0[:msglen]
}
_, err = io.ReadFull(c.cr, c.rdbuf0)
if err != nil {
return
}
if debug {
l.Debugf("read %d bytes", len(c.rdbuf0))
}
msgBuf := c.rdbuf0
if hdr.compression {
c.rdbuf1 = c.rdbuf1[:cap(c.rdbuf1)]
c.rdbuf1, err = lz4.Decode(c.rdbuf1, c.rdbuf0)
if err != nil {
return
}
msgBuf = c.rdbuf1
if debug {
l.Debugf("decompressed to %d bytes", len(msgBuf))
}
}
if debug {
if len(msgBuf) > 1024 {
l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
} else {
l.Debugf("message data:\n%s", hex.Dump(msgBuf))
}
}
// We check each returned error for the XDRError.IsEOF() method.
// IsEOF()==true here means that the message contained fewer fields than
// expected. It does not signify an EOF on the socket, because we've
// successfully read a size value and that many bytes already. New fields
// we expected but the other peer didn't send should be interpreted as
// zero/nil, and if that's not valid we'll verify it somewhere else.
switch hdr.msgType {
case messageTypeIndex, messageTypeIndexUpdate:
var idx IndexMessage
err = idx.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = idx
case messageTypeRequest:
var req RequestMessage
err = req.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = req
case messageTypeResponse:
var resp ResponseMessage
err = resp.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = resp
case messageTypePing:
msg = pingMessage{}
case messageTypePong:
msg = pongMessage{}
case messageTypeClusterConfig:
var cc ClusterConfigMessage
err = cc.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = cc
case messageTypeClose:
var cm CloseMessage
err = cm.UnmarshalXDR(msgBuf)
if xdrErr, ok := err.(isEofer); ok && xdrErr.IsEOF() {
err = nil
}
msg = cm
default:
err = fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
}
return
}
func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
}
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
}
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
}
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
}
func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
var out []FileInfo
for i, f := range fs {
switch f.Name {
case "", ".", "..", "/": // A few obviously invalid filenames
l.Infof("Dropping invalid filename %q from incoming index", f.Name)
if out == nil {
// Most incoming updates won't contain anything invalid, so we
// delay the allocation and copy to output slice until we
// really need to do it, then copy all the so far valid files
// to it.
out = make([]FileInfo, i, len(fs)-1)
copy(out, fs)
}
default:
if out != nil {
out = append(out, f)
}
}
}
if out != nil {
return out
}
return fs
}
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
data, err := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size), req.Hash, req.Flags, req.Options)
c.send(msgID, messageTypeResponse, ResponseMessage{
Data: data,
Code: errorToCode(err),
})
}
func (c *rawConnection) handleResponse(msgID int, resp ResponseMessage) {
c.awaitingMut.Lock()
if rc := c.awaiting[msgID]; rc != nil {
c.awaiting[msgID] = nil
rc <- asyncResult{resp.Data, codeToError(resp.Code)}
close(rc)
}
c.awaitingMut.Unlock()
}
func (c *rawConnection) handlePong(msgID int) {
c.awaitingMut.Lock()
if rc := c.awaiting[msgID]; rc != nil {
c.awaiting[msgID] = nil
rc <- asyncResult{}
close(rc)
}
c.awaitingMut.Unlock()
}
func (c *rawConnection) send(msgID int, msgType int, msg encodable) bool {
if msgID < 0 {
select {
case id := <-c.nextID:
msgID = id
case <-c.closed:
return false
}
}
hdr := header{
version: 0,
msgID: msgID,
msgType: msgType,
}
select {
case c.outbox <- hdrMsg{hdr, msg}:
return true
case <-c.closed:
return false
}
}
func (c *rawConnection) writerLoop() {
var msgBuf = make([]byte, 8) // buffer for wire format message, kept and reused
var uncBuf []byte // buffer for uncompressed message, kept and reused
for {
var tempBuf []byte
var err error
select {
case hm := <-c.outbox:
if hm.msg != nil {
// Uncompressed message in uncBuf
uncBuf, err = hm.msg.AppendXDR(uncBuf[:0])
if err != nil {
c.close(err)
return
}
compress := false
switch c.compression {
case CompressAlways:
compress = true
case CompressMetadata:
compress = hm.hdr.msgType != messageTypeResponse
}
if compress && len(uncBuf) >= compressionThreshold {
// Use compression for large messages
hm.hdr.compression = true
// Make sure we have enough space for the compressed message plus header in msgBuf
msgBuf = msgBuf[:cap(msgBuf)]
if maxLen := lz4.CompressBound(len(uncBuf)) + 8; maxLen > len(msgBuf) {
msgBuf = make([]byte, maxLen)
}
// Compressed data is written to msgBuf; we keep tempBuf only for its length
tempBuf, err = lz4.Encode(msgBuf[8:], uncBuf)
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf)))
msgBuf = msgBuf[0 : len(tempBuf)+8]
if debug {
l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
}
} else {
// No point in compressing very short messages
hm.hdr.compression = false
msgBuf = msgBuf[:cap(msgBuf)]
if l := len(uncBuf) + 8; l > len(msgBuf) {
msgBuf = make([]byte, l)
}
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(uncBuf)))
msgBuf = msgBuf[0 : len(uncBuf)+8]
copy(msgBuf[8:], uncBuf)
if debug {
l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
}
}
} else {
if debug {
l.Debugf("write empty message; %v", hm.hdr)
}
binary.BigEndian.PutUint32(msgBuf[4:8], 0)
msgBuf = msgBuf[:8]
}
binary.BigEndian.PutUint32(msgBuf[0:4], encodeHeader(hm.hdr))
if err == nil {
var n int
n, err = c.cw.Write(msgBuf)
if debug {
l.Debugf("wrote %d bytes on the wire", n)
}
}
if err != nil {
c.close(err)
return
}
case <-c.closed:
return
}
}
}
func (c *rawConnection) close(err error) {
c.once.Do(func() {
close(c.closed)
c.awaitingMut.Lock()
for i, ch := range c.awaiting {
if ch != nil {
close(ch)
c.awaiting[i] = nil
}
}
c.awaitingMut.Unlock()
go c.receiver.Close(c.id, err)
})
}
func (c *rawConnection) idGenerator() {
nextID := 0
for {
nextID = (nextID + 1) & 0xfff
select {
case c.nextID <- nextID:
case <-c.closed:
return
}
}
}
func (c *rawConnection) pingerLoop() {
var rc = make(chan bool, 1)
ticker := time.Tick(pingIdleTime / 2)
for {
select {
case <-ticker:
if d := time.Since(c.cr.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after rd", d)
}
continue
}
if d := time.Since(c.cw.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after wr", d)
}
continue
}
go func() {
if debug {
l.Debugln(c.id, "ping ->")
}
rc <- c.ping()
}()
select {
case ok := <-rc:
if debug {
l.Debugln(c.id, "<- pong")
}
if !ok {
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(pingTimeout):
c.close(fmt.Errorf("ping timeout"))
case <-c.closed:
return
}
case <-c.closed:
return
}
}
}
type Statistics struct {
At time.Time
InBytesTotal int64
OutBytesTotal int64
}
func (c *rawConnection) Statistics() Statistics {
return Statistics{
At: time.Now(),
InBytesTotal: c.cr.Tot(),
OutBytesTotal: c.cw.Tot(),
}
}

View File

@@ -0,0 +1,382 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
"testing/quick"
"github.com/calmh/xdr"
)
var (
c0ID = NewDeviceID([]byte{1})
c1ID = NewDeviceID([]byte{2})
)
func TestHeaderFunctions(t *testing.T) {
f := func(ver, id, typ int) bool {
ver = int(uint(ver) % 16)
id = int(uint(id) % 4096)
typ = int(uint(typ) % 256)
h0 := header{version: ver, msgID: id, msgType: typ}
h1 := decodeHeader(encodeHeader(h0))
return h0 == h1
}
if err := quick.Check(f, nil); err != nil {
t.Error(err)
}
}
func TestHeaderLayout(t *testing.T) {
var e, a uint32
// Version is the first four bits
e = 0xf0000000
a = encodeHeader(header{version: 0xf})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Message ID are the following 12 bits
e = 0x0fff0000
a = encodeHeader(header{msgID: 0xfff})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
// Type is the last 8 bits before the reserved bits
e = 0x0000ff00
a = encodeHeader(header{msgType: 0xff})
if a != e {
t.Errorf("Header layout incorrect; %08x != %08x", a, e)
}
}
func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
}
if ok := c1.ping(); !ok {
t.Error("c1 ping failed")
}
}
func TestPingErr(t *testing.T) {
e := errors.New("something broke")
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
res := c0.ping()
if (i < 8 || j < 8) && res {
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 12 && j >= 12) && !res {
t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
}
}
}
}
// func TestRequestResponseErr(t *testing.T) {
// e := errors.New("something broke")
// var pass bool
// for i := 0; i < 48; i++ {
// for j := 0; j < 38; j++ {
// m0 := newTestModel()
// m0.data = []byte("response data")
// m1 := newTestModel()
// ar, aw := io.Pipe()
// br, bw := io.Pipe()
// eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
// ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
// NewConnection(c0ID, ar, ebw, m0, nil)
// c1 := NewConnection(c1ID, br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection)
// d, err := c1.Request("default", "tn", 1234, 5678)
// if err == e || err == ErrClosed {
// t.Logf("Error at %d+%d bytes", i, j)
// if !m1.isClosed() {
// t.Fatal("c1 not closed")
// }
// if !m0.isClosed() {
// t.Fatal("c0 not closed")
// }
// continue
// }
// if err != nil {
// t.Fatal(err)
// }
// if string(d) != "response data" {
// t.Fatalf("Incorrect response data %q", string(d))
// }
// if m0.folder != "default" {
// t.Fatalf("Incorrect folder %q", m0.folder)
// }
// if m0.name != "tn" {
// t.Fatalf("Incorrect name %q", m0.name)
// }
// if m0.offset != 1234 {
// t.Fatalf("Incorrect offset %d", m0.offset)
// }
// if m0.size != 5678 {
// t.Fatalf("Incorrect size %d", m0.size)
// }
// t.Logf("Pass at %d+%d bytes", i, j)
// pass = true
// }
// }
// if !pass {
// t.Fatal("Never passed")
// }
// }
func TestVersionErr(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
version: 2,
msgID: 0,
msgType: 0,
}))
w.WriteUint32(0) // Avoids reader closing due to EOF
if !m1.isClosed() {
t.Error("Connection should close due to unknown version")
}
}
func TestTypeErr(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
version: 0,
msgID: 0,
msgType: 42,
}))
w.WriteUint32(0) // Avoids reader closing due to EOF
if !m1.isClosed() {
t.Error("Connection should close due to unknown message type")
}
}
func TestClose(t *testing.T) {
m0 := newTestModel()
m1 := newTestModel()
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c0.close(nil)
<-c0.closed
if !m0.isClosed() {
t.Fatal("Connection should be closed")
}
// None of these should panic, some should return an error
if c0.ping() {
t.Error("Ping should not return true")
}
c0.Index("default", nil, 0, nil)
c0.Index("default", nil, 0, nil)
if _, err := c0.Request("default", "foo", 0, 0, nil, 0, nil); err == nil {
t.Error("Request should return an error")
}
}
func TestElementSizeExceededNested(t *testing.T) {
m := ClusterConfigMessage{
Folders: []Folder{
{ID: "longstringlongstringlongstringinglongstringlongstringlonlongstringlongstringlon"},
},
}
_, err := m.EncodeXDR(ioutil.Discard)
if err == nil {
t.Errorf("ID length %d > max 64, but no error", len(m.Folders[0].ID))
}
}
func TestMarshalIndexMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 IndexMessage) bool {
for _, f := range m1.Files {
for i := range f.Blocks {
f.Blocks[i].Offset = 0
if len(f.Blocks[i].Hash) == 0 {
f.Blocks[i].Hash = nil
}
}
}
return testMarshal(t, "index", &m1, &IndexMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalRequestMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 RequestMessage) bool {
return testMarshal(t, "request", &m1, &RequestMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalResponseMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 ResponseMessage) bool {
if len(m1.Data) == 0 {
m1.Data = nil
}
return testMarshal(t, "response", &m1, &ResponseMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalClusterConfigMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 ClusterConfigMessage) bool {
return testMarshal(t, "clusterconfig", &m1, &ClusterConfigMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
func TestMarshalCloseMessage(t *testing.T) {
var quickCfg = &quick.Config{MaxCountScale: 10}
if testing.Short() {
quickCfg = nil
}
f := func(m1 CloseMessage) bool {
return testMarshal(t, "close", &m1, &CloseMessage{})
}
if err := quick.Check(f, quickCfg); err != nil {
t.Error(err)
}
}
type message interface {
EncodeXDR(io.Writer) (int, error)
DecodeXDR(io.Reader) error
}
func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
var buf bytes.Buffer
failed := func(bc []byte) {
bs, _ := json.MarshalIndent(m1, "", " ")
ioutil.WriteFile(prefix+"-1.txt", bs, 0644)
bs, _ = json.MarshalIndent(m2, "", " ")
ioutil.WriteFile(prefix+"-2.txt", bs, 0644)
if len(bc) > 0 {
f, _ := os.Create(prefix + "-data.txt")
fmt.Fprint(f, hex.Dump(bc))
f.Close()
}
}
_, err := m1.EncodeXDR(&buf)
if err != nil && strings.Contains(err.Error(), "exceeds size") {
return true
}
if err != nil {
failed(nil)
t.Fatal(err)
}
bc := make([]byte, len(buf.Bytes()))
copy(bc, buf.Bytes())
err = m2.DecodeXDR(&buf)
if err != nil {
failed(bc)
t.Fatal(err)
}
ok := reflect.DeepEqual(m1, m2)
if !ok {
failed(bc)
}
return ok
}

View File

@@ -0,0 +1,115 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// The Vector type represents a version vector. The zero value is a usable
// version vector. The vector has slice semantics and some operations on it
// are "append-like" in that they may return the same vector modified, or a
// newly allocated Vector with the modified contents.
type Vector []Counter
// Counter represents a single counter in the version vector.
type Counter struct {
ID uint64
Value uint64
}
// Update returns a Vector with the index for the specific ID incremented by
// one. If it is possible, the vector v is updated and returned. If it is not,
// a copy will be created, updated and returned.
func (v Vector) Update(ID uint64) Vector {
for i := range v {
if v[i].ID == ID {
// Update an existing index
v[i].Value++
return v
} else if v[i].ID > ID {
// Insert a new index
nv := make(Vector, len(v)+1)
copy(nv, v[:i])
nv[i].ID = ID
nv[i].Value = 1
copy(nv[i+1:], v[i:])
return nv
}
}
// Append a new index
return append(v, Counter{ID, 1})
}
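
Editor's note: a short sketch (not part of the file) of the append-like contract described above, phrased as a hypothetical in-package test; the result must always be kept, since Update may return either the same slice or a freshly allocated one.

```go
func TestUpdateExample(t *testing.T) {
	var v Vector     // the zero value is usable
	v = v.Update(42) // {{42, 1}}
	v = v.Update(42) // {{42, 2}}
	v = v.Update(7)  // {{7, 1}, {42, 2}} -- counters stay sorted by ID
	want := Vector{{ID: 7, Value: 1}, {ID: 42, Value: 2}}
	if !v.Equal(want) {
		t.Errorf("got %+v, want %+v", v, want)
	}
}
```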
// Merge returns the vector containing the maximum indexes from a and b. If it
// is possible, the vector a is updated and returned. If it is not, a copy
// will be created, updated and returned.
func (a Vector) Merge(b Vector) Vector {
var ai, bi int
for bi < len(b) {
if ai == len(a) {
// We've reached the end of a; all that remains are appends
return append(a, b[bi:]...)
}
if a[ai].ID > b[bi].ID {
// The index from b should be inserted here
n := make(Vector, len(a)+1)
copy(n, a[:ai])
n[ai] = b[bi]
copy(n[ai+1:], a[ai:])
a = n
}
if a[ai].ID == b[bi].ID {
if v := b[bi].Value; v > a[ai].Value {
a[ai].Value = v
}
}
if bi < len(b) && a[ai].ID == b[bi].ID {
bi++
}
ai++
}
return a
}
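
Editor's note: a corresponding sketch for Merge, again as a hypothetical in-package test; the result carries the element-wise maximum of both vectors.

```go
func TestMergeExample(t *testing.T) {
	a := Vector{{ID: 1, Value: 2}, {ID: 2, Value: 1}}
	b := Vector{{ID: 1, Value: 1}, {ID: 3, Value: 5}}
	want := Vector{{ID: 1, Value: 2}, {ID: 2, Value: 1}, {ID: 3, Value: 5}}
	if m := a.Merge(b); !m.Equal(want) {
		t.Errorf("got %+v, want %+v", m, want)
	}
}
```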
// Copy returns an identical vector that is not shared with v.
func (v Vector) Copy() Vector {
nv := make(Vector, len(v))
copy(nv, v)
return nv
}
// Equal returns true when the two vectors are equivalent.
func (a Vector) Equal(b Vector) bool {
return a.Compare(b) == Equal
}
// LesserEqual returns true when the two vectors are equivalent or a is Lesser
// than b.
func (a Vector) LesserEqual(b Vector) bool {
comp := a.Compare(b)
return comp == Lesser || comp == Equal
}
// GreaterEqual returns true when the two vectors are equivalent or a is Greater
// than b.
func (a Vector) GreaterEqual(b Vector) bool {
comp := a.Compare(b)
return comp == Greater || comp == Equal
}
// Concurrent returns true when the two vectors are concurrent.
func (a Vector) Concurrent(b Vector) bool {
comp := a.Compare(b)
return comp == ConcurrentGreater || comp == ConcurrentLesser
}
// Counter returns the current value of the given counter ID.
func (v Vector) Counter(id uint64) uint64 {
for _, c := range v {
if c.ID == id {
return c.Value
}
}
return 0
}

View File

@@ -0,0 +1,89 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// Ordering represents the relationship between two Vectors.
type Ordering int
const (
Equal Ordering = iota
Greater
Lesser
ConcurrentLesser
ConcurrentGreater
)
// There's really no such thing as "concurrent lesser" and "concurrent
// greater" in version vectors, just "concurrent". But it's useful to be able
// to get a strict ordering between versions for stable sorts and so on, so we
// return both variants. The convenience method Concurrent() can be used to
// check for either case.
// Compare returns the Ordering that describes a's relation to b.
func (a Vector) Compare(b Vector) Ordering {
var ai, bi int // index into a and b
var av, bv Counter // value at current index
result := Equal
for ai < len(a) || bi < len(b) {
var aMissing, bMissing bool
if ai < len(a) {
av = a[ai]
} else {
av = Counter{}
aMissing = true
}
if bi < len(b) {
bv = b[bi]
} else {
bv = Counter{}
bMissing = true
}
switch {
case av.ID == bv.ID:
// We have a counter value for each side
if av.Value > bv.Value {
if result == Lesser {
return ConcurrentLesser
}
result = Greater
} else if av.Value < bv.Value {
if result == Greater {
return ConcurrentGreater
}
result = Lesser
}
case !aMissing && av.ID < bv.ID || bMissing:
// Value is missing on the b side
if av.Value > 0 {
if result == Lesser {
return ConcurrentLesser
}
result = Greater
}
case !bMissing && bv.ID < av.ID || aMissing:
// Value is missing on the a side
if bv.Value > 0 {
if result == Greater {
return ConcurrentGreater
}
result = Lesser
}
}
if ai < len(a) && (av.ID <= bv.ID || bMissing) {
ai++
}
if bi < len(b) && (bv.ID <= av.ID || aMissing) {
bi++
}
}
return result
}
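
Editor's note: a small in-package sketch (not part of the file) of the two "concurrent" outcomes discussed above; each vector holds a counter the other lacks, so neither dominates.

```go
func TestConcurrentExample(t *testing.T) {
	a := Vector{{ID: 1, Value: 2}}
	b := Vector{{ID: 2, Value: 1}}
	if r := a.Compare(b); r != ConcurrentGreater {
		t.Errorf("got %v", r)
	}
	if r := b.Compare(a); r != ConcurrentLesser {
		t.Errorf("got %v", r)
	}
	if !a.Concurrent(b) {
		t.Error("expected the vectors to be concurrent")
	}
}
```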

View File

@@ -0,0 +1,249 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
import (
"math"
"testing"
)
func TestCompare(t *testing.T) {
testcases := []struct {
a, b Vector
r Ordering
}{
// Empty vectors are identical
{Vector{}, Vector{}, Equal},
{Vector{}, nil, Equal},
{nil, Vector{}, Equal},
{nil, Vector{Counter{42, 0}}, Equal},
{Vector{}, Vector{Counter{42, 0}}, Equal},
{Vector{Counter{42, 0}}, nil, Equal},
{Vector{Counter{42, 0}}, Vector{}, Equal},
// Zero is the implied value for a missing Counter
{
Vector{Counter{42, 0}},
Vector{Counter{77, 0}},
Equal,
},
// Equal vectors are equal
{
Vector{Counter{42, 33}},
Vector{Counter{42, 33}},
Equal,
},
{
Vector{Counter{42, 33}, Counter{77, 24}},
Vector{Counter{42, 33}, Counter{77, 24}},
Equal,
},
// These a-vectors are all greater than the b-vector
{
Vector{Counter{42, 1}},
nil,
Greater,
},
{
Vector{Counter{42, 1}},
Vector{},
Greater,
},
{
Vector{Counter{0, 1}},
Vector{Counter{0, 0}},
Greater,
},
{
Vector{Counter{42, 1}},
Vector{Counter{42, 0}},
Greater,
},
{
Vector{Counter{math.MaxUint64, 1}},
Vector{Counter{math.MaxUint64, 0}},
Greater,
},
{
Vector{Counter{0, math.MaxUint64}},
Vector{Counter{0, 0}},
Greater,
},
{
Vector{Counter{42, math.MaxUint64}},
Vector{Counter{42, 0}},
Greater,
},
{
Vector{Counter{math.MaxUint64, math.MaxUint64}},
Vector{Counter{math.MaxUint64, 0}},
Greater,
},
{
Vector{Counter{0, math.MaxUint64}},
Vector{Counter{0, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{42, math.MaxUint64}},
Vector{Counter{42, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{math.MaxUint64, math.MaxUint64}},
Vector{Counter{math.MaxUint64, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{42, 2}},
Vector{Counter{42, 1}},
Greater,
},
{
Vector{Counter{22, 22}, Counter{42, 2}},
Vector{Counter{22, 22}, Counter{42, 1}},
Greater,
},
{
Vector{Counter{42, 2}, Counter{77, 3}},
Vector{Counter{42, 1}, Counter{77, 3}},
Greater,
},
{
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Greater,
},
{
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Greater,
},
// These a-vectors are all lesser than the b-vector
{nil, Vector{Counter{42, 1}}, Lesser},
{Vector{}, Vector{Counter{42, 1}}, Lesser},
{
Vector{Counter{42, 0}},
Vector{Counter{42, 1}},
Lesser,
},
{
Vector{Counter{42, 1}},
Vector{Counter{42, 2}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}},
Vector{Counter{22, 22}, Counter{42, 2}},
Lesser,
},
{
Vector{Counter{42, 1}, Counter{77, 3}},
Vector{Counter{42, 2}, Counter{77, 3}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
Lesser,
},
// These are all in conflict
{
Vector{Counter{42, 2}},
Vector{Counter{43, 1}},
ConcurrentGreater,
},
{
Vector{Counter{43, 1}},
Vector{Counter{42, 2}},
ConcurrentLesser,
},
{
Vector{Counter{22, 23}, Counter{42, 1}},
Vector{Counter{22, 22}, Counter{42, 2}},
ConcurrentGreater,
},
{
Vector{Counter{22, 21}, Counter{42, 2}},
Vector{Counter{22, 22}, Counter{42, 1}},
ConcurrentLesser,
},
{
Vector{Counter{22, 21}, Counter{42, 2}, Counter{43, 1}},
Vector{Counter{20, 1}, Counter{22, 22}, Counter{42, 1}},
ConcurrentLesser,
},
}
for i, tc := range testcases {
// Test real Compare
if r := tc.a.Compare(tc.b); r != tc.r {
t.Errorf("%d: %+v.Compare(%+v) == %v (expected %v)", i, tc.a, tc.b, r, tc.r)
}
// Test convenience functions
switch tc.r {
case Greater:
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if !tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v not >= %+v", tc.a, tc.b)
}
if tc.a.LesserEqual(tc.b) {
t.Errorf("%+v <= %+v", tc.a, tc.b)
}
case Lesser:
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v >= %+v", tc.a, tc.b)
}
if !tc.a.LesserEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
case Equal:
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if !tc.a.Equal(tc.b) {
t.Errorf("%+v not == %+v", tc.a, tc.b)
}
if !tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
if !tc.a.LesserEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
case ConcurrentLesser, ConcurrentGreater:
if !tc.a.Concurrent(tc.b) {
t.Errorf("%+v not concurrent %+v", tc.a, tc.b)
}
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v >= %+v", tc.a, tc.b)
}
if tc.a.LesserEqual(tc.b) {
t.Errorf("%+v <= %+v", tc.a, tc.b)
}
}
}
}

View File

@@ -0,0 +1,134 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
import "testing"
func TestUpdate(t *testing.T) {
var v Vector
// Append
v = v.Update(42)
expected := Vector{Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Insert at front
v = v.Update(36)
expected = Vector{Counter{36, 1}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Insert in the middle
v = v.Update(37)
expected = Vector{Counter{36, 1}, Counter{37, 1}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Update existing
v = v.Update(37)
expected = Vector{Counter{36, 1}, Counter{37, 2}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
}
func TestCopy(t *testing.T) {
v0 := Vector{Counter{42, 1}}
v1 := v0.Copy()
v1.Update(42)
if v0.Compare(v1) != Lesser {
t.Errorf("Copy error, %+v should be ancestor of %+v", v0, v1)
}
}
func TestMerge(t *testing.T) {
testcases := []struct {
a, b, m Vector
}{
// No-ops
{
Vector{},
Vector{},
Vector{},
},
{
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Appends
{
Vector{},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
{
Vector{Counter{22, 1}},
Vector{Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
{
Vector{Counter{22, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Insert
{
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
},
{
Vector{Counter{42, 1}},
Vector{Counter{22, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Update
{
Vector{Counter{22, 1}, Counter{42, 2}},
Vector{Counter{22, 2}, Counter{42, 1}},
Vector{Counter{22, 2}, Counter{42, 2}},
},
// All of the above
{
Vector{Counter{10, 1}, Counter{20, 2}, Counter{30, 1}},
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 1}, Counter{25, 1}, Counter{35, 1}},
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 2}, Counter{25, 1}, Counter{30, 1}, Counter{35, 1}},
},
}
for i, tc := range testcases {
if m := tc.a.Merge(tc.b); m.Compare(tc.m) != Equal {
t.Errorf("%d: %+v.Merge(%+v) == %+v (expected %+v)", i, tc.a, tc.b, m, tc.m)
}
}
}
func TestCounterValue(t *testing.T) {
v0 := Vector{Counter{42, 1}, Counter{64, 5}}
if v0.Counter(42) != 1 {
t.Errorf("Counter error, %d != %d", v0.Counter(42), 1)
}
if v0.Counter(64) != 5 {
t.Errorf("Counter error, %d != %d", v0.Counter(64), 5)
}
if v0.Counter(72) != 0 {
t.Errorf("Counter error, %d != %d", v0.Counter(72), 0)
}
}

View File

@@ -0,0 +1,38 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// This stuff is hacked up manually because genxdr doesn't support 'type
// Vector []Counter' declarations and it was tricky when I tried to add it...
type xdrWriter interface {
WriteUint32(uint32) (int, error)
WriteUint64(uint64) (int, error)
}
type xdrReader interface {
ReadUint32() uint32
ReadUint64() uint64
}
// EncodeXDRInto encodes the vector as an XDR object into the given XDR
// encoder.
func (v Vector) EncodeXDRInto(w xdrWriter) (int, error) {
w.WriteUint32(uint32(len(v)))
for i := range v {
w.WriteUint64(v[i].ID)
w.WriteUint64(v[i].Value)
}
return 4 + 16*len(v), nil
}
// DecodeXDRFrom decodes the XDR objects from the given reader into itself.
func (v *Vector) DecodeXDRFrom(r xdrReader) error {
l := int(r.ReadUint32())
n := make(Vector, l)
for i := range n {
n[i].ID = r.ReadUint64()
n[i].Value = r.ReadUint64()
}
*v = n
return nil
}
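A minimal sketch of the encoded size implied above — four bytes of length prefix plus sixteen per counter — using a hypothetical counting writer that satisfies xdrWriter (it is not part of the package):

	// countingWriter is a made-up helper for this example only.
	type countingWriter struct{ n int }

	func (w *countingWriter) WriteUint32(uint32) (int, error) { w.n += 4; return 4, nil }
	func (w *countingWriter) WriteUint64(uint64) (int, error) { w.n += 8; return 8, nil }

	func exampleEncodedSize() {
		v := Vector{Counter{42, 1}, Counter{77, 5}}
		w := &countingWriter{}
		n, _ := v.EncodeXDRInto(w)
		// n == 36 == 4 + 16*len(v), and w.n == 36 bytes actually written
		_ = n
	}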

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"path/filepath"
"golang.org/x/text/unicode/norm"
)
type wireFormatConnection struct {
next Connection
}
func (c wireFormatConnection) ID() DeviceID {
return c.next.ID()
}
func (c wireFormatConnection) Name() string {
return c.next.Name()
}
func (c wireFormatConnection) Index(folder string, fs []FileInfo, flags uint32, options []Option) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
for i := range fs {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.Index(folder, myFs, flags, options)
}
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo, flags uint32, options []Option) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
for i := range fs {
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.IndexUpdate(folder, myFs, flags, options)
}
func (c wireFormatConnection) Request(folder, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
name = norm.NFC.String(filepath.ToSlash(name))
return c.next.Request(folder, name, offset, size, hash, flags, options)
}
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
c.next.ClusterConfig(config)
}
func (c wireFormatConnection) Statistics() Statistics {
return c.next.Statistics()
}
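The wrapper's only transformation is the per-name normalization applied in Index, IndexUpdate and Request above; a standalone sketch of that transform under an assumed, made-up file name:

	package main

	import (
		"fmt"
		"path/filepath"

		"golang.org/x/text/unicode/norm"
	)

	func main() {
		// Mirrors wireFormatConnection: filepath.ToSlash converts the
		// OS-specific separator to '/', and norm.NFC composes the name,
		// e.g. a decomposed "e" + U+0301 becomes the single rune 'é'.
		name := "photos/cafe\u0301.txt" // hypothetical, decomposed é
		fmt.Println(norm.NFC.String(filepath.ToSlash(name)))
		// Output: photos/café.txt (é is now a single composed code point)
	}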

View File

@@ -12,10 +12,8 @@ import (
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrBatchCorrupted records reason of batch corruption.
type ErrBatchCorrupted struct {
Reason string
}
@@ -25,7 +23,7 @@ func (e *ErrBatchCorrupted) Error() string {
}
func newErrBatchCorrupted(reason string) error {
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
}
const (
@@ -33,7 +31,6 @@ const (
batchGrowRec = 3000
)
// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
@@ -70,20 +67,20 @@ func (b *Batch) grow(n int) {
}
}
func (b *Batch) appendRec(kt keyType, key, value []byte) {
func (b *Batch) appendRec(kt kType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
if kt == keyTypeVal {
if kt == ktVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
off := len(b.data)
data := b.data[:off+n]
data[off] = byte(kt)
off++
off += 1
off += binary.PutUvarint(data[off:], uint64(len(key)))
copy(data[off:], key)
off += len(key)
if kt == keyTypeVal {
if kt == ktVal {
off += binary.PutUvarint(data[off:], uint64(len(value)))
copy(data[off:], value)
off += len(value)
@@ -97,13 +94,13 @@ func (b *Batch) appendRec(kt keyType, key, value []byte) {
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
b.appendRec(ktVal, key, value)
}
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
b.appendRec(ktDel, key, nil)
}
// Dump dumps batch contents. The returned slice can be loaded into the
@@ -124,14 +121,13 @@ func (b *Batch) Load(data []byte) error {
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
return b.decodeRec(func(i int, kt kType, key, value []byte) {
switch kt {
case keyTypeVal:
case ktVal:
r.Put(key, value)
case keyTypeDel:
case ktDel:
r.Delete(key)
}
return nil
})
}
@@ -158,7 +154,6 @@ func (b *Batch) append(p *Batch) {
b.grow(len(p.data) - batchHdrLen)
b.data = append(b.data, p.data[batchHdrLen:]...)
b.rLen += p.rLen
b.bLen += p.bLen
}
if p.sync {
b.sync = true
@@ -198,19 +193,18 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
return nil
}
func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
off := batchHdrLen
for i := 0; i < b.rLen; i++ {
if off >= len(b.data) {
return newErrBatchCorrupted("invalid records length")
}
kt := keyType(b.data[off])
if kt > keyTypeVal {
panic(kt)
kt := kType(b.data[off])
if kt > ktVal {
return newErrBatchCorrupted("bad record: invalid type")
}
off++
off += 1
x, n := binary.Uvarint(b.data[off:])
off += n
@@ -220,7 +214,7 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
key := b.data[off : off+int(x)]
off += int(x)
var value []byte
if kt == keyTypeVal {
if kt == ktVal {
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
@@ -230,19 +224,16 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
off += int(x)
}
if err := f(i, kt, key, value); err != nil {
return err
}
f(i, kt, key, value)
}
return nil
}
func (b *Batch) memReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Put(ikScratch, value)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Put(ikey, value)
})
}
@@ -254,9 +245,8 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
}
func (b *Batch) revertMemReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Delete(ikScratch)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Delete(ikey)
})
}

View File

@@ -15,7 +15,7 @@ import (
)
type tbRec struct {
kt keyType
kt kType
key, value []byte
}
@@ -24,11 +24,11 @@ type testBatch struct {
}
func (p *testBatch) Put(key, value []byte) {
p.rec = append(p.rec, &tbRec{keyTypeVal, key, value})
p.rec = append(p.rec, &tbRec{ktVal, key, value})
}
func (p *testBatch) Delete(key []byte) {
p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil})
p.rec = append(p.rec, &tbRec{ktDel, key, nil})
}
func compareBatch(t *testing.T, b1, b2 *Batch) {
@@ -55,7 +55,7 @@ func compareBatch(t *testing.T, b1, b2 *Batch) {
if !bytes.Equal(r1.key, r2.key) {
t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
}
if r1.kt == keyTypeVal {
if r1.kt == ktVal {
if !bytes.Equal(r1.value, r2.value) {
t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
}
@@ -100,9 +100,6 @@ func TestBatch_Append(t *testing.T) {
b2b.Put([]byte("bar"), []byte("barvalue"))
b2a.append(b2b)
compareBatch(t, b1, b2a)
if b1.size() != b2a.size() {
t.Fatalf("invalid batch size want %d, got %d", b1.size(), b2a.size())
}
}
func TestBatch_Size(t *testing.T) {

View File

@@ -0,0 +1,58 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package leveldb
import (
"sync/atomic"
"testing"
)
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}

View File

@@ -13,7 +13,6 @@ import (
"os"
"path/filepath"
"runtime"
"sync/atomic"
"testing"
"github.com/syndtr/goleveldb/leveldb/iterator"
@@ -91,7 +90,7 @@ func openDBBench(b *testing.B, noCompress bool) *dbBench {
ro: &opt.ReadOptions{},
wo: &opt.WriteOptions{},
}
p.stor, err = storage.OpenFile(benchDB, false)
p.stor, err = storage.OpenFile(benchDB)
if err != nil {
b.Fatal("cannot open stor: ", err)
}
@@ -463,47 +462,3 @@ func BenchmarkDBGetRandom(b *testing.B) {
p.gets()
p.close()
}
func BenchmarkDBReadConcurrent(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
for pb.Next() && iter.Next() {
}
})
}
func BenchmarkDBReadConcurrent2(b *testing.B) {
p := openDBBench(b, false)
p.populate(b.N)
p.fill()
p.gc()
defer p.close()
b.ResetTimer()
b.SetBytes(116)
var dir uint32
b.RunParallel(func(pb *testing.PB) {
iter := p.newIter()
defer iter.Release()
if atomic.AddUint32(&dir, 1)%2 == 0 {
for pb.Next() && iter.Next() {
}
} else {
if pb.Next() && iter.Last() {
for pb.Next() && iter.Prev() {
}
}
}
})
}

View File

@@ -4,12 +4,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// +build !go1.2
package cache
import (
"math/rand"
"testing"
"time"
)
func BenchmarkLRUCache(b *testing.B) {

View File

@@ -47,21 +47,17 @@ type Cacher interface {
// so the Release method will be called once the object is released.
type Value interface{}
// NamespaceGetter provides convenient wrapper for namespace.
type NamespaceGetter struct {
type CacheGetter struct {
Cache *Cache
NS uint64
}
// Get simply calls Cache.Get() method.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash tables implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
// Kunlong Zhang, and Michael Spear.
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
mInitialSize = 1 << 4
@@ -614,12 +610,10 @@ func (n *Node) unrefLocked() {
}
}
// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}
// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
@@ -628,8 +622,6 @@ func (h *Handle) Value() Value {
return nil
}
// Release releases this 'cache handle'.
// It is safe to call release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {

View File

@@ -45,8 +45,9 @@ func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle
return c.Get(ns, key, func() (int, Value) {
if relf != nil {
return charge, releaserFunc{relf, value}
} else {
return charge, value
}
return charge, value
})
}

View File

@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
}
func (icmp *iComparer) Compare(a, b []byte) int {
x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
if x == 0 {
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
if m, n := iKey(a).num(), iKey(b).num(); m > n {
x = -1
} else if m < n {
x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
}
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
ua, ub := iKey(a).ukey(), iKey(b).ukey()
dst = icmp.ucmp.Separator(dst, ua, ub)
if dst == nil {
return nil
}
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Did not close possibilities that n maybe longer than len(ub).
dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
}
func (icmp *iComparer) Successor(dst, b []byte) []byte {
ub := internalKey(b).ukey()
ub := iKey(b).ukey()
dst = icmp.ucmp.Successor(dst, ub)
if dst == nil {
return nil
}
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Did not close possibilities that n maybe longer than len(ub).
dst = append(dst, b[len(b)-8:]...)

View File

@@ -9,13 +9,12 @@ package leveldb
import (
"bytes"
"fmt"
"io"
"math/rand"
"testing"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"io"
"math/rand"
"testing"
)
const ctValSize = 1000
@@ -100,17 +99,19 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
p := &h.dbHarness
t := p.t
fds, _ := p.stor.List(ft)
sortFds(fds)
ff, _ := p.stor.GetFiles(ft)
sff := files(ff)
sff.sort()
if fi < 0 {
fi = len(fds) - 1
fi = len(sff) - 1
}
if fi >= len(fds) {
if fi >= len(sff) {
t.Fatalf("no such file with type %q with index %d", ft, fi)
}
fd := fds[fi]
r, err := h.stor.Open(fd)
file := sff[fi]
r, err := file.Open()
if err != nil {
t.Fatal("cannot open file: ", err)
}
@@ -148,11 +149,11 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
buf[offset+i] ^= 0x80
}
err = h.stor.Remove(fd)
err = file.Remove()
if err != nil {
t.Fatal("cannot remove old file: ", err)
}
w, err := h.stor.Create(fd)
w, err := file.Create()
if err != nil {
t.Fatal("cannot create new file: ", err)
}
@@ -164,37 +165,25 @@ func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
}
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
fds, err := h.stor.List(ft)
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
for _, fd := range fds {
if err := h.stor.Remove(fd); err != nil {
h.t.Error("remove file: ", err)
}
}
}
func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) {
fds, err := h.stor.List(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
for _, fd := range fds {
if err := h.stor.ForceRemove(fd); err != nil {
for _, f := range ff {
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
}
func (h *dbCorruptHarness) removeOne(ft storage.FileType) {
fds, err := h.stor.List(ft)
ff, err := h.stor.GetFiles(ft)
if err != nil {
h.t.Fatal("get files: ", err)
}
fd := fds[rand.Intn(len(fds))]
h.t.Logf("removing file @%d", fd.Num)
if err := h.stor.Remove(fd); err != nil {
f := ff[rand.Intn(len(ff))]
h.t.Logf("removing file @%d", f.Num())
if err := f.Remove(); err != nil {
h.t.Error("remove file: ", err)
}
}
@@ -232,7 +221,6 @@ func (h *dbCorruptHarness) check(min, max int) {
func TestCorruptDB_Journal(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(100)
h.check(100, 100)
@@ -242,11 +230,12 @@ func TestCorruptDB_Journal(t *testing.T) {
h.openDB()
h.check(36, 36)
h.close()
}
func TestCorruptDB_Table(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(100)
h.compactMem()
@@ -257,11 +246,12 @@ func TestCorruptDB_Table(t *testing.T) {
h.openDB()
h.check(99, 99)
h.close()
}
func TestCorruptDB_TableIndex(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10000)
h.compactMem()
@@ -270,6 +260,8 @@ func TestCorruptDB_TableIndex(t *testing.T) {
h.openDB()
h.check(5000, 9999)
h.close()
}
func TestCorruptDB_MissingManifest(t *testing.T) {
@@ -279,7 +271,6 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
Strict: opt.StrictJournalChecksum,
WriteBuffer: 1000 * 60,
})
defer h.close()
h.build(1000)
h.compactMem()
@@ -295,8 +286,10 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
h.compactMem()
h.closeDB()
h.forceRemoveAll(storage.TypeManifest)
h.stor.SetIgnoreOpenErr(storage.TypeManifest)
h.removeAll(storage.TypeManifest)
h.openAssert(false)
h.stor.SetIgnoreOpenErr(0)
h.recover()
h.check(1000, 1000)
@@ -307,11 +300,12 @@ func TestCorruptDB_MissingManifest(t *testing.T) {
h.recover()
h.check(1000, 1000)
h.close()
}
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("foo", "v2")
@@ -327,11 +321,12 @@ func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("foo", "v2")
@@ -349,11 +344,12 @@ func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
h.reopenDB()
h.getVal("foo", "v6")
h.close()
}
func TestCorruptDB_CorruptedManifest(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("foo", "hello")
h.compactMem()
@@ -364,11 +360,12 @@ func TestCorruptDB_CorruptedManifest(t *testing.T) {
h.recover()
h.getVal("foo", "hello")
h.close()
}
func TestCorruptDB_CompactionInputError(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10)
h.compactMem()
@@ -380,11 +377,12 @@ func TestCorruptDB_CompactionInputError(t *testing.T) {
h.build(10000)
h.check(10000, 10000)
h.close()
}
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.build(10)
h.compactMem()
@@ -396,11 +394,12 @@ func TestCorruptDB_UnrelatedKeys(t *testing.T) {
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.compactMem()
h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
h.close()
}
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -422,11 +421,12 @@ func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -448,11 +448,12 @@ func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
h.getVal("b", "v3")
h.getVal("c", "v0")
h.getVal("d", "v0")
h.close()
}
func TestCorruptDB_MissingTableFiles(t *testing.T) {
h := newDbCorruptHarness(t)
defer h.close()
h.put("a", "v1")
h.put("b", "v1")
@@ -466,6 +467,8 @@ func TestCorruptDB_MissingTableFiles(t *testing.T) {
h.removeOne(storage.TypeTable)
h.openAssert(false)
h.close()
}
func TestCorruptDB_RecoverTable(t *testing.T) {
@@ -474,7 +477,6 @@ func TestCorruptDB_RecoverTable(t *testing.T) {
CompactionTableSize: 90 * opt.KiB,
Filter: filter.NewBloomFilter(10),
})
defer h.close()
h.build(1000)
h.compactMem()
@@ -493,4 +495,6 @@ func TestCorruptDB_RecoverTable(t *testing.T) {
t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
}
h.check(985, 985)
h.close()
}

View File

@@ -36,14 +36,14 @@ type DB struct {
s *session
// MemDB.
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFd storage.FileDesc
frozenJournalFd storage.FileDesc
frozenSeq uint64
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFile storage.File
frozenJournalFile storage.File
frozenSeq uint64
// Snapshot.
snapsMu sync.Mutex
@@ -61,19 +61,15 @@ type DB struct {
writeDelayN int
journalC chan *Batch
journalAckC chan error
tr *Transaction
// Compaction.
compCommitLk sync.Mutex
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compWriteLocking bool
compStats cStats
memdbMaxLevel int // For testing.
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compStats []cStats
// Close.
closeW sync.WaitGroup
@@ -107,48 +103,33 @@ func openDB(s *session) (*DB, error) {
compErrC: make(chan error),
compPerErrC: make(chan error),
compErrSetC: make(chan error),
compStats: make([]cStats, s.o.GetNumLevel()),
// Close
closeC: make(chan struct{}),
}
// Read-only mode.
readOnly := s.o.GetReadOnly()
if err := db.recoverJournal(); err != nil {
return nil, err
}
if readOnly {
// Recover journals (read-only mode).
if err := db.recoverJournalRO(); err != nil {
return nil, err
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
} else {
// Recover journals.
if err := db.recoverJournal(); err != nil {
return nil, err
}
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
return nil, err
}
return nil, err
}
// Doesn't need to be included in the wait group.
go db.compactionError()
go db.mpoolDrain()
if readOnly {
db.SetReadOnly()
} else {
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
}
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
s.logf("db@open done T·%v", time.Since(start))
@@ -211,7 +192,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, o.GetReadOnly())
stor, err := storage.OpenFile(path)
if err != nil {
return
}
@@ -261,7 +242,7 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, false)
stor, err := storage.OpenFile(path)
if err != nil {
return
}
@@ -280,11 +261,12 @@ func recoverTable(s *session, o *opt.Options) error {
o.Strict &= ^opt.StrictReader
// Get all tables and sort it by file number.
fds, err := s.stor.List(storage.TypeTable)
tableFiles_, err := s.getFiles(storage.TypeTable)
if err != nil {
return err
}
sortFds(fds)
tableFiles := files(tableFiles_)
tableFiles.sort()
var (
maxSeq uint64
@@ -292,22 +274,21 @@ func recoverTable(s *session, o *opt.Options) error {
// We will drop corrupted table.
strict = o.GetStrict(opt.StrictRecovery)
noSync = o.GetNoSync()
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: o.GetNumLevel()}
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
)
buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
tmpFd = s.newTemp()
writer, err := s.stor.Create(tmpFd)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp()
writer, err := tmp.Create()
if err != nil {
return
}
defer func() {
writer.Close()
if err != nil {
s.stor.Remove(tmpFd)
tmpFd = storage.FileDesc{}
tmp.Remove()
tmp = nil
}
}()
@@ -315,7 +296,7 @@ func recoverTable(s *session, o *opt.Options) error {
tw := table.NewWriter(writer, o)
for iter.Next() {
key := iter.Key()
if validInternalKey(key) {
if validIkey(key) {
err = tw.Append(key, iter.Value())
if err != nil {
return
@@ -330,18 +311,16 @@ func recoverTable(s *session, o *opt.Options) error {
if err != nil {
return
}
if !noSync {
err = writer.Sync()
if err != nil {
return
}
err = writer.Sync()
if err != nil {
return
}
size = int64(tw.BytesLen())
return
}
recoverTable := func(fd storage.FileDesc) error {
s.logf("table@recovery recovering @%d", fd.Num)
reader, err := s.stor.Open(fd)
recoverTable := func(file storage.File) error {
s.logf("table@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
@@ -363,7 +342,7 @@ func recoverTable(s *session, o *opt.Options) error {
tgoodKey, tcorruptedKey, tcorruptedBlock int
imin, imax []byte
)
tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
if err != nil {
return err
}
@@ -371,7 +350,7 @@ func recoverTable(s *session, o *opt.Options) error {
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
itererr.SetErrorCallback(func(err error) {
if errors.IsCorrupted(err) {
s.logf("table@recovery block corruption @%d %q", fd.Num, err)
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
tcorruptedBlock++
}
})
@@ -380,7 +359,7 @@ func recoverTable(s *session, o *opt.Options) error {
// Scan the table.
for iter.Next() {
key := iter.Key()
_, seq, _, kerr := parseInternalKey(key)
_, seq, _, kerr := parseIkey(key)
if kerr != nil {
tcorruptedKey++
continue
@@ -406,23 +385,23 @@ func recoverTable(s *session, o *opt.Options) error {
if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
droppedTable++
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
return nil
}
if tgoodKey > 0 {
if tcorruptedKey > 0 || tcorruptedBlock > 0 {
// Rebuild the table.
s.logf("table@recovery rebuilding @%d", fd.Num)
s.logf("table@recovery rebuilding @%d", file.Num())
iter := tr.NewIterator(nil, nil)
tmpFd, newSize, err := buildTable(iter)
tmp, newSize, err := buildTable(iter)
iter.Release()
if err != nil {
return err
}
closed = true
reader.Close()
if err := s.stor.Rename(tmpFd, fd); err != nil {
if err := file.Replace(tmp); err != nil {
return err
}
size = newSize
@@ -432,30 +411,30 @@ func recoverTable(s *session, o *opt.Options) error {
}
recoveredKey += tgoodKey
// Add table to level 0.
rec.addTable(0, fd.Num, size, imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
rec.addTable(0, file.Num(), uint64(size), imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
} else {
droppedTable++
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
}
return nil
}
// Recover all tables.
if len(fds) > 0 {
s.logf("table@recovery F·%d", len(fds))
if len(tableFiles) > 0 {
s.logf("table@recovery F·%d", len(tableFiles))
// Mark file number as used.
s.markFileNum(fds[len(fds)-1].Num)
s.markFileNum(tableFiles[len(tableFiles)-1].Num())
for _, fd := range fds {
if err := recoverTable(fd); err != nil {
for _, file := range tableFiles {
if err := recoverTable(file); err != nil {
return err
}
}
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
}
// Set sequence number.
@@ -471,136 +450,132 @@ func recoverTable(s *session, o *opt.Options) error {
}
func (db *DB) recoverJournal() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
// Get all tables and sort it by file number.
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
journalFiles := files(journalFiles_)
journalFiles.sort()
// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
// Discard older journal.
prev := -1
for i, file := range journalFiles {
if file.Num() >= db.s.stJournalNum {
if prev >= 0 {
i--
journalFiles[i] = journalFiles[prev]
}
journalFiles = journalFiles[i:]
break
} else if file.Num() == db.s.stPrevJournalNum {
prev = i
}
}
var (
ofd storage.FileDesc // Obsolete file.
rec = &sessionRecord{}
)
var jr *journal.Reader
var of storage.File
var mem *memdb.DB
batch := new(Batch)
cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
strict := db.s.o.GetStrict(opt.StrictJournal)
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery F·%d", len(fds))
// Mark file number as used.
db.s.markFileNum(fds[len(fds)-1].Num)
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batch = &Batch{}
)
for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
fr, err := db.s.stor.Open(fd)
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
}
// Flush memdb and remove obsolete journal file.
if !ofd.Nil() {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
}
rec.setJournalNum(fd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
fr.Close()
return err
}
rec.resetAddedTables()
db.s.stor.Remove(ofd)
ofd = storage.FileDesc{}
}
// Replay journal to memdb.
mdb.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFd(err, fd)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mdb.Size() >= writeBuffer {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
mdb.Reset()
}
}
fr.Close()
ofd = fd
// Create/reset journal reader instance.
if jr == nil {
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
// Flush the last memdb.
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
// Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
// Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
return errors.SetFile(err, file)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
} else {
return errors.SetFile(err, file)
}
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
if strict || !errors.IsCorrupted(err) {
return errors.SetFile(err, file)
} else {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mem.Size() >= writeBuffer {
if err := cm.flush(mem, 0); err != nil {
return err
}
mem.Reset()
}
}
of = file
return nil
}
// Recover all journals.
if len(journalFiles) > 0 {
db.logf("journal@recovery F·%d", len(journalFiles))
// Mark file number as used.
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
mem = memdb.New(db.s.icmp, writeBuffer)
for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
return err
}
}
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
@@ -612,10 +587,8 @@ func (db *DB) recoverJournal() error {
}
// Commit.
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
// Close journal on error.
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -624,139 +597,15 @@ func (db *DB) recoverJournal() error {
}
// Remove the last obsolete journal file.
if !ofd.Nil() {
db.s.stor.Remove(ofd)
if of != nil {
of.Remove()
}
return nil
}
func (db *DB) recoverJournalRO() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
}
}
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
mdb = memdb.New(db.s.icmp, writeBuffer)
)
// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(fds))
var (
jr *journal.Reader
buf = &util.Buffer{}
batch = &Batch{}
)
for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
fr, err := db.s.stor.Open(fd)
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
}
// Replay journal to memdb.
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFd(err, fd)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFd(err, fd)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
}
fr.Close()
}
}
// Set memDB.
db.mem = &memDB{db: db, DB: mdb, ref: 1}
return nil
}
func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
mk, mv, err := mdb.Find(ikey)
if err == nil {
ukey, _, kt, kerr := parseInternalKey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
}
if icmp.uCompare(ukey, ikey.ukey()) == 0 {
if kt == keyTypeDel {
return true, nil, ErrNotFound
}
return true, mv, nil
}
} else if err != ErrNotFound {
return true, nil, err
}
return
}
func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
}
}
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIkey(key, seq, ktSeek)
em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
@@ -765,36 +614,36 @@ func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R
}
defer m.decref()
if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
mk, mv, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
}
if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return nil, ErrNotFound
}
return append([]byte{}, mv...), nil
}
} else if me != ErrNotFound {
return nil, me
}
}
v := db.s.version()
value, cSched, err := v.get(auxt, ikey, ro, false)
value, cSched, err := v.get(ikey, ro, false)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
return
}
func nilIfNotFound(err error) error {
if err == ErrNotFound {
return nil
}
return err
}
func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
if auxm != nil {
if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
}
}
func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := newIkey(key, seq, ktSeek)
em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
@@ -803,17 +652,30 @@ func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R
}
defer m.decref()
if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
mk, _, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
}
if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return false, nil
}
return true, nil
}
} else if me != ErrNotFound {
return false, me
}
}
v := db.s.version()
_, cSched, err := v.get(auxt, ikey, ro, true)
_, cSched, err := v.get(ikey, ro, true)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
if err == nil {
ret = true
@@ -837,7 +699,7 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.get(nil, nil, key, se.seq, ro)
return db.get(key, se.seq, ro)
}
// Has returns true if the DB does contains the given key.
@@ -851,11 +713,11 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.has(nil, nil, key, se.seq, ro)
return db.has(key, se.seq, ro)
}
// NewIterator returns an iterator for the latest snapshot of the
// underlying DB.
// uderlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
@@ -879,7 +741,7 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera
defer db.releaseSnapshot(se)
// Iterator holds 'version' lock, 'version' is immutable so snapshot
// can be released after iterator created.
return db.newIterator(nil, nil, se.seq, slice, ro)
return db.newIterator(se.seq, slice, ro)
}
// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
@@ -922,7 +784,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
const prefix = "leveldb."
if !strings.HasPrefix(name, prefix) {
return "", ErrNotFound
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
}
p := name[len(prefix):]
@@ -935,8 +797,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
var level uint
var rest string
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
if n != 1 {
err = ErrNotFound
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = errors.New("leveldb: GetProperty: invalid property: " + name)
} else {
value = fmt.Sprint(v.tLen(int(level)))
}
@@ -944,8 +806,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
for level, tables := range v.levels {
duration, read, write := db.compStats.getStat(level)
for level, tables := range v.tables {
duration, read, write := db.compStats[level].get()
if len(tables) == 0 && duration == 0 {
continue
}
@@ -954,10 +816,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "sstables":
for level, tables := range v.levels {
for level, tables := range v.tables {
value += fmt.Sprintf("--- level %d ---\n", level)
for _, t := range tables {
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
case p == "blockpool":
@@ -975,7 +837,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = ErrNotFound
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}
return
@@ -997,8 +859,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
imin := newIkey(r.Start, kMaxSeq, ktSeek)
imax := newIkey(r.Limit, kMaxSeq, ktSeek)
start, err := v.offsetOf(imin)
if err != nil {
return nil, err
@@ -1007,7 +869,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
if err != nil {
return nil, err
}
var size int64
var size uint64
if limit >= start {
size = limit - start
}
@@ -1017,8 +879,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
return sizes, nil
}
// Close closes the DB. This will also releases any outstanding snapshot,
// abort any in-flight compaction and discard open transaction.
// Close closes the DB. This will also releases any outstanding snapshot and
// abort any in-flight compaction.
//
// It is not safe to close a DB until all outstanding iterators are released.
// It is valid to call Close multiple times. Other methods should not be
@@ -1038,27 +900,17 @@ func (db *DB) Close() error {
var err error
select {
case err = <-db.compErrC:
if err == ErrReadOnly {
err = nil
}
default:
}
// Signal all goroutines.
close(db.closeC)
// Discard open transaction.
if db.tr != nil {
db.tr.Discard()
}
// Acquire writer lock.
db.writeLockC <- struct{}{}
// Wait for all gorotines to exit.
db.closeW.Wait()
// Closes journal.
// Lock writer and closes journal.
db.writeLockC <- struct{}{}
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -1085,6 +937,8 @@ func (db *DB) Close() error {
db.frozenMem = nil
db.journal = nil
db.journalWriter = nil
db.journalFile = nil
db.frozenJournalFile = nil
db.closer = nil
return err

View File

@@ -11,79 +11,109 @@ import (
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)
var (
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
type cStat struct {
type cStats struct {
sync.Mutex
duration time.Duration
read int64
write int64
read uint64
write uint64
}
func (p *cStat) add(n *cStatStaging) {
func (p *cStats) add(n *cStatsStaging) {
p.Lock()
p.duration += n.duration
p.read += n.read
p.write += n.write
p.Unlock()
}
func (p *cStat) get() (duration time.Duration, read, write int64) {
func (p *cStats) get() (duration time.Duration, read, write uint64) {
p.Lock()
defer p.Unlock()
return p.duration, p.read, p.write
}
type cStatStaging struct {
type cStatsStaging struct {
start time.Time
duration time.Duration
on bool
read int64
write int64
read uint64
write uint64
}
func (p *cStatStaging) startTimer() {
func (p *cStatsStaging) startTimer() {
if !p.on {
p.start = time.Now()
p.on = true
}
}
func (p *cStatStaging) stopTimer() {
func (p *cStatsStaging) stopTimer() {
if p.on {
p.duration += time.Since(p.start)
p.on = false
}
}
type cStats struct {
lk sync.Mutex
stats []cStat
type cMem struct {
s *session
level int
rec *sessionRecord
}
func (p *cStats) addStat(level int, n *cStatStaging) {
p.lk.Lock()
if level >= len(p.stats) {
newStats := make([]cStat, level+1)
copy(newStats, p.stats)
p.stats = newStats
}
p.stats[level].add(n)
p.lk.Unlock()
func newCMem(s *session) *cMem {
return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}
func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
p.lk.Lock()
defer p.lk.Unlock()
if level < len(p.stats) {
return p.stats[level].get()
func (c *cMem) flush(mem *memdb.DB, level int) error {
s := c.s
// Write memdb to table.
iter := mem.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return err
}
return
// Pick level.
if level < 0 {
v := s.version()
level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
v.release()
}
c.rec.addTableFile(level, t)
s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
c.level = level
return nil
}
func (c *cMem) reset() {
c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}
func (c *cMem) commit(journal, seq uint64) error {
c.rec.setJournalNum(journal)
c.rec.setSeqNum(seq)
// Commit changes.
return c.s.commit(c.rec)
}
func (db *DB) compactionError() {
var err error
var (
err error
wlocked bool
)
noerr:
// No error.
for {
@@ -91,7 +121,7 @@ noerr:
case err = <-db.compErrSetC:
switch {
case err == nil:
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
goto haserr
@@ -109,7 +139,7 @@ haserr:
switch {
case err == nil:
goto noerr
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
}
@@ -125,9 +155,9 @@ hasperr:
case db.compPerErrC <- err:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
db.compWriteLocking = true
wlocked = true
case _, _ = <-db.closeC:
if db.compWriteLocking {
if wlocked {
// We should release the lock or Close will hang.
<-db.writeLockC
}
@@ -172,7 +202,7 @@ func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
disableBackoff = db.s.o.GetDisableCompactionBackoff()
)
for n := 0; ; n++ {
// Check whether the DB is closed.
// Check wether the DB is closed.
if db.isClosed() {
db.logf("%s exiting", name)
db.compactionExitTransact()
@@ -256,27 +286,22 @@ func (db *DB) compactionExitTransact() {
panic(errCompactionTransactExiting)
}
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
db.compCommitLk.Lock()
defer db.compCommitLk.Unlock() // Defer is necessary.
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
return db.s.commit(rec)
}, nil)
}
func (db *DB) memCompaction() {
mdb := db.getFrozenMem()
if mdb == nil {
mem := db.getFrozenMem()
if mem == nil {
return
}
defer mdb.decref()
defer mem.decref()
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
if mdb.Len() == 0 {
db.logf("memdb@flush skipping")
// drop frozen memdb
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
db.dropFrozenMem()
return
}
@@ -292,44 +317,35 @@ func (db *DB) memCompaction() {
return
}
var (
rec = &sessionRecord{}
stats = &cStatStaging{}
flushLevel int
)
// Generate tables.
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
stats.stopTimer()
return
defer stats.stopTimer()
return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
for _, r := range c.rec.addedTables {
db.logf("mem@flush revert @%d", r.num)
f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
}
}
return nil
})
rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.frozenSeq)
db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
defer stats.stopTimer()
return c.commit(db.journalFile.Num(), db.frozenSeq)
}, nil)
// Commit.
stats.startTimer()
db.compactionCommit("memdb", rec)
stats.stopTimer()
db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
for _, r := range rec.addedTables {
for _, r := range c.rec.addedTables {
stats.write += r.size
}
db.compStats.addStat(flushLevel, stats)
db.compStats[c.level].add(stats)
// Drop frozen memdb.
// Drop frozen mem.
db.dropFrozenMem()
// Resume table compaction.
@@ -343,7 +359,7 @@ func (db *DB) memCompaction() {
}
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
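memCompaction above flushes the frozen memtable into table files and then records the new journal number and sequence in the manifest before dropping the memtable and waking the table compactor. A rough sketch of that flow against a hypothetical minimal interface (FrozenMem, FlushToTables, CommitManifest and the other method names are assumptions, not the real *DB methods):

type frozenMem interface {
	Len() int
	Decref()
}

type memFlushDB interface {
	FrozenMem() frozenMem          // nil when nothing is frozen
	FlushToTables(frozenMem) error // build new tables from the memtable
	CommitManifest() error         // record journal number and frozen seq
	DropFrozenMem()
	TriggerTableCompaction()
}

func memCompact(db memFlushDB) {
	m := db.FrozenMem()
	if m == nil {
		return
	}
	defer m.Decref()

	if m.Len() == 0 {
		// Nothing to write; discard the frozen memtable directly.
		db.DropFrozenMem()
		return
	}
	if err := db.FlushToTables(m); err != nil {
		return // the real code retries this through its transact wrapper
	}
	if err := db.CommitManifest(); err != nil {
		return
	}
	db.DropFrozenMem()
	db.TriggerTableCompaction()
}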
type tableCompactionBuilder struct {
@@ -351,7 +367,7 @@ type tableCompactionBuilder struct {
s *session
c *compaction
rec *sessionRecord
stat0, stat1 *cStatStaging
stat0, stat1 *cStatsStaging
snapHasLastUkey bool
snapLastUkey []byte
@@ -405,9 +421,9 @@ func (b *tableCompactionBuilder) flush() error {
if err != nil {
return err
}
b.rec.addTableFile(b.c.sourceLevel+1, t)
b.rec.addTableFile(b.c.level+1, t)
b.stat1.write += t.size
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.tw = nil
return nil
}
@@ -452,7 +468,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
}
ikey := iter.Key()
ukey, seq, kt, kerr := parseInternalKey(ikey)
ukey, seq, kt, kerr := parseIkey(ikey)
if kerr == nil {
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +494,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
hasLastUkey = true
lastUkey = append(lastUkey[:0], ukey...)
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
}
switch {
case lastSeq <= b.minSeq:
// Dropped because a newer entry for the same user key exists
fallthrough // (A)
case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger seq numbers
@@ -507,7 +523,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
// Don't drop corrupted keys.
hasLastUkey = false
lastUkey = lastUkey[:0]
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
b.kerrCnt++
}
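The merge loop above walks internal keys newest-first per user key and decides which entries may be dropped, based on the oldest sequence number any open snapshot can still observe. A simplified restatement of that rule with assumed names (shouldDrop, keyKind and the parameters are illustrative only):

const maxSeq = ^uint64(0) // sentinel used before any entry of a user key is seen

type keyKind int

const (
	kindValue keyKind = iota
	kindDelete
)

// shouldDrop reports whether the current entry can be omitted from the
// output table. prevSeq is the sequence of the previous (newer) entry
// seen for the same user key, or maxSeq for the first one; minSeq is the
// oldest sequence any open snapshot can still observe.
func shouldDrop(kind keyKind, seq, prevSeq, minSeq uint64, baseLevelForKey bool) bool {
	switch {
	case prevSeq <= minSeq:
		// A newer entry for this user key is already below minSeq, so no
		// snapshot can ever read this older one.
		return true
	case kind == kindDelete && seq <= minSeq && baseLevelForKey:
		// Deletion marker older than every snapshot, no data for the key
		// in higher levels, and anything in lower levels has a larger
		// sequence: the tombstone itself can be dropped as well.
		return true
	default:
		return false
	}
}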
@@ -530,7 +546,8 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
func (b *tableCompactionBuilder) revert() error {
for _, at := range b.rec.addedTables {
b.s.logf("table@build revert @%d", at.num)
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
f := b.s.getTableFile(at.num)
if err := f.Remove(); err != nil {
return err
}
}
@@ -540,29 +557,31 @@ func (b *tableCompactionBuilder) revert() error {
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()
rec := &sessionRecord{}
rec.addCompPtr(c.sourceLevel, c.imax)
rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
rec.addCompPtr(c.level, c.imax)
if !noTrivial && c.trivial() {
t := c.levels[0][0]
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
rec.delTable(c.sourceLevel, t.fd.Num)
rec.addTableFile(c.sourceLevel+1, t)
db.compactionCommit("table-move", rec)
t := c.tables[0][0]
db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
rec.delTable(c.level, t.file.Num())
rec.addTableFile(c.level+1, t)
db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
return db.s.commit(rec)
}, nil)
return
}
var stats [2]cStatStaging
for i, tables := range c.levels {
var stats [2]cStatsStaging
for i, tables := range c.tables {
for _, t := range tables {
stats[i].read += t.size
// Insert deleted tables into record
rec.delTable(c.sourceLevel+i, t.fd.Num)
rec.delTable(c.level+i, t.file.Num())
}
}
sourceSize := int(stats[0].read + stats[1].read)
minSeq := db.minSeq()
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
b := &tableCompactionBuilder{
db: db,
@@ -572,60 +591,49 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
stat1: &stats[1],
minSeq: minSeq,
strict: db.s.o.GetStrict(opt.StrictCompaction),
tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
}
db.compactionTransact("table@build", b)
// Commit.
stats[1].startTimer()
db.compactionCommit("table", rec)
stats[1].stopTimer()
// Commit changes
db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
stats[1].startTimer()
defer stats[1].stopTimer()
return db.s.commit(rec)
}, nil)
resultSize := int(stats[1].write)
db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
// Save compaction stats
for i := range stats {
db.compStats.addStat(c.sourceLevel+1, &stats[i])
db.compStats[c.level+1].add(&stats[i])
}
}
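The trivial() branch above avoids rewriting any data when the compaction consists of a single input table with nothing to merge against: the table is simply re-pointed to the next level in the manifest. A small sketch with assumed record and table types (not the real sessionRecord):

type tableRef struct {
	level int
	num   int64
	size  int64
}

type manifestRec struct {
	deletedTables []tableRef
	addedTables   []tableRef
}

// trivialMove records a one-table compaction as a pure manifest edit:
// remove the table from its level and re-add it one level down. The
// table file on disk is untouched, which is what makes the move cheap.
func trivialMove(rec *manifestRec, t tableRef) {
	rec.deletedTables = append(rec.deletedTables, t)
	moved := t
	moved.level++
	rec.addedTables = append(rec.addedTables, moved)
}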
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
db.logf("table@compaction range L%d %q:%q", level, umin, umax)
if level >= 0 {
if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
} else {
// Retry until nothing to compact.
for {
compacted := false
// Scan for maximum level with overlapped tables.
v := db.s.version()
m := 1
for i := m; i < len(v.levels); i++ {
tables := v.levels[i]
if tables.overlaps(db.s.icmp, umin, umax, false) {
m = i
}
v := db.s.version()
m := 1
for i, t := range v.tables[1:] {
if t.overlaps(db.s.icmp, umin, umax, false) {
m = i + 1
}
v.release()
}
v.release()
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
db.tableCompaction(c, true)
compacted = true
}
}
if !compacted {
break
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
}
}
return nil
}
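For a whole-database range compaction (level < 0), the code scans for the deepest level whose tables overlap the requested user-key range and then compacts every level from 0 up to it; the variant with the outer loop repeats until a pass does no work. A sketch of that scan with assumed types (keyRange, overlaps and compactRange are illustrative):

type keyRange struct{ min, max []byte }

func rangeCompactAll(
	levels [][]keyRange, // per level, the key span of each table
	overlaps func(r keyRange, umin, umax []byte) bool,
	compactRange func(level int, umin, umax []byte) bool, // true if work was done
	umin, umax []byte,
) {
	for {
		// Find the deepest level with any overlapping table.
		m := 1
		for i := 1; i < len(levels); i++ {
			for _, r := range levels[i] {
				if overlaps(r, umin, umax) {
					m = i
					break
				}
			}
		}
		compacted := false
		for level := 0; level < m; level++ {
			if compactRange(level, umin, umax) {
				compacted = true
			}
		}
		if !compacted {
			return
		}
	}
}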
func (db *DB) tableAutoCompaction() {
@@ -652,11 +660,11 @@ type cCmd interface {
ack(err error)
}
type cAuto struct {
type cIdle struct {
ackC chan<- error
}
func (r cAuto) ack(err error) {
func (r cIdle) ack(err error) {
if r.ackC != nil {
defer func() {
recover()
@@ -680,21 +688,13 @@ func (r cRange) ack(err error) {
}
}
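Each compaction command carries an optional reply channel, and ack recovers from the send panic in case the waiting goroutine has already closed that channel. The same pattern in isolation (cmd is an assumed stand-in for the command types above):

type cmd struct {
	ackC chan<- error // nil when the sender does not want a reply
}

func (c cmd) ack(err error) {
	if c.ackC == nil {
		return
	}
	// The waiter may have given up and closed its reply channel; the
	// deferred recover turns the resulting send panic into a no-op.
	defer func() {
		recover()
	}()
	c.ackC <- err
}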
// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
select {
case compC <- cAuto{}:
default:
}
}
// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cAuto{ch}:
case compC <- cIdle{ch}:
case err = <-db.compErrC:
return
case _, _ = <-db.closeC:
@@ -710,8 +710,16 @@ func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
return err
}
// This will trigger auto compaction but will not wait for it.
func (db *DB) compSendTrigger(compC chan<- cCmd) {
select {
case compC <- cIdle{}:
default:
}
}
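The two trigger styles above differ only in whether the caller waits: the fire-and-forget variant uses a non-blocking send, while the waiting variant blocks until the compactor acknowledges the command or the database closes. A simplified sketch (compCmd and errDBClosed are assumptions; the real waiting version also watches the compaction error channel):

import "errors"

var errDBClosed = errors.New("leveldb: closed")

type compCmd struct {
	ackC chan error // nil for fire-and-forget commands
}

// trigger asks the compactor to run soon but never blocks the caller.
func trigger(compC chan<- compCmd) {
	select {
	case compC <- compCmd{}:
	default: // compactor busy; it will run again on its own schedule
	}
}

// triggerWait queues a command and blocks until it is acknowledged or
// the database is closed.
func triggerWait(compC chan<- compCmd, closeC <-chan struct{}) error {
	ch := make(chan error)
	select {
	case compC <- compCmd{ackC: ch}:
	case <-closeC:
		return errDBClosed
	}
	select {
	case err := <-ch:
		return err
	case <-closeC:
		return errDBClosed
	}
}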
// Send range compaction request.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
@@ -751,7 +759,7 @@ func (db *DB) mCompaction() {
select {
case x = <-db.mcompCmdC:
switch x.(type) {
case cAuto:
case cIdle:
db.memCompaction()
x.ack(nil)
x = nil
@@ -812,10 +820,11 @@ func (db *DB) tCompaction() {
}
if x != nil {
switch cmd := x.(type) {
case cAuto:
case cIdle:
ackQ = append(ackQ, x)
case cRange:
x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
x.ack(nil)
default:
panic("leveldb: unknown command")
}
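tCompaction above is the dispatcher goroutine for table compactions: auto/idle commands are queued and acknowledged after the next automatic pass, while range commands run immediately. A reduced sketch that acknowledges auto commands right away instead of queueing them (autoCmd and rangeCmd are assumed types):

type autoCmd struct{ ackC chan error }

type rangeCmd struct {
	level    int
	min, max []byte
	ackC     chan error
}

func dispatch(
	cmdC <-chan interface{},
	closeC <-chan struct{},
	auto func(),
	compactRange func(level int, min, max []byte) error,
) {
	for {
		select {
		case x := <-cmdC:
			switch c := x.(type) {
			case autoCmd:
				auto()
				if c.ackC != nil {
					c.ackC <- nil
				}
			case rangeCmd:
				err := compactRange(c.level, c.min, c.max)
				if c.ackC != nil {
					c.ackC <- err
				}
			default:
				panic("leveldb: unknown compaction command")
			}
		case <-closeC:
			return
		}
	}
}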
