Compare commits


25 Commits

Author SHA1 Message Date
Jakob Borg
3cc4cb0a0b Translation update 2015-03-29 09:46:44 +02:00
Jakob Borg
e6cba61740 Don't allow arbitrarily short reconnection intervals (fixes #1524) 2015-03-29 09:44:20 +02:00
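
As an illustration of this fix, a minimal sketch of clamping a configured reconnection interval to a floor; the constant and function names here are hypothetical, not Syncthing's actual code.

```go
package example

import "time"

// Assumed floor; the actual minimum enforced by the fix may differ.
const minReconnectInterval = 5 * time.Second

// reconnectInterval clamps a user-configured interval so it can never be
// arbitrarily short.
func reconnectInterval(configured time.Duration) time.Duration {
	if configured < minReconnectInterval {
		return minReconnectInterval
	}
	return configured
}
```
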
Jakob Borg
cd7ce73f59 Add negative cache time to global discovery
This reduces the amount of external queries by not repeating a query for
a given address if we have failed within the last three minutes.
2015-03-26 08:43:55 +01:00
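
A minimal sketch of the negative-cache idea described above, assuming a simple map from address to last failure time; the real global discovery code is structured differently.

```go
package example

import (
	"sync"
	"time"
)

const negCacheTime = 3 * time.Minute

// negCache remembers addresses whose lookups failed recently, so an
// external query for them is not repeated within negCacheTime.
type negCache struct {
	mut    sync.Mutex
	failed map[string]time.Time
}

func newNegCache() *negCache {
	return &negCache{failed: make(map[string]time.Time)}
}

func (c *negCache) ShouldQuery(addr string) bool {
	c.mut.Lock()
	defer c.mut.Unlock()
	t, ok := c.failed[addr]
	return !ok || time.Since(t) >= negCacheTime
}

func (c *negCache) MarkFailed(addr string) {
	c.mut.Lock()
	defer c.mut.Unlock()
	c.failed[addr] = time.Now()
}
```
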
KAMADA Ken'ichi
fab4e33c58 Preserve the permission of a newly created directory
We need an explicit chmod() when creating a new directory.
Otherwise a new directory may be created with a different permission
from the one received from an originating device, because the umask
is applied to the mode given to mkdir().
The incorrect permission is later sent back to the originating device
and the original permission will be lost.
2015-03-26 08:43:16 +01:00
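
The umask interaction is easy to reproduce: mkdir(2) masks the requested mode, while chmod(2) does not. A sketch of the pattern the commit message describes (hypothetical helper, not the actual puller code):

```go
package example

import "os"

// mkdirPreservingMode creates a directory with exactly the given mode.
// os.Mkdir applies the process umask to mode (a umask of 022 turns 0777
// into 0755); the explicit Chmod afterwards bypasses the umask and
// restores the mode received from the originating device.
func mkdirPreservingMode(path string, mode os.FileMode) error {
	if err := os.Mkdir(path, mode); err != nil {
		return err
	}
	return os.Chmod(path, mode)
}
```
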
Audrius Butkevicius
b79b13a75b Configure location provider 2015-03-26 08:43:06 +01:00
Audrius Butkevicius
c294d5f087 Fix crash on walker error (fixes #1507) 2015-03-22 14:09:14 +00:00
Jakob Borg
10ead2e61f Send correct MIME type for SVG images (fixes #1506) 2015-03-22 12:56:50 +01:00
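
For context, browsers only render SVG served as image/svg+xml; a minimal sketch of setting the header explicitly (illustrative, not the actual GUI asset handler):

```go
package example

import "net/http"

// serveSVG writes an SVG asset with the MIME type browsers require;
// without it the image is typically treated as plain text and not rendered.
func serveSVG(w http.ResponseWriter, svg []byte) {
	w.Header().Set("Content-Type", "image/svg+xml")
	w.Write(svg)
}
```
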
Jakob Borg
960b40fa89 Translation update 2015-03-22 10:34:45 +01:00
Stefan Tatschner
afad329e99 systemd: Set -logflags to 0, provide -no-browser flag
Syncthing should not try to start a browser when invoked by systemd.
Furthermore we do not need any timestamps in the journal as systemd
already handles this for us.
2015-03-22 10:26:53 +01:00
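
In unit-file terms this amounts to something like the following (the binary path is assumed; the flags are the ones named in the commit message):

```ini
[Service]
# systemd captures stdout into the journal, which adds its own timestamps,
# so -logflags=0 disables Syncthing's; -no-browser prevents launching one.
ExecStart=/usr/bin/syncthing -no-browser -logflags=0
```
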
Jakob Borg
4025284fba Update integration test configs to v10 2015-03-22 10:26:53 +01:00
Jakob Borg
a595e814dd Set defaults correctly for autoNormalize
The default:"foo" struct tags aren't actually used for folder configs.
2015-03-22 10:26:51 +01:00
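
For reference, a sketch of how `default:"..."` struct tags can be read and applied via reflection, which is the mechanism the message says was not in effect for folder configs; the helper and type below are hypothetical stand-ins.

```go
package example

import (
	"reflect"
	"strconv"
)

// FolderConfiguration is a stand-in; the real config struct is larger.
type FolderConfiguration struct {
	AutoNormalize bool `default:"true"`
}

// applyBoolDefaults walks the fields of the struct pointed to by v and sets
// bool fields from their `default` tag. Real code would only do this when
// filling in newly created configs, not unconditionally.
func applyBoolDefaults(v interface{}) {
	rv := reflect.ValueOf(v).Elem() // v must be a pointer to a struct
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		def := rt.Field(i).Tag.Get("default")
		if def == "" || rt.Field(i).Type.Kind() != reflect.Bool {
			continue
		}
		if b, err := strconv.ParseBool(def); err == nil {
			rv.Field(i).SetBool(b)
		}
	}
}
```
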
Alexander Graf
963d8121d9 use Lstat instead of Stat to prevent errors with symlinks 2015-03-22 08:48:37 +01:00
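
The distinction: os.Stat follows symlinks and fails when the target is missing, while os.Lstat returns info about the link itself. A minimal sketch:

```go
package example

import "os"

// lstatEntry stats a directory entry without following symlinks, so a
// dangling link yields its own FileInfo instead of an error.
func lstatEntry(path string) (os.FileInfo, error) {
	info, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	if info.Mode()&os.ModeSymlink != 0 {
		// It's a symlink; handle the link itself rather than its target.
	}
	return info, nil
}
```
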
Audrius Butkevicius
03019988b1 Skip unspecified IPs 2015-03-22 08:48:37 +01:00
Audrius Butkevicius
97115afa32 Print LANs on startup 2015-03-22 08:48:37 +01:00
Jakob Borg
c9f5bae177 Decide once and for all to return filepath.SkipDir or nil 2015-03-22 08:47:36 +01:00
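
The choice matters because a filepath.Walk callback prunes a directory by returning filepath.SkipDir and keeps descending by returning nil; mixing the two for the same condition gives inconsistent traversal. A sketch of picking one consistently (the skip condition here is made up):

```go
package example

import (
	"os"
	"path/filepath"
	"strings"
)

// walkVisible walks root, consistently pruning dot-directories by
// returning filepath.SkipDir and continuing everywhere else with nil.
func walkVisible(root string, visit func(string) error) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() && strings.HasPrefix(info.Name(), ".") {
			return filepath.SkipDir
		}
		return visit(path)
	})
}
```
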
Jakob Borg
2bd11ca4e3 Automatically fix file name normalization errors (fixes #430) 2015-03-22 08:47:34 +01:00
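
A sketch of the detection step, using the golang.org/x/text/unicode/norm dependency listed in Godeps below; the actual renaming of the on-disk file is omitted.

```go
package example

import "golang.org/x/text/unicode/norm"

// normalizedName reports whether name needs normalization and, if so,
// returns its NFC form, which is what the fix renames files to.
func normalizedName(name string) (string, bool) {
	if norm.NFC.IsNormalString(name) {
		return name, false
	}
	return norm.NFC.String(name), true
}
```
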
Jakob Borg
a5de1acb46 Use SVG format logos 2015-03-22 08:46:54 +01:00
Jakob Borg
5581751e9d Rename files to match type names 2015-03-22 08:46:43 +01:00
Jakob Borg
055ae92273 Refactor state tracking (...)
Move state tracking into the puller/scanner objects. This is a first
step towards resolving #1391.

Rename Puller and Scanner to roFolder and rwFolder as they have more
duties than just pulling and scanning, and don't need to be exported.
2015-03-22 08:46:43 +01:00
Audrius Butkevicius
dea7c77055 Rebuild assets 2015-03-22 08:46:41 +01:00
Audrius Butkevicius
765dda6ad7 Fix build 2015-03-22 08:46:26 +01:00
Jakob Borg
28702a1c9d Add /rest/filestatus 2015-03-22 08:46:26 +01:00
Jakob Borg
40d1226612 MPLv2 2015-03-22 08:46:25 +01:00
Johan Vromans
effe8ce8a9 Suppress 'Last File Received' if a node is folder master (fixes #1472) 2015-03-22 08:46:24 +01:00
Jakob Borg
4c3ba24826 Add sciurius 2015-03-22 08:45:42 +01:00
324 changed files with 10555 additions and 40280 deletions

.gitattributes (vendored): 9 lines changed

@@ -1,9 +0,0 @@
# Text files use LF line endings in this repository
* text=auto
# Except the dependencies, which we leave alone
Godeps/** -text=auto
# Diffs on these files are meaningless
gui.files.go -diff
*.svg -diff

.gitignore (vendored): 4 lines changed

@@ -3,6 +3,8 @@ syncthing.exe
*.tar.gz
*.zip
*.asc
*.sublime*
.idea/
.jshintrc
coverage.out
files/pidx
@@ -10,7 +12,7 @@ bin
perfstats*.csv
coverage.xml
!gui/scripts/syncthing
.DS_Store
syncthing.md5
syncthing.exe.md5
RELEASE
deb

AUTHORS: 23 lines changed

@@ -3,50 +3,36 @@
Aaron Bieber <qbit@deftly.net>
Alexander Graf <register-github@alex-graf.de>
Andrew Dunham <andrew@du.nham.ca>
Antony Male <antony.male@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Audrius Butkevicius <audrius.butkevicius@gmail.com>
Bart De Vries <devriesb@gmail.com>
Arthur Axel fREW Schmidt <frew@afoolishmanifesto.com> <frioux@gmail.com>
Ben Curthoys <ben@bencurthoys.com>
Ben Schulz <ueomkail@gmail.com> <uok@users.noreply.github.com>
Ben Sidhom <bsidhom@gmail.com>
Brandon Philips <brandon@ifup.org>
Brendan Long <self@brendanlong.com>
Brian R. Becker <brbecker@gmail.com>
Caleb Callaway <enlightened.despot@gmail.com>
Carsten Hagemann <moter8@gmail.com>
Cathryne Linenweaver <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
Chris Howie <me@chrishowie.com>
Chris Joel <chris@scriptolo.gy>
Colin Kennedy <moshen.colin@gmail.com>
Daniel Bergmann <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
Daniel Martí <mvdan@mvdan.cc>
Denis A. <denisva@gmail.com>
Dennis Wilson <dw@risu.io>
Dominik Heidler <dominik@heidler.eu>
Elias Jarlebring <jarlebring@gmail.com>
Emil Hessman <emil@hessman.se>
Erik Meitner <e.meitner@willystreet.coop>
Federico Castagnini <federico.castagnini@gmail.com>
Felix Ableitner <me@nutomic.com>
Felix Unterpaintner <bigbear2nd@gmail.com>
Francois-Xavier Gsell <fxgsell@gmail.com>
Frank Isemann <frank@isemann.name>
Gilli Sigurdsson <gilli@vx.is>
Jacek Szafarkiewicz <szafar@linux.pl>
Jakob Borg <jakob@nym.se>
James Patterson <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec <dzardacz@gmail.com>
Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
Jochen Voss <voss@seehuhn.de>
Johan Vromans <jvromans@squirrel.nl>
Karol Różycki <rozycki.karol@gmail.com>
Ken'ichi Kamada <kamada@nanohz.org>
Kamada Ken'ichi <kamada@nanohz.org>
Lode Hoste <zillode@zillode.be>
Lord Landon Agahnim <lordlandon@gmail.com>
Marc Laporte <marc@marclaporte.com> <marc@laporte.name>
Marc Pujol <kilburn@la3.org>
Marcin Dziadus <dziadus.marcin@gmail.com>
Marc Laporte <marc@marclaporte.com>
Marc Pujol <kilburn@la3.org>
Michael Jephcote <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Michael Tilli <pyfisch@gmail.com>
Pascal Jungblut <github@pascalj.com> <mail@pascal-jungblut.com>
@@ -55,7 +41,6 @@ Philippe Schommers <philippe@schommers.be>
Phill Luby <phill.luby@newredo.com>
Piotr Bejda <piotrb10@gmail.com>
Ryan Sullivan <kayoticsully@gmail.com>
Sergey Mishin <ralder@yandex.ru>
Stefan Tatschner <stefan@sevenbyte.org>
Tim Abell <tim@timwise.co.uk>
Tobias Nygren <tnn@nygren.pp.se>


@@ -32,15 +32,64 @@ latest info on Transifex.
## Contributing Code
Every contribution is welcome. If you want to contribute but are unsure
where to start, any open issues are fair game! See the [Contribution
Guidelines](http://docs.syncthing.net/dev/contributing.html) for the full
story on committing code.
where to start, any open issues are fair game! Be prepared for a
[certain amount of review](https://github.com/syncthing/syncthing/wiki/FAQ#why-are-you-being-so-hard-on-my-pull-request);
it's all in the name of quality. :) Following the points below will make this
a smoother process.
## Contributing Documentation
Individuals making significant and valuable contributions are given
commit-access to the project. If you make a significant contribution and
are not considered for commit-access, please contact any of the
Syncthing core team members.
Updates to the [documentation site](http://docs.syncthing.net/) can be
made as pull requests on the [documentation
repository](https://github.com/syncthing/docs).
All nontrivial contributions should go through the pull request
mechanism for internal review. Determining what is "nontrivial" is left
at the discretion of the contributor.
### Authorship
All code authors are listed in the AUTHORS file. Commits must be made
with the same name and email as listed in the AUTHORS file. To
accomplish this, ensure that your git configuration is set correctly
prior to making your first commit;
$ git config --global user.name "Jane Doe"
$ git config --global user.email janedoe@example.com
You must be reachable on the given email address. If you do not wish to
use your real name for whatever reason, using a nickname or pseudonym is
perfectly acceptable.
### Core Team
The Syncthing core team currently consists of the following members;
- Jakob Borg (@calmh)
- Audrius Butkevicius (@AudriusButkevicius)
## Coding Style
- Follow the conventions laid out in [Effective Go](https://golang.org/doc/effective_go.html)
as much as makes sense.
- All text files use Unix line endings.
- Each commit should be `go fmt` clean.
- The commit message subject should be a single short sentence
describing the change, starting with a capital letter.
- Commits that resolve an existing issue must include the issue number
as `(fixes #123)` at the end of the commit message subject.
- Imports are grouped per `goimports` standard; that is, standard
library first, then third party libraries after a blank line (see the
example after this list).
- A contribution solving a single issue or introducing a single new
feature should probably be a single commit based on the current
`master` branch. You may be asked to "rebase" or "squash" your pull
request to make sure this is the case, especially if there have been
amendments during review.
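
A sketch of the import grouping described in the list above, using one of this repository's actual dependencies:

```go
package example

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

// describe shows the grouping in use: standard library imports first,
// then third-party imports after a blank line.
func describe(b *ratelimit.Bucket) string {
	return fmt.Sprintf("rate %v at %v", b.Rate(), time.Now())
}
```
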
## Licensing
@@ -50,3 +99,42 @@ strings which are licensed under the Creative Commons Attribution 4.0
International License. You retain the copyright to code you have
written.
When accepting your first contribution, the maintainer of the project
will ensure that you are added to the AUTHORS file. You are welcome to
add yourself as a separate commit in your first pull request.
## Building
[See the documentation](https://github.com/syncthing/syncthing/wiki/Building)
on how to get started with a build environment.
## Branches
- `master` is the main branch containing good code that will end up in
the next release. You should base your work on it. It won't ever be
rebased or force-pushed to.
- `vx.y` branches exist to make patch releases on otherwise obsolete
minor releases. Should only contain fixes cherry picked from master.
Don't base any work on them.
- Other branches are probably topic branches and may be subject to
rebasing. Don't base any work on them unless you specifically know
otherwise.
## Tags
All releases are tagged semver style as `vx.y.z`. Release tags are
signed by GPG key BCE524C7.
## Tests
Yes please!
## Documentation
[Over here!](https://github.com/syncthing/syncthing/wiki)
## License
MPLv2

Godeps/Godeps.json (generated): 32 lines changed

@@ -1,17 +1,17 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "devel",
"GoVersion": "go1.4",
"Packages": [
"./cmd/..."
],
"Deps": [
{
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "4f7c2045dbd17b802370e2e6022200468abf02ba"
"Rev": "93a831dcee242be64a9cc9803dda84af25932de7"
},
{
"ImportPath": "github.com/calmh/logger",
"Rev": "c96f6a1a8c7b6bf2f4860c667867d90174799eb2"
"Rev": "f50d32b313bec2933a3e1049f7416a29f3413d29"
},
{
"ImportPath": "github.com/calmh/luhn",
@@ -19,31 +19,27 @@
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "0c7f8a7704bfec561913f4df52c832f094ef56f0"
"Rev": "ff948d7666c5e0fd18d398f6278881724d36a90b"
},
{
"ImportPath": "github.com/juju/ratelimit",
"Rev": "772f5c38e468398c4511514f4f6aa9a4185bc0a0"
"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
"Rev": "91292666f7e40f03185cdd1da7d85633c973eca7"
},
{
"ImportPath": "github.com/syncthing/protocol",
"Rev": "ebcdea63c07327a342f65415bbadc497462b8f1f"
"Rev": "1a4398cc55c8fe82a964097eaf59f2475b020a49"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "183614d6b32571e867df4cf086f5480ceefbdfac"
"Rev": "e3f32eb300aa1e514fe8ba58d008da90a062273d"
},
{
"ImportPath": "github.com/thejerf/suture",
"Rev": "fc7aaeabdc43fe41c5328efa1479ffea0b820978"
"ImportPath": "github.com/syndtr/gosnappy/snappy",
"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
},
{
"ImportPath": "github.com/vitrun/qart/coding",
@@ -59,19 +55,19 @@
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "7d5b0be716b9d6d4269afdaae10032bb296d3cdf"
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "7d5b0be716b9d6d4269afdaae10032bb296d3cdf"
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "3eb7007b740b66a77f3c85f2660a0240b284115a"
"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "3eb7007b740b66a77f3c85f2660a0240b284115a"
"Rev": "c980adc4a823548817b9c47d38c6ca6b7d7d8b6a"
}
]
}


@@ -1,23 +0,0 @@
// +build gofuzz
package lz4
import "encoding/binary"
func Fuzz(data []byte) int {
if len(data) < 4 {
return 0
}
ln := binary.LittleEndian.Uint32(data)
if ln > (1 << 21) {
return 0
}
if _, err := Decode(nil, data); err != nil {
return 0
}
return 1
}


@@ -141,7 +141,7 @@ func Decode(dst, src []byte) ([]byte, error) {
length += ln
}
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
if int(d.spos+length) > len(d.src) {
return nil, ErrCorrupt
}
@@ -179,12 +179,7 @@ func Decode(dst, src []byte) ([]byte, error) {
}
literal := d.dpos - d.ref
if literal < 4 {
if int(d.dpos+4) > len(d.dst) {
return nil, ErrCorrupt
}
d.cp(4, decr[literal])
} else {
length += 4


@@ -25,10 +25,8 @@
package lz4
import (
"encoding/binary"
"errors"
)
import "encoding/binary"
import "errors"
const (
minMatch = 4


@@ -6,7 +6,6 @@ package logger
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
@@ -17,7 +16,6 @@ type LogLevel int
const (
LevelDebug LogLevel = iota
LevelVerbose
LevelInfo
LevelOK
LevelWarn
@@ -38,13 +36,6 @@ type Logger struct {
var DefaultLogger = New()
func New() *Logger {
if os.Getenv("LOGGER_DISCARD") != "" {
// Hack to completely disable logging, for example when running benchmarks.
return &Logger{
logger: log.New(ioutil.Discard, "", 0),
}
}
return &Logger{
logger: log.New(os.Stdout, "", log.Ltime),
}
@@ -92,24 +83,6 @@ func (l *Logger) Debugf(format string, vals ...interface{}) {
l.callHandlers(LevelDebug, s)
}
// Infoln logs a line with a VERBOSE prefix.
func (l *Logger) Verboseln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Infof logs a formatted line with a VERBOSE prefix.
func (l *Logger) Verbosef(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Infoln logs a line with an INFO prefix.
func (l *Logger) Infoln(vals ...interface{}) {
l.mut.Lock()


@@ -67,7 +67,7 @@ func BenchmarkThisEncode(b *testing.B) {
func BenchmarkThisEncoder(b *testing.B) {
w := xdr.NewWriter(ioutil.Discard)
for i := 0; i < b.N; i++ {
_, err := s.EncodeXDRInto(w)
_, err := s.encodeXDR(w)
if err != nil {
b.Fatal(err)
}
@@ -108,7 +108,7 @@ func BenchmarkThisDecoder(b *testing.B) {
r := xdr.NewReader(rr)
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.DecodeXDRFrom(r)
err := t.decodeXDR(r)
if err != nil {
b.Fatal(err)
}


@@ -26,9 +26,7 @@ XDRBenchStruct Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | I3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ uint8 Structure \
/ /
| uint8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Bs0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -71,7 +69,7 @@ struct XDRBenchStruct {
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
@@ -89,11 +87,11 @@ func (o XDRBenchStruct) MustMarshalXDR() []byte {
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o XDRBenchStruct) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(o.I1)
xw.WriteUint32(o.I2)
xw.WriteUint16(o.I3)
@@ -113,16 +111,16 @@ func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *XDRBenchStruct) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *XDRBenchStruct) decodeXDR(xr *xdr.Reader) error {
o.I1 = xr.ReadUint64()
o.I2 = xr.ReadUint32()
o.I3 = xr.ReadUint16()
@@ -157,7 +155,7 @@ struct repeatReader {
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o repeatReader) MarshalXDR() ([]byte, error) {
@@ -175,27 +173,27 @@ func (o repeatReader) MustMarshalXDR() []byte {
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o repeatReader) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o repeatReader) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteBytes(o.data)
return xw.Tot(), xw.Error()
}
func (o *repeatReader) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *repeatReader) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *repeatReader) decodeXDR(xr *xdr.Reader) error {
o.data = xr.ReadBytes()
return xr.Error()
}


@@ -52,7 +52,7 @@ import (
var encodeTpl = template.Must(template.New("encoder").Parse(`
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}//+n
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
@@ -70,11 +70,11 @@ func (o {{.TypeName}}) MustMarshalXDR() []byte {
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}//+n
func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
@@ -87,7 +87,7 @@ func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
{{end}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
{{else}}
_, err := o.{{$fieldInfo.Name}}.EncodeXDRInto(xw)
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -105,7 +105,7 @@ func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
{{else if $fieldInfo.IsBasic}}
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
{{else}}
_, err := o.{{$fieldInfo.Name}}[i].EncodeXDRInto(xw)
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -118,16 +118,16 @@ func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}//+n
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}//+n
func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
{{range $fieldInfo := .Fields}}
{{if not $fieldInfo.IsSlice}}
{{if ne $fieldInfo.Convert ""}}
@@ -139,13 +139,10 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
{{end}}
{{else}}
(&o.{{$fieldInfo.Name}}).DecodeXDRFrom(xr)
(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
{{end}}
{{else}}
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
if _{{$fieldInfo.Name}}Size < 0 {
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
}
{{if ge $fieldInfo.Max 1}}
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
@@ -158,7 +155,7 @@ func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
{{else if $fieldInfo.IsBasic}}
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
{{else}}
(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
{{end}}
}
{{end}}
@@ -260,18 +257,12 @@ func handleStruct(t *ast.StructType) []fieldInfo {
} else {
f = fieldInfo{
Name: fn,
IsBasic: false,
IsSlice: true,
FieldType: tn,
Max: max,
}
}
case *ast.SelectorExpr:
f = fieldInfo{
Name: fn,
FieldType: ft.Sel.Name,
Max: max,
}
}
fs = append(fs, f)
@@ -319,9 +310,10 @@ func generateDiagram(output io.Writer, s structInfo) {
for _, f := range fs {
tn := f.FieldType
sl := f.IsSlice
name := uncamelize(f.Name)
if f.IsSlice {
if sl {
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
fmt.Fprintln(output, line)
}
@@ -348,16 +340,13 @@ func generateDiagram(output io.Writer, s structInfo) {
fmt.Fprintf(output, "/ %61s /\n", "")
fmt.Fprintln(output, line)
default:
if f.IsSlice {
if sl {
tn = "Zero or more " + tn + " Structures"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
fmt.Fprintf(output, "/ %s /\n", center("", 61))
} else {
tn = tn + " Structure"
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
fmt.Fprintf(output, "/ %s /\n", center("", 61))
fmt.Fprintf(output, "| %s |\n", center(tn, 61))
}
fmt.Fprintln(output, line)
}


@@ -32,11 +32,11 @@ type TestStruct struct {
type Opaque [32]byte
func (u *Opaque) EncodeXDRInto(w *xdr.Writer) (int, error) {
func (u *Opaque) encodeXDR(w *xdr.Writer) (int, error) {
return w.WriteRaw(u[:])
}
func (u *Opaque) DecodeXDRFrom(r *xdr.Reader) (int, error) {
func (u *Opaque) decodeXDR(r *xdr.Reader) (int, error) {
return r.ReadRaw(u[:])
}


@@ -18,23 +18,17 @@ TestStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ int Structure \
/ /
| int |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ int8 Structure \
/ /
| int8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ uint8 Structure \
/ /
| uint8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | I16 |
| int16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 0x0000 | UI16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I32 |
| int32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| UI32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -58,9 +52,7 @@ TestStruct Structure:
\ S (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Opaque Structure \
/ /
| Opaque |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -76,9 +68,9 @@ struct TestStruct {
int I;
int8 I8;
uint8 UI8;
int I16;
int16 I16;
unsigned int UI16;
int I32;
int32 I32;
unsigned int UI32;
hyper I64;
unsigned hyper UI64;
@@ -92,7 +84,7 @@ struct TestStruct {
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o TestStruct) MarshalXDR() ([]byte, error) {
@@ -110,11 +102,11 @@ func (o TestStruct) MustMarshalXDR() []byte {
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o TestStruct) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(uint64(o.I))
xw.WriteUint8(uint8(o.I8))
xw.WriteUint8(o.UI8)
@@ -132,7 +124,7 @@ func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
}
xw.WriteString(o.S)
_, err := o.C.EncodeXDRInto(xw)
_, err := o.C.encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -148,16 +140,16 @@ func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *TestStruct) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *TestStruct) decodeXDR(xr *xdr.Reader) error {
o.I = int(xr.ReadUint64())
o.I8 = int8(xr.ReadUint8())
o.UI8 = xr.ReadUint8()
@@ -169,11 +161,8 @@ func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
o.UI64 = xr.ReadUint64()
o.BS = xr.ReadBytesMax(1024)
o.S = xr.ReadStringMax(1024)
(&o.C).DecodeXDRFrom(xr)
(&o.C).decodeXDR(xr)
_SSSize := int(xr.ReadUint32())
if _SSSize < 0 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}
if _SSSize > 1024 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}


@@ -68,8 +68,7 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
if r.err != nil {
return nil
}
if l < 0 || max > 0 && l > max {
// l may be negative on 32 bit builds
if max > 0 && l > max {
r.err = ElementSizeExceeded("bytes field", l, max)
return nil
}


@@ -1,14 +0,0 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Sebastien Binet <seb.binet@gmail.com>


@@ -1,36 +0,0 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>


@@ -1,27 +0,0 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,7 +0,0 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.


@@ -1,290 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 {
return 0, 0, ErrCorrupt
}
if uint64(int(v)) != v {
return 0, 0, errors.New("snappy: decoded block is too large")
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
}
}
// Reader is an io.Reader than can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
return 0, r.err
}
}
}


@@ -1,9 +1,3 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3


@@ -20,7 +20,7 @@ token in the bucket represents one byte.
```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
Writer returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
#### type Bucket


@@ -2,8 +2,7 @@
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
// The ratelimit package provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// The ratelimit package provides an efficient token bucket implementation.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit
@@ -11,7 +10,6 @@ import (
"strconv"
"sync"
"time"
"math"
)
// Bucket represents a token bucket that fills at a predetermined rate.
@@ -57,7 +55,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
continue
}
tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
}
@@ -219,3 +217,10 @@ func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
tb.availTick = currentTick
return
}
func abs(f float64) float64 {
if f < 0 {
return -f
}
return f
}


@@ -4,9 +4,7 @@
There is sometimes utility in finding the current executable file
that is running. This can be used for upgrading the current executable
or finding resources located relative to the executable file. Both
working directory and the os.Args[0] value are arbitrary and cannot
be relied on; os.Args[0] can be "faked".
or finding resources located relative to the executable file.
Multi-platform and supports:
* Linux


@@ -16,12 +16,12 @@ func Executable() (string, error) {
}
// Returns same path as Executable, returns just the folder
// path. Excludes the executable name and any trailing slash.
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
return filepath.Dir(p), nil
folder, _ := filepath.Split(p)
return folder, nil
}


@@ -11,20 +11,12 @@ import (
"fmt"
"os"
"runtime"
"strings"
)
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
const deletedTag = " (deleted)"
execpath, err := os.Readlink("/proc/self/exe")
if err != nil {
return execpath, err
}
execpath = strings.TrimSuffix(execpath, deletedTag)
execpath = strings.TrimPrefix(execpath, deletedTag)
return execpath, nil
return os.Readlink("/proc/self/exe")
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "openbsd", "dragonfly":


@@ -7,65 +7,35 @@
package osext
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
oexec "os/exec"
"path/filepath"
"runtime"
"testing"
)
const (
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
executableEnvValueMatch = "match"
executableEnvValueDelete = "delete"
)
func TestPrintExecutable(t *testing.T) {
ef, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
t.Log("Executable:", ef)
}
func TestPrintExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
t.Log("Executable Folder:", ef)
}
func TestExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
if ef[len(ef)-1] == filepath.Separator {
t.Fatal("ExecutableFolder ends with a trailing slash.")
}
}
func TestExecutableMatch(t *testing.T) {
func TestExecPath(t *testing.T) {
ep, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
t.Fatalf("ExecPath failed: %v", err)
}
// fullpath to be of the form "dir/prog".
// we want fn to be of the form "dir/prog"
dir := filepath.Dir(filepath.Dir(ep))
fullpath, err := filepath.Rel(dir, ep)
fn, err := filepath.Rel(dir, ep)
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
// Make child start with a relative program path.
// Alter argv[0] for child to verify getting real path without argv[0].
cmd := &exec.Cmd{
Dir: dir,
Path: fullpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
}
cmd := &oexec.Cmd{}
// make child start with a relative program path
cmd.Dir = dir
cmd.Path = fn
// forge argv[0] for child, so that we can verify we could correctly
// get real path of the executable without influenced by argv[0].
cmd.Args = []string{"-", "-test.run=XXXX"}
cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("exec(self) failed: %v", err)
@@ -79,63 +49,6 @@ func TestExecutableMatch(t *testing.T) {
}
}
func TestExecutableDelete(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip()
}
fpath, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
r, w := io.Pipe()
stderrBuff := &bytes.Buffer{}
stdoutBuff := &bytes.Buffer{}
cmd := &exec.Cmd{
Path: fpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
Stdin: r,
Stderr: stderrBuff,
Stdout: stdoutBuff,
}
err = cmd.Start()
if err != nil {
t.Fatalf("exec(self) start failed: %v", err)
}
tempPath := fpath + "_copy"
_ = os.Remove(tempPath)
err = copyFile(tempPath, fpath)
if err != nil {
t.Fatalf("copy file failed: %v", err)
}
err = os.Remove(fpath)
if err != nil {
t.Fatalf("remove running test file failed: %v", err)
}
err = os.Rename(tempPath, fpath)
if err != nil {
t.Fatalf("rename copy to previous name failed: %v", err)
}
w.Write([]byte{0})
w.Close()
err = cmd.Wait()
if err != nil {
t.Fatalf("exec wait failed: %v", err)
}
childPath := stderrBuff.String()
if !filepath.IsAbs(childPath) {
t.Fatalf("Child returned %q, want an absolute path", childPath)
}
if !sameFile(childPath, fpath) {
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
}
}
func sameFile(fn1, fn2 string) bool {
fi1, err := os.Stat(fn1)
if err != nil {
@@ -147,30 +60,10 @@ func sameFile(fn1, fn2 string) bool {
}
return os.SameFile(fi1, fi2)
}
func copyFile(dest, src string) error {
df, err := os.Create(dest)
if err != nil {
return err
}
defer df.Close()
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
_, err = io.Copy(df, sf)
return err
}
func TestMain(m *testing.M) {
env := os.Getenv(executableEnvVar)
switch env {
case "":
os.Exit(m.Run())
case executableEnvValueMatch:
// First chdir to another path.
func init() {
if e := os.Getenv(execPath_EnvVar); e != "" {
// first chdir to another path
dir := "/"
if runtime.GOOS == "windows" {
dir = filepath.VolumeName(".")
@@ -181,23 +74,6 @@ func TestMain(m *testing.M) {
} else {
fmt.Fprint(os.Stderr, ep)
}
case executableEnvValueDelete:
bb := make([]byte, 1)
var err error
n, err := os.Stdin.Read(bb)
if err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
os.Exit(2)
}
if n != 1 {
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
os.Exit(2)
}
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
os.Exit(0)
}
os.Exit(0)
}


@@ -13,9 +13,6 @@ type TestModel struct {
name string
offset int64
size int
hash []byte
flags uint32
options []Option
closedCh chan bool
}
@@ -25,22 +22,18 @@ func newTestModel() *TestModel {
}
}
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
func (t *TestModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
func (t *TestModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
}
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int, hash []byte, flags uint32, options []Option, buf []byte) error {
func (t *TestModel) Request(deviceID DeviceID, folder, name string, offset int64, size int) ([]byte, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
t.hash = hash
t.flags = flags
t.options = options
copy(buf, t.data)
return nil
return t.data, nil
}
func (t *TestModel) Close(deviceID DeviceID, err error) {


@@ -1,23 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
import "testing"
func TestWinsConflict(t *testing.T) {
testcases := [][2]FileInfo{
// The first should always win over the second
{{Modified: 42}, {Modified: 41}},
{{Modified: 41}, {Modified: 42, Flags: FlagDeleted}},
{{Modified: 41, Version: Vector{{42, 2}, {43, 1}}}, {Modified: 41, Version: Vector{{42, 1}, {43, 2}}}},
}
for _, tc := range testcases {
if !tc[0].WinsConflict(tc[1]) {
t.Errorf("%v should win over %v", tc[0], tc[1])
}
if tc[1].WinsConflict(tc[0]) {
t.Errorf("%v should not win over %v", tc[1], tc[0])
}
}
}


@@ -6,7 +6,6 @@ import (
"bytes"
"crypto/sha256"
"encoding/base32"
"encoding/binary"
"errors"
"fmt"
"regexp"
@@ -68,11 +67,6 @@ func (n DeviceID) Equals(other DeviceID) bool {
return bytes.Compare(n[:], other[:]) == 0
}
// Short returns an integer representing bits 0-63 of the device ID.
func (n DeviceID) Short() uint64 {
return binary.BigEndian.Uint64(n[:])
}
func (n *DeviceID) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}


@@ -1,51 +0,0 @@
// Copyright (C) 2014 The Protocol Authors.
package protocol
import (
"errors"
)
const (
ecNoError int32 = iota
ecGeneric
ecNoSuchFile
ecInvalid
)
var (
ErrNoError error = nil
ErrGeneric = errors.New("generic error")
ErrNoSuchFile = errors.New("no such file")
ErrInvalid = errors.New("file is invalid")
)
var lookupError = map[int32]error{
ecNoError: ErrNoError,
ecGeneric: ErrGeneric,
ecNoSuchFile: ErrNoSuchFile,
ecInvalid: ErrInvalid,
}
var lookupCode = map[error]int32{
ErrNoError: ecNoError,
ErrGeneric: ecGeneric,
ErrNoSuchFile: ecNoSuchFile,
ErrInvalid: ecInvalid,
}
func codeToError(errcode int32) error {
err, ok := lookupError[errcode]
if !ok {
return ErrGeneric
}
return err
}
func errorToCode(err error) int32 {
code, ok := lookupCode[err]
if !ok {
return ecGeneric
}
return code
}


@@ -1,6 +1,5 @@
// Copyright (C) 2014 The Protocol Authors.
//go:generate -command genxdr go run ../syncthing/Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
//go:generate genxdr -o message_xdr.go message.go
package protocol
@@ -18,13 +17,13 @@ type FileInfo struct {
Name string // max:8192
Flags uint32
Modified int64
Version Vector
Version int64
LocalVersion int64
Blocks []BlockInfo
}
func (f FileInfo) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%v, Size:%d, Blocks:%v}",
return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, Blocks:%v}",
f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
}
@@ -58,31 +57,6 @@ func (f FileInfo) HasPermissionBits() bool {
return f.Flags&FlagNoPermBits == 0
}
// WinsConflict returns true if "f" is the one to choose when it is in
// conflict with "other".
func (f FileInfo) WinsConflict(other FileInfo) bool {
// If a modification is in conflict with a delete, we pick the
// modification.
if !f.IsDeleted() && other.IsDeleted() {
return true
}
if f.IsDeleted() && !other.IsDeleted() {
return false
}
// The one with the newer modification time wins.
if f.Modified > other.Modified {
return true
}
if f.Modified < other.Modified {
return false
}
// The modification times were equal. Use the device ID in the version
// vector as tie breaker.
return f.Version.Compare(other.Version) == ConcurrentGreater
}
type BlockInfo struct {
Offset int64 // noencode (cache only)
Size int32
@@ -104,8 +78,8 @@ type RequestMessage struct {
}
type ResponseMessage struct {
Data []byte
Code int32
Data []byte
Error int32
}
type ClusterConfigMessage struct {
@@ -127,15 +101,12 @@ func (o *ClusterConfigMessage) GetOption(key string) string {
type Folder struct {
ID string // max:64
Devices []Device
Flags uint32
Options []Option // max:64
}
type Device struct {
ID []byte // max:32
MaxLocalVersion int64
Flags uint32
Options []Option // max:64
MaxLocalVersion int64
}
type Option struct {


@@ -51,7 +51,7 @@ struct IndexMessage {
func (o IndexMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o IndexMessage) MarshalXDR() ([]byte, error) {
@@ -69,15 +69,15 @@ func (o IndexMessage) MustMarshalXDR() []byte {
func (o IndexMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o IndexMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteString(o.Folder)
xw.WriteUint32(uint32(len(o.Files)))
for i := range o.Files {
_, err := o.Files[i].EncodeXDRInto(xw)
_, err := o.Files[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -88,7 +88,7 @@ func (o IndexMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
}
xw.WriteUint32(uint32(len(o.Options)))
for i := range o.Options {
_, err := o.Options[i].EncodeXDRInto(xw)
_, err := o.Options[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -98,36 +98,30 @@ func (o IndexMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *IndexMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *IndexMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
o.Folder = xr.ReadString()
_FilesSize := int(xr.ReadUint32())
if _FilesSize < 0 {
return xdr.ElementSizeExceeded("Files", _FilesSize, 0)
}
o.Files = make([]FileInfo, _FilesSize)
for i := range o.Files {
(&o.Files[i]).DecodeXDRFrom(xr)
(&o.Files[i]).decodeXDR(xr)
}
o.Flags = xr.ReadUint32()
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize < 0 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
if _OptionsSize > 64 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
o.Options = make([]Option, _OptionsSize)
for i := range o.Options {
(&o.Options[i]).DecodeXDRFrom(xr)
(&o.Options[i]).decodeXDR(xr)
}
return xr.Error()
}
@@ -151,9 +145,9 @@ FileInfo Structure:
+ Modified (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Vector Structure \
/ /
| |
+ Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Local Version (64 bits) +
@@ -171,7 +165,7 @@ struct FileInfo {
string Name<8192>;
unsigned int Flags;
hyper Modified;
Vector Version;
hyper Version;
hyper LocalVersion;
BlockInfo Blocks<>;
}
@@ -180,7 +174,7 @@ struct FileInfo {
func (o FileInfo) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o FileInfo) MarshalXDR() ([]byte, error) {
@@ -198,25 +192,22 @@ func (o FileInfo) MustMarshalXDR() []byte {
func (o FileInfo) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o FileInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.Name); l > 8192 {
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
}
xw.WriteString(o.Name)
xw.WriteUint32(o.Flags)
xw.WriteUint64(uint64(o.Modified))
_, err := o.Version.EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
xw.WriteUint64(uint64(o.Version))
xw.WriteUint64(uint64(o.LocalVersion))
xw.WriteUint32(uint32(len(o.Blocks)))
for i := range o.Blocks {
_, err := o.Blocks[i].EncodeXDRInto(xw)
_, err := o.Blocks[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -226,28 +217,25 @@ func (o FileInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *FileInfo) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *FileInfo) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *FileInfo) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
o.Name = xr.ReadStringMax(8192)
o.Flags = xr.ReadUint32()
o.Modified = int64(xr.ReadUint64())
(&o.Version).DecodeXDRFrom(xr)
o.Version = int64(xr.ReadUint64())
o.LocalVersion = int64(xr.ReadUint64())
_BlocksSize := int(xr.ReadUint32())
if _BlocksSize < 0 {
return xdr.ElementSizeExceeded("Blocks", _BlocksSize, 0)
}
o.Blocks = make([]BlockInfo, _BlocksSize)
for i := range o.Blocks {
(&o.Blocks[i]).DecodeXDRFrom(xr)
(&o.Blocks[i]).decodeXDR(xr)
}
return xr.Error()
}
@@ -278,7 +266,7 @@ struct BlockInfo {
func (o BlockInfo) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o BlockInfo) MarshalXDR() ([]byte, error) {
@@ -296,11 +284,11 @@ func (o BlockInfo) MustMarshalXDR() []byte {
func (o BlockInfo) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o BlockInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint32(uint32(o.Size))
if l := len(o.Hash); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("Hash", l, 64)
@@ -311,16 +299,16 @@ func (o BlockInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *BlockInfo) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *BlockInfo) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *BlockInfo) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *BlockInfo) decodeXDR(xr *xdr.Reader) error {
o.Size = int32(xr.ReadUint32())
o.Hash = xr.ReadBytesMax(64)
return xr.Error()
@@ -381,7 +369,7 @@ struct RequestMessage {
func (o RequestMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o RequestMessage) MarshalXDR() ([]byte, error) {
@@ -399,11 +387,11 @@ func (o RequestMessage) MustMarshalXDR() []byte {
func (o RequestMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o RequestMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.Folder); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
}
@@ -424,7 +412,7 @@ func (o RequestMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
}
xw.WriteUint32(uint32(len(o.Options)))
for i := range o.Options {
_, err := o.Options[i].EncodeXDRInto(xw)
_, err := o.Options[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -434,16 +422,16 @@ func (o RequestMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *RequestMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *RequestMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
o.Folder = xr.ReadStringMax(64)
o.Name = xr.ReadStringMax(8192)
o.Offset = int64(xr.ReadUint64())
@@ -451,15 +439,12 @@ func (o *RequestMessage) DecodeXDRFrom(xr *xdr.Reader) error {
o.Hash = xr.ReadBytesMax(64)
o.Flags = xr.ReadUint32()
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize < 0 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
if _OptionsSize > 64 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
o.Options = make([]Option, _OptionsSize)
for i := range o.Options {
(&o.Options[i]).DecodeXDRFrom(xr)
(&o.Options[i]).decodeXDR(xr)
}
return xr.Error()
}
@@ -477,20 +462,20 @@ ResponseMessage Structure:
\ Data (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Code |
| Error |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct ResponseMessage {
opaque Data<>;
int Code;
int Error;
}
*/
func (o ResponseMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o ResponseMessage) MarshalXDR() ([]byte, error) {
@@ -508,30 +493,30 @@ func (o ResponseMessage) MustMarshalXDR() []byte {
func (o ResponseMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o ResponseMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o ResponseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteBytes(o.Data)
xw.WriteUint32(uint32(o.Code))
xw.WriteUint32(uint32(o.Error))
return xw.Tot(), xw.Error()
}
func (o *ResponseMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *ResponseMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *ResponseMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *ResponseMessage) decodeXDR(xr *xdr.Reader) error {
o.Data = xr.ReadBytes()
o.Code = int32(xr.ReadUint32())
o.Error = int32(xr.ReadUint32())
return xr.Error()
}
@@ -579,7 +564,7 @@ struct ClusterConfigMessage {
func (o ClusterConfigMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o ClusterConfigMessage) MarshalXDR() ([]byte, error) {
@@ -597,11 +582,11 @@ func (o ClusterConfigMessage) MustMarshalXDR() []byte {
func (o ClusterConfigMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.ClientName); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("ClientName", l, 64)
}
@@ -612,7 +597,7 @@ func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteString(o.ClientVersion)
xw.WriteUint32(uint32(len(o.Folders)))
for i := range o.Folders {
_, err := o.Folders[i].EncodeXDRInto(xw)
_, err := o.Folders[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -622,7 +607,7 @@ func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
}
xw.WriteUint32(uint32(len(o.Options)))
for i := range o.Options {
_, err := o.Options[i].EncodeXDRInto(xw)
_, err := o.Options[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -632,36 +617,30 @@ func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *ClusterConfigMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *ClusterConfigMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
o.ClientName = xr.ReadStringMax(64)
o.ClientVersion = xr.ReadStringMax(64)
_FoldersSize := int(xr.ReadUint32())
if _FoldersSize < 0 {
return xdr.ElementSizeExceeded("Folders", _FoldersSize, 0)
}
o.Folders = make([]Folder, _FoldersSize)
for i := range o.Folders {
(&o.Folders[i]).DecodeXDRFrom(xr)
(&o.Folders[i]).decodeXDR(xr)
}
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize < 0 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
if _OptionsSize > 64 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
o.Options = make([]Option, _OptionsSize)
for i := range o.Options {
(&o.Options[i]).DecodeXDRFrom(xr)
(&o.Options[i]).decodeXDR(xr)
}
return xr.Error()
}
@@ -685,28 +664,18 @@ Folder Structure:
\ Zero or more Device Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Options |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Option Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Folder {
string ID<64>;
Device Devices<>;
unsigned int Flags;
Option Options<64>;
}
*/
func (o Folder) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o Folder) MarshalXDR() ([]byte, error) {
@@ -724,29 +693,18 @@ func (o Folder) MustMarshalXDR() []byte {
func (o Folder) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o Folder) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.ID); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 64)
}
xw.WriteString(o.ID)
xw.WriteUint32(uint32(len(o.Devices)))
for i := range o.Devices {
_, err := o.Devices[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
}
xw.WriteUint32(o.Flags)
if l := len(o.Options); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("Options", l, 64)
}
xw.WriteUint32(uint32(len(o.Options)))
for i := range o.Options {
_, err := o.Options[i].EncodeXDRInto(xw)
_, err := o.Devices[i].encodeXDR(xw)
if err != nil {
return xw.Tot(), err
}
@@ -756,36 +714,21 @@ func (o Folder) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *Folder) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Folder) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Folder) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadStringMax(64)
_DevicesSize := int(xr.ReadUint32())
if _DevicesSize < 0 {
return xdr.ElementSizeExceeded("Devices", _DevicesSize, 0)
}
o.Devices = make([]Device, _DevicesSize)
for i := range o.Devices {
(&o.Devices[i]).DecodeXDRFrom(xr)
}
o.Flags = xr.ReadUint32()
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize < 0 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
if _OptionsSize > 64 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
o.Options = make([]Option, _OptionsSize)
for i := range o.Options {
(&o.Options[i]).DecodeXDRFrom(xr)
(&o.Devices[i]).decodeXDR(xr)
}
return xr.Error()
}
@@ -803,32 +746,25 @@ Device Structure:
\ ID (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Max Local Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Options |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more Option Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct Device {
opaque ID<32>;
hyper MaxLocalVersion;
unsigned int Flags;
Option Options<64>;
hyper MaxLocalVersion;
}
*/
func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o Device) MarshalXDR() ([]byte, error) {
@@ -846,56 +782,35 @@ func (o Device) MustMarshalXDR() []byte {
func (o Device) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o Device) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.ID); l > 32 {
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
}
xw.WriteBytes(o.ID)
xw.WriteUint64(uint64(o.MaxLocalVersion))
xw.WriteUint32(o.Flags)
if l := len(o.Options); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("Options", l, 64)
}
xw.WriteUint32(uint32(len(o.Options)))
for i := range o.Options {
_, err := o.Options[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
}
xw.WriteUint64(uint64(o.MaxLocalVersion))
return xw.Tot(), xw.Error()
}
func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *Device) decodeXDR(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
o.MaxLocalVersion = int64(xr.ReadUint64())
o.Flags = xr.ReadUint32()
_OptionsSize := int(xr.ReadUint32())
if _OptionsSize < 0 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
if _OptionsSize > 64 {
return xdr.ElementSizeExceeded("Options", _OptionsSize, 64)
}
o.Options = make([]Option, _OptionsSize)
for i := range o.Options {
(&o.Options[i]).DecodeXDRFrom(xr)
}
o.MaxLocalVersion = int64(xr.ReadUint64())
return xr.Error()
}
@@ -929,7 +844,7 @@ struct Option {
func (o Option) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o Option) MarshalXDR() ([]byte, error) {
@@ -947,11 +862,11 @@ func (o Option) MustMarshalXDR() []byte {
func (o Option) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o Option) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o Option) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.Key); l > 64 {
return xw.Tot(), xdr.ElementSizeExceeded("Key", l, 64)
}
@@ -965,16 +880,16 @@ func (o Option) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *Option) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Option) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *Option) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *Option) decodeXDR(xr *xdr.Reader) error {
o.Key = xr.ReadStringMax(64)
o.Value = xr.ReadStringMax(1024)
return xr.Error()
@@ -1006,7 +921,7 @@ struct CloseMessage {
func (o CloseMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o CloseMessage) MarshalXDR() ([]byte, error) {
@@ -1024,11 +939,11 @@ func (o CloseMessage) MustMarshalXDR() []byte {
func (o CloseMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o CloseMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o CloseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
if l := len(o.Reason); l > 1024 {
return xw.Tot(), xdr.ElementSizeExceeded("Reason", l, 1024)
}
@@ -1039,16 +954,16 @@ func (o CloseMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o *CloseMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *CloseMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *CloseMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *CloseMessage) decodeXDR(xr *xdr.Reader) error {
o.Reason = xr.ReadStringMax(1024)
o.Code = int32(xr.ReadUint32())
return xr.Error()
@@ -1070,7 +985,7 @@ struct EmptyMessage {
func (o EmptyMessage) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.EncodeXDRInto(xw)
return o.encodeXDR(xw)
}
func (o EmptyMessage) MarshalXDR() ([]byte, error) {
@@ -1088,25 +1003,25 @@ func (o EmptyMessage) MustMarshalXDR() []byte {
func (o EmptyMessage) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.EncodeXDRInto(xw)
_, err := o.encodeXDR(xw)
return []byte(aw), err
}
func (o EmptyMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
func (o EmptyMessage) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.Tot(), xw.Error()
}
func (o *EmptyMessage) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *EmptyMessage) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.DecodeXDRFrom(xr)
return o.decodeXDR(xr)
}
func (o *EmptyMessage) DecodeXDRFrom(xr *xdr.Reader) error {
func (o *EmptyMessage) decodeXDR(xr *xdr.Reader) error {
return xr.Error()
}
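Every generated type above follows the same four-entry-point pattern: streaming EncodeXDR/DecodeXDR wrapping the private encodeXDR/decodeXDR, plus buffer-based MarshalXDR/UnmarshalXDR. A minimal round-trip sketch — the import path is an assumption, matching where this package lived around this time:

package main

import (
    "log"

    "github.com/syncthing/protocol" // assumed import path for the package above
)

func main() {
    opt := protocol.Option{Key: "compression", Value: "always"}

    bs, err := opt.MarshalXDR() // buffer-based encode
    if err != nil {
        log.Fatal(err) // a Key over 64 bytes would fail with xdr.ElementSizeExceeded
    }

    var out protocol.Option
    if err := out.UnmarshalXDR(bs); err != nil { // buffer-based decode
        log.Fatal(err)
    }
    log.Printf("round-trip ok: %+v", out)
}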

View File

@@ -12,23 +12,23 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.Index(deviceID, folder, files, flags, options)
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
}
m.next.IndexUpdate(deviceID, folder, files, flags, options)
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, flags uint32, options []Option, buf []byte) error {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = norm.NFD.String(name)
return m.next.Request(deviceID, folder, name, offset, hash, flags, options, buf)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
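The NFD conversion above matters because HFS+ stores file names decomposed; names arrive over the wire in NFC (see the wireformat diff below) and are decomposed before reaching the local model. A standalone illustration using the norm package imported here:

package main

import (
    "fmt"

    "golang.org/x/text/unicode/norm"
)

func main() {
    name := "r\u00e9sum\u00e9.txt" // "résumé.txt" composed (NFC), as received
    nfd := norm.NFD.String(name)   // decomposed, as HFS+ stores it

    fmt.Println(len(name), len(nfd)) // 12 14: each é becomes e + combining accent
}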

View File

@@ -10,16 +10,16 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
m.next.Index(deviceID, folder, files, flags, options)
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
m.next.IndexUpdate(deviceID, folder, files, flags, options)
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, flags uint32, options []Option, buf []byte) error {
return m.next.Request(deviceID, folder, name, offset, hash, flags, options, buf)
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {

View File

@@ -24,19 +24,41 @@ type nativeModel struct {
next Model
}
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
fixupFiles(folder, files)
m.next.Index(deviceID, folder, files, flags, options)
func (m nativeModel) Index(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Flags |= FlagInvalid
l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
}
files[i].Name = filepath.FromSlash(f.Name)
}
m.next.Index(deviceID, folder, files)
}
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option) {
fixupFiles(folder, files)
m.next.IndexUpdate(deviceID, folder, files, flags, options)
func (m nativeModel) IndexUpdate(deviceID DeviceID, folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Flags |= FlagInvalid
l.Warnf("File name %q contains invalid characters; marked as invalid.", f.Name)
}
files[i].Name = filepath.FromSlash(files[i].Name)
}
m.next.IndexUpdate(deviceID, folder, files)
}
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, flags uint32, options []Option, buf []byte) error {
func (m nativeModel) Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error) {
name = filepath.FromSlash(name)
return m.next.Request(deviceID, folder, name, offset, hash, flags, options, buf)
return m.next.Request(deviceID, folder, name, offset, size)
}
func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessage) {
@@ -46,18 +68,3 @@ func (m nativeModel) ClusterConfig(deviceID DeviceID, config ClusterConfigMessag
func (m nativeModel) Close(deviceID DeviceID, err error) {
m.next.Close(deviceID, err)
}
func fixupFiles(folder string, files []FileInfo) {
for i, f := range files {
if strings.ContainsAny(f.Name, disallowedCharacters) {
if f.IsDeleted() {
// Don't complain if the file is marked as deleted, since it
// can't possibly exist here anyway.
continue
}
files[i].Flags |= FlagInvalid
l.Warnf("File name %q (folder %q) contains invalid characters; marked as invalid.", f.Name, folder)
}
files[i].Name = filepath.FromSlash(files[i].Name)
}
}
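On Windows the same hook rejects names the filesystem cannot represent instead of normalizing Unicode. A sketch of the check, where the character set is an assumed stand-in for the package's disallowedCharacters constant:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// Assumed stand-in for the package's disallowedCharacters constant.
const disallowedCharacters = `<>:"|?*`

func main() {
    for _, name := range []string{"docs/report.txt", "what?.txt"} {
        if strings.ContainsAny(name, disallowedCharacters) {
            fmt.Printf("%q would be flagged FlagInvalid\n", name)
            continue
        }
        // Wire names use slashes; convert to the native separator.
        fmt.Println(filepath.FromSlash(name))
    }
}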

View File

@@ -31,10 +31,10 @@ const (
const (
stateInitial = iota
stateReady
stateCCRcvd
stateIdxRcvd
)
// FileInfo flags
const (
FlagDeleted uint32 = 1 << 12
FlagInvalid = 1 << 13
@@ -48,17 +48,6 @@ const (
SymlinkTypeMask = FlagDirectory | FlagSymlinkMissingTarget
)
// IndexMessage message flags (for IndexUpdate)
const (
FlagIndexTemporary uint32 = 1 << iota
)
// Request message flags
const (
FlagRequestTemporary uint32 = 1 << iota
)
// ClusterConfigMessage.Folders.Devices flags
const (
FlagShareTrusted uint32 = 1 << 0
FlagShareReadOnly = 1 << 1
@@ -77,11 +66,11 @@ type pongMessage struct{ EmptyMessage }
type Model interface {
// An index was received from the peer device
Index(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option)
Index(deviceID DeviceID, folder string, files []FileInfo)
// An index update was received from the peer device
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo, flags uint32, options []Option)
IndexUpdate(deviceID DeviceID, folder string, files []FileInfo)
// A request was made by the peer device
Request(deviceID DeviceID, folder string, name string, offset int64, hash []byte, flags uint32, options []Option, buf []byte) error
Request(deviceID DeviceID, folder string, name string, offset int64, size int) ([]byte, error)
// A cluster configuration message was received
ClusterConfig(deviceID DeviceID, config ClusterConfigMessage)
// The peer device closed the connection
@@ -89,12 +78,11 @@ type Model interface {
}
type Connection interface {
Start()
ID() DeviceID
Name() string
Index(folder string, files []FileInfo, flags uint32, options []Option) error
IndexUpdate(folder string, files []FileInfo, flags uint32, options []Option) error
Request(folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error)
Index(folder string, files []FileInfo) error
IndexUpdate(folder string, files []FileInfo) error
Request(folder string, name string, offset int64, size int) ([]byte, error)
ClusterConfig(config ClusterConfigMessage)
Statistics() Statistics
}
@@ -103,6 +91,7 @@ type rawConnection struct {
id DeviceID
name string
receiver Model
state int
cr *countingReader
cw *countingWriter
@@ -112,11 +101,11 @@ type rawConnection struct {
idxMut sync.Mutex // ensures serialization of Index calls
nextID chan int
outbox chan hdrMsg
closed chan struct{}
once sync.Once
pool sync.Pool
nextID chan int
outbox chan hdrMsg
closed chan struct{}
once sync.Once
compression Compression
rdbuf0 []byte // used & reused by readMessage
@@ -129,9 +118,8 @@ type asyncResult struct {
}
type hdrMsg struct {
hdr header
msg encodable
done chan struct{}
hdr header
msg encodable
}
type encodable interface {
@@ -142,9 +130,9 @@ type isEofer interface {
IsEOF() bool
}
var (
PingTimeout = 30 * time.Second
PingIdleTime = 60 * time.Second
const (
pingTimeout = 30 * time.Second
pingIdleTime = 60 * time.Second
)
func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiver Model, name string, compress Compression) Connection {
@@ -152,32 +140,24 @@ func NewConnection(deviceID DeviceID, reader io.Reader, writer io.Writer, receiv
cw := &countingWriter{Writer: writer}
c := rawConnection{
id: deviceID,
name: name,
receiver: nativeModel{receiver},
cr: cr,
cw: cw,
outbox: make(chan hdrMsg),
nextID: make(chan int),
closed: make(chan struct{}),
pool: sync.Pool{
New: func() interface{} {
return make([]byte, BlockSize)
},
},
id: deviceID,
name: name,
receiver: nativeModel{receiver},
state: stateInitial,
cr: cr,
cw: cw,
outbox: make(chan hdrMsg),
nextID: make(chan int),
closed: make(chan struct{}),
compression: compress,
}
return wireFormatConnection{&c}
}
// Start creates the goroutines for sending and receiving messages. It must
// be called exactly once after creating a connection.
func (c *rawConnection) Start() {
go c.readerLoop()
go c.writerLoop()
go c.pingerLoop()
go c.idGenerator()
return wireFormatConnection{&c}
}
func (c *rawConnection) ID() DeviceID {
@@ -189,7 +169,7 @@ func (c *rawConnection) Name() string {
}
// Index writes the list of file information to the connected peer device
func (c *rawConnection) Index(folder string, idx []FileInfo, flags uint32, options []Option) error {
func (c *rawConnection) Index(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
@@ -197,17 +177,15 @@ func (c *rawConnection) Index(folder string, idx []FileInfo, flags uint32, optio
}
c.idxMut.Lock()
c.send(-1, messageTypeIndex, IndexMessage{
Folder: folder,
Files: idx,
Flags: flags,
Options: options,
}, nil)
Folder: folder,
Files: idx,
})
c.idxMut.Unlock()
return nil
}
// IndexUpdate writes the list of file information to the connected peer device as an update
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo, flags uint32, options []Option) error {
func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo) error {
select {
case <-c.closed:
return ErrClosed
@@ -215,17 +193,15 @@ func (c *rawConnection) IndexUpdate(folder string, idx []FileInfo, flags uint32,
}
c.idxMut.Lock()
c.send(-1, messageTypeIndexUpdate, IndexMessage{
Folder: folder,
Files: idx,
Flags: flags,
Options: options,
}, nil)
Folder: folder,
Files: idx,
})
c.idxMut.Unlock()
return nil
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(folder string, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
func (c *rawConnection) Request(folder string, name string, offset int64, size int) ([]byte, error) {
var id int
select {
case id = <-c.nextID:
@@ -242,14 +218,11 @@ func (c *rawConnection) Request(folder string, name string, offset int64, size i
c.awaitingMut.Unlock()
ok := c.send(id, messageTypeRequest, RequestMessage{
Folder: folder,
Name: name,
Offset: offset,
Size: int32(size),
Hash: hash,
Flags: flags,
Options: options,
}, nil)
Folder: folder,
Name: name,
Offset: offset,
Size: int32(size),
})
if !ok {
return nil, ErrClosed
}
@@ -263,7 +236,7 @@ func (c *rawConnection) Request(folder string, name string, offset int64, size i
// ClusterConfig sends the cluster configuration message to the peer.
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
c.send(-1, messageTypeClusterConfig, config, nil)
c.send(-1, messageTypeClusterConfig, config)
}
func (c *rawConnection) ping() bool {
@@ -279,7 +252,7 @@ func (c *rawConnection) ping() bool {
c.awaiting[id] = rc
c.awaitingMut.Unlock()
ok := c.send(id, messageTypePing, nil, nil)
ok := c.send(id, messageTypePing, nil)
if !ok {
return false
}
@@ -293,7 +266,6 @@ func (c *rawConnection) readerLoop() (err error) {
c.close(err)
}()
state := stateInitial
for {
select {
case <-c.closed:
@@ -307,55 +279,57 @@ func (c *rawConnection) readerLoop() (err error) {
}
switch msg := msg.(type) {
case ClusterConfigMessage:
if state != stateInitial {
return fmt.Errorf("protocol error: cluster config message in state %d", state)
}
go c.receiver.ClusterConfig(c.id, msg)
state = stateReady
case IndexMessage:
if msg.Flags != 0 {
// We don't currently support or expect any flags.
return fmt.Errorf("protocol error: unknown flags 0x%x in Index(Update) message", msg.Flags)
}
switch hdr.msgType {
case messageTypeIndex:
if state != stateReady {
return fmt.Errorf("protocol error: index message in state %d", state)
if c.state < stateCCRcvd {
return fmt.Errorf("protocol error: index message in state %d", c.state)
}
c.handleIndex(msg)
state = stateReady
c.state = stateIdxRcvd
case messageTypeIndexUpdate:
if state != stateReady {
return fmt.Errorf("protocol error: index update message in state %d", state)
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: index update message in state %d", c.state)
}
c.handleIndexUpdate(msg)
state = stateReady
}
case RequestMessage:
if state != stateReady {
return fmt.Errorf("protocol error: request message in state %d", state)
if msg.Flags != 0 {
// We don't currently support or expect any flags.
return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", msg.Flags)
}
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: request message in state %d", c.state)
}
// Requests are handled asynchronously
go c.handleRequest(hdr.msgID, msg)
case ResponseMessage:
if state != stateReady {
return fmt.Errorf("protocol error: response message in state %d", state)
if c.state < stateIdxRcvd {
return fmt.Errorf("protocol error: response message in state %d", c.state)
}
c.handleResponse(hdr.msgID, msg)
case pingMessage:
if state != stateReady {
return fmt.Errorf("protocol error: ping message in state %d", state)
}
c.send(hdr.msgID, messageTypePong, pongMessage{}, nil)
c.send(hdr.msgID, messageTypePong, pongMessage{})
case pongMessage:
if state != stateReady {
return fmt.Errorf("protocol error: pong message in state %d", state)
}
c.handlePong(hdr.msgID)
case ClusterConfigMessage:
if c.state != stateInitial {
return fmt.Errorf("protocol error: cluster config message in state %d", c.state)
}
go c.receiver.ClusterConfig(c.id, msg)
c.state = stateCCRcvd
case CloseMessage:
return errors.New(msg.Reason)
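One side of this hunk tracks progress with a connection-level c.state and ordered constants, so each guard becomes a monotonic comparison rather than an equality test against stateReady. The pattern in isolation, a sketch using the constant names from above:

package main

import "fmt"

const (
    stateInitial = iota
    stateCCRcvd
    stateIdxRcvd
)

// guard mirrors the readerLoop checks: a message type is legal only once
// the connection has advanced at least to the required state.
func guard(state, required int, msg string) error {
    if state < required {
        return fmt.Errorf("protocol error: %s message in state %d", msg, state)
    }
    return nil
}

func main() {
    state := stateInitial
    fmt.Println(guard(state, stateCCRcvd, "index")) // rejected before ClusterConfig

    state = stateCCRcvd // ClusterConfig received
    fmt.Println(guard(state, stateCCRcvd, "index")) // <nil>: index now legal
}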
@@ -486,16 +460,16 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
l.Debugf("Index(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}
func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Folder, len(im.Files))
}
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files))
}
func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
@@ -525,43 +499,18 @@ func filterIndexMessageFiles(fs []FileInfo) []FileInfo {
}
func (c *rawConnection) handleRequest(msgID int, req RequestMessage) {
size := int(req.Size)
usePool := size <= BlockSize
data, _ := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), int(req.Size))
var buf []byte
var done chan struct{}
if usePool {
buf = c.pool.Get().([]byte)[:size]
done = make(chan struct{})
} else {
buf = make([]byte, size)
}
err := c.receiver.Request(c.id, req.Folder, req.Name, int64(req.Offset), req.Hash, req.Flags, req.Options, buf)
if err != nil {
c.send(msgID, messageTypeResponse, ResponseMessage{
Data: nil,
Code: errorToCode(err),
}, done)
} else {
c.send(msgID, messageTypeResponse, ResponseMessage{
Data: buf,
Code: errorToCode(err),
}, done)
}
if usePool {
<-done
c.pool.Put(buf)
}
c.send(msgID, messageTypeResponse, ResponseMessage{
Data: data,
})
}
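The longer side of this hunk amortizes allocation of response buffers with a sync.Pool, returning each block-sized buffer once the writer loop closes the done channel. The pooling pattern on its own, a sketch (BlockSize assumed to be the 128 KiB standard block of the era):

package main

import (
    "fmt"
    "sync"
)

const BlockSize = 128 << 10 // assumption: the standard block size at the time

func main() {
    pool := sync.Pool{
        New: func() interface{} { return make([]byte, BlockSize) },
    }

    buf := pool.Get().([]byte)[:1024] // reslice to the requested size
    // ... fill buf with the requested block data ...
    fmt.Println(len(buf), cap(buf)) // 1024 131072

    pool.Put(buf) // only safe after the send completes, hence the done channel
}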
func (c *rawConnection) handleResponse(msgID int, resp ResponseMessage) {
c.awaitingMut.Lock()
if rc := c.awaiting[msgID]; rc != nil {
c.awaiting[msgID] = nil
rc <- asyncResult{resp.Data, codeToError(resp.Code)}
rc <- asyncResult{resp.Data, nil}
close(rc)
}
c.awaitingMut.Unlock()
@@ -577,7 +526,7 @@ func (c *rawConnection) handlePong(msgID int) {
c.awaitingMut.Unlock()
}
func (c *rawConnection) send(msgID int, msgType int, msg encodable, done chan struct{}) bool {
func (c *rawConnection) send(msgID int, msgType int, msg encodable) bool {
if msgID < 0 {
select {
case id := <-c.nextID:
@@ -594,7 +543,7 @@ func (c *rawConnection) send(msgID int, msgType int, msg encodable, done chan st
}
select {
case c.outbox <- hdrMsg{hdr, msg, done}:
case c.outbox <- hdrMsg{hdr, msg}:
return true
case <-c.closed:
return false
@@ -613,9 +562,6 @@ func (c *rawConnection) writerLoop() {
if hm.msg != nil {
// Uncompressed message in uncBuf
uncBuf, err = hm.msg.AppendXDR(uncBuf[:0])
if hm.done != nil {
close(hm.done)
}
if err != nil {
c.close(err)
return
@@ -722,17 +668,17 @@ func (c *rawConnection) idGenerator() {
func (c *rawConnection) pingerLoop() {
var rc = make(chan bool, 1)
ticker := time.Tick(PingIdleTime / 2)
ticker := time.Tick(pingIdleTime / 2)
for {
select {
case <-ticker:
if d := time.Since(c.cr.Last()); d < PingIdleTime {
if d := time.Since(c.cr.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after rd", d)
}
continue
}
if d := time.Since(c.cw.Last()); d < PingIdleTime {
if d := time.Since(c.cw.Last()); d < pingIdleTime {
if debug {
l.Debugln(c.id, "ping skipped after wr", d)
}
@@ -752,7 +698,7 @@ func (c *rawConnection) pingerLoop() {
if !ok {
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(PingTimeout):
case <-time.After(pingTimeout):
c.close(fmt.Errorf("ping timeout"))
case <-c.closed:
return

View File

@@ -67,12 +67,8 @@ func TestPing(t *testing.T) {
ar, aw := io.Pipe()
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c0.Start()
c1 := NewConnection(c1ID, br, aw, newTestModel(), "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c1.Start()
c0.ClusterConfig(ClusterConfigMessage{})
c1.ClusterConfig(ClusterConfigMessage{})
c0 := NewConnection(c0ID, ar, bw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c1 := NewConnection(c1ID, br, aw, nil, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
@@ -85,8 +81,8 @@ func TestPing(t *testing.T) {
func TestPingErr(t *testing.T) {
e := errors.New("something broke")
for i := 0; i < 32; i++ {
for j := 0; j < 32; j++ {
for i := 0; i < 16; i++ {
for j := 0; j < 16; j++ {
m0 := newTestModel()
m1 := newTestModel()
@@ -96,18 +92,12 @@ func TestPingErr(t *testing.T) {
ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
c0 := NewConnection(c0ID, ar, ebw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c0.Start()
c1 := NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
c1.Start()
c0.ClusterConfig(ClusterConfigMessage{})
c1.ClusterConfig(ClusterConfigMessage{})
NewConnection(c1ID, br, eaw, m1, "name", CompressAlways)
res := c0.ping()
if (i < 8 || j < 8) && res {
// This should have resulted in failure, as there is no way an empty ClusterConfig plus a Ping message fits in eight bytes.
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 28 && j >= 28) && !res {
// This should have worked though, as 28 bytes is plenty for both.
} else if (i >= 12 && j >= 12) && !res {
t.Errorf("Unexpected ping fail; i=%d, j=%d", i, j)
}
}
@@ -178,11 +168,7 @@ func TestVersionErr(t *testing.T) {
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c0.Start()
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c1.Start()
c0.ClusterConfig(ClusterConfigMessage{})
c1.ClusterConfig(ClusterConfigMessage{})
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
@@ -205,11 +191,7 @@ func TestTypeErr(t *testing.T) {
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c0.Start()
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c1.Start()
c0.ClusterConfig(ClusterConfigMessage{})
c1.ClusterConfig(ClusterConfigMessage{})
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
w := xdr.NewWriter(c0.cw)
w.WriteUint32(encodeHeader(header{
@@ -232,11 +214,7 @@ func TestClose(t *testing.T) {
br, bw := io.Pipe()
c0 := NewConnection(c0ID, ar, bw, m0, "name", CompressAlways).(wireFormatConnection).next.(*rawConnection)
c0.Start()
c1 := NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c1.Start()
c0.ClusterConfig(ClusterConfigMessage{})
c1.ClusterConfig(ClusterConfigMessage{})
NewConnection(c1ID, br, aw, m1, "name", CompressAlways)
c0.close(nil)
@@ -251,10 +229,10 @@ func TestClose(t *testing.T) {
t.Error("Ping should not return true")
}
c0.Index("default", nil, 0, nil)
c0.Index("default", nil, 0, nil)
c0.Index("default", nil)
c0.Index("default", nil)
if _, err := c0.Request("default", "foo", 0, 0, nil, 0, nil); err == nil {
if _, err := c0.Request("default", "foo", 0, 0); err == nil {
t.Error("Request should return an error")
}
}

View File

@@ -1,115 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// The Vector type represents a version vector. The zero value is a usable
// version vector. The vector has slice semantics and some operations on it
// are "append-like" in that they may return the same vector modified, or a
// new allocated Vector with the modified contents.
type Vector []Counter
// Counter represents a single counter in the version vector.
type Counter struct {
ID uint64
Value uint64
}
// Update returns a Vector with the index for the specific ID incremented by
// one. If it is possible, the vector v is updated and returned. If it is not,
// a copy will be created, updated and returned.
func (v Vector) Update(ID uint64) Vector {
for i := range v {
if v[i].ID == ID {
// Update an existing index
v[i].Value++
return v
} else if v[i].ID > ID {
// Insert a new index
nv := make(Vector, len(v)+1)
copy(nv, v[:i])
nv[i].ID = ID
nv[i].Value = 1
copy(nv[i+1:], v[i:])
return nv
}
}
// Append a new index
return append(v, Counter{ID, 1})
}
// Merge returns the vector containing the maximum indexes from a and b. If it
// is possible, the vector a is updated and returned. If it is not, a copy
// will be created, updated and returned.
func (a Vector) Merge(b Vector) Vector {
var ai, bi int
for bi < len(b) {
if ai == len(a) {
// We've reached the end of a; all that remains are appends
return append(a, b[bi:]...)
}
if a[ai].ID > b[bi].ID {
// The index from b should be inserted here
n := make(Vector, len(a)+1)
copy(n, a[:ai])
n[ai] = b[bi]
copy(n[ai+1:], a[ai:])
a = n
}
if a[ai].ID == b[bi].ID {
if v := b[bi].Value; v > a[ai].Value {
a[ai].Value = v
}
}
if bi < len(b) && a[ai].ID == b[bi].ID {
bi++
}
ai++
}
return a
}
// Copy returns an identical vector that is not shared with v.
func (v Vector) Copy() Vector {
nv := make(Vector, len(v))
copy(nv, v)
return nv
}
// Equal returns true when the two vectors are equivalent.
func (a Vector) Equal(b Vector) bool {
return a.Compare(b) == Equal
}
// LesserEqual returns true when the two vectors are equivalent or a is Lesser
// than b.
func (a Vector) LesserEqual(b Vector) bool {
comp := a.Compare(b)
return comp == Lesser || comp == Equal
}
// GreaterEqual returns true when the two vectors are equivalent or a is Greater
// than b.
func (a Vector) GreaterEqual(b Vector) bool {
comp := a.Compare(b)
return comp == Greater || comp == Equal
}
// Concurrent returns true when the two vectors are concurrent.
func (a Vector) Concurrent(b Vector) bool {
comp := a.Compare(b)
return comp == ConcurrentGreater || comp == ConcurrentLesser
}
// Counter returns the current value of the given counter ID.
func (v Vector) Counter(id uint64) uint64 {
for _, c := range v {
if c.ID == id {
return c.Value
}
}
return 0
}
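This file is deleted in the comparison, but the slice semantics it documents are worth a concrete trace: counters stay sorted by ID, and Update either mutates in place or reallocates. A sketch against the removed API (import path assumed, as above):

package main

import (
    "fmt"

    "github.com/syncthing/protocol" // assumed import path, as above
)

func main() {
    var v protocol.Vector // the zero value is a usable version vector
    v = v.Update(42)      // append:                [{42 1}]
    v = v.Update(36)      // insert at the front:   [{36 1} {42 1}]
    v = v.Update(36)      // bump existing counter: [{36 2} {42 1}]
    fmt.Printf("%+v\n", v)
}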

View File

@@ -1,89 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// Ordering represents the relationship between two Vectors.
type Ordering int
const (
Equal Ordering = iota
Greater
Lesser
ConcurrentLesser
ConcurrentGreater
)
// There's really no such thing as "concurrent lesser" and "concurrent
// greater" in version vectors, just "concurrent". But it's useful to be able
// to get a strict ordering between versions for stable sorts and so on, so we
// return both variants. The convenience method Concurrent() can be used to
// check for either case.
// Compare returns the Ordering that describes a's relation to b.
func (a Vector) Compare(b Vector) Ordering {
var ai, bi int // index into a and b
var av, bv Counter // value at current index
result := Equal
for ai < len(a) || bi < len(b) {
var aMissing, bMissing bool
if ai < len(a) {
av = a[ai]
} else {
av = Counter{}
aMissing = true
}
if bi < len(b) {
bv = b[bi]
} else {
bv = Counter{}
bMissing = true
}
switch {
case av.ID == bv.ID:
// We have a counter value for each side
if av.Value > bv.Value {
if result == Lesser {
return ConcurrentLesser
}
result = Greater
} else if av.Value < bv.Value {
if result == Greater {
return ConcurrentGreater
}
result = Lesser
}
case !aMissing && av.ID < bv.ID || bMissing:
// Value is missing on the b side
if av.Value > 0 {
if result == Lesser {
return ConcurrentLesser
}
result = Greater
}
case !bMissing && bv.ID < av.ID || aMissing:
// Value is missing on the a side
if bv.Value > 0 {
if result == Greater {
return ConcurrentGreater
}
result = Lesser
}
}
if ai < len(a) && (av.ID <= bv.ID || bMissing) {
ai++
}
if bi < len(b) && (bv.ID <= av.ID || aMissing) {
bi++
}
}
return result
}
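A worked example of the ordering: if device 1 and device 2 have each made changes the other has not seen, neither vector descends from the other, and Compare reports a concurrent result whose Greater/Lesser flavor only serves stable sorting. Assuming the same package as above:

package main

import (
    "fmt"

    "github.com/syncthing/protocol" // assumed import path, as above
)

func main() {
    a := protocol.Vector{{ID: 1, Value: 2}} // two changes by device 1
    b := protocol.Vector{{ID: 2, Value: 1}} // one change by device 2

    fmt.Println(a.Compare(b) == protocol.ConcurrentGreater) // true
    fmt.Println(a.Concurrent(b))                            // true
    fmt.Println(a.Merge(b))                                 // [{1 2} {2 1}]
}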

View File

@@ -1,249 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
import (
"math"
"testing"
)
func TestCompare(t *testing.T) {
testcases := []struct {
a, b Vector
r Ordering
}{
// Empty vectors are identical
{Vector{}, Vector{}, Equal},
{Vector{}, nil, Equal},
{nil, Vector{}, Equal},
{nil, Vector{Counter{42, 0}}, Equal},
{Vector{}, Vector{Counter{42, 0}}, Equal},
{Vector{Counter{42, 0}}, nil, Equal},
{Vector{Counter{42, 0}}, Vector{}, Equal},
// Zero is the implied value for a missing Counter
{
Vector{Counter{42, 0}},
Vector{Counter{77, 0}},
Equal,
},
// Equal vectors are equal
{
Vector{Counter{42, 33}},
Vector{Counter{42, 33}},
Equal,
},
{
Vector{Counter{42, 33}, Counter{77, 24}},
Vector{Counter{42, 33}, Counter{77, 24}},
Equal,
},
// These a-vectors are all greater than the b-vector
{
Vector{Counter{42, 1}},
nil,
Greater,
},
{
Vector{Counter{42, 1}},
Vector{},
Greater,
},
{
Vector{Counter{0, 1}},
Vector{Counter{0, 0}},
Greater,
},
{
Vector{Counter{42, 1}},
Vector{Counter{42, 0}},
Greater,
},
{
Vector{Counter{math.MaxUint64, 1}},
Vector{Counter{math.MaxUint64, 0}},
Greater,
},
{
Vector{Counter{0, math.MaxUint64}},
Vector{Counter{0, 0}},
Greater,
},
{
Vector{Counter{42, math.MaxUint64}},
Vector{Counter{42, 0}},
Greater,
},
{
Vector{Counter{math.MaxUint64, math.MaxUint64}},
Vector{Counter{math.MaxUint64, 0}},
Greater,
},
{
Vector{Counter{0, math.MaxUint64}},
Vector{Counter{0, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{42, math.MaxUint64}},
Vector{Counter{42, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{math.MaxUint64, math.MaxUint64}},
Vector{Counter{math.MaxUint64, math.MaxUint64 - 1}},
Greater,
},
{
Vector{Counter{42, 2}},
Vector{Counter{42, 1}},
Greater,
},
{
Vector{Counter{22, 22}, Counter{42, 2}},
Vector{Counter{22, 22}, Counter{42, 1}},
Greater,
},
{
Vector{Counter{42, 2}, Counter{77, 3}},
Vector{Counter{42, 1}, Counter{77, 3}},
Greater,
},
{
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Greater,
},
{
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Greater,
},
// These a-vectors are all lesser than the b-vector
{nil, Vector{Counter{42, 1}}, Lesser},
{Vector{}, Vector{Counter{42, 1}}, Lesser},
{
Vector{Counter{42, 0}},
Vector{Counter{42, 1}},
Lesser,
},
{
Vector{Counter{42, 1}},
Vector{Counter{42, 2}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}},
Vector{Counter{22, 22}, Counter{42, 2}},
Lesser,
},
{
Vector{Counter{42, 1}, Counter{77, 3}},
Vector{Counter{42, 2}, Counter{77, 3}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
Lesser,
},
{
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
Lesser,
},
// These are all in conflict
{
Vector{Counter{42, 2}},
Vector{Counter{43, 1}},
ConcurrentGreater,
},
{
Vector{Counter{43, 1}},
Vector{Counter{42, 2}},
ConcurrentLesser,
},
{
Vector{Counter{22, 23}, Counter{42, 1}},
Vector{Counter{22, 22}, Counter{42, 2}},
ConcurrentGreater,
},
{
Vector{Counter{22, 21}, Counter{42, 2}},
Vector{Counter{22, 22}, Counter{42, 1}},
ConcurrentLesser,
},
{
Vector{Counter{22, 21}, Counter{42, 2}, Counter{43, 1}},
Vector{Counter{20, 1}, Counter{22, 22}, Counter{42, 1}},
ConcurrentLesser,
},
}
for i, tc := range testcases {
// Test real Compare
if r := tc.a.Compare(tc.b); r != tc.r {
t.Errorf("%d: %+v.Compare(%+v) == %v (expected %v)", i, tc.a, tc.b, r, tc.r)
}
// Test convenience functions
switch tc.r {
case Greater:
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if !tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v not >= %+v", tc.a, tc.b)
}
if tc.a.LesserEqual(tc.b) {
t.Errorf("%+v <= %+v", tc.a, tc.b)
}
case Lesser:
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v >= %+v", tc.a, tc.b)
}
if !tc.a.LesserEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
case Equal:
if tc.a.Concurrent(tc.b) {
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
}
if !tc.a.Equal(tc.b) {
t.Errorf("%+v not == %+v", tc.a, tc.b)
}
if !tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
if !tc.a.LesserEqual(tc.b) {
t.Errorf("%+v not <= %+v", tc.a, tc.b)
}
case ConcurrentLesser, ConcurrentGreater:
if !tc.a.Concurrent(tc.b) {
t.Errorf("%+v not concurrent %+v", tc.a, tc.b)
}
if tc.a.Equal(tc.b) {
t.Errorf("%+v == %+v", tc.a, tc.b)
}
if tc.a.GreaterEqual(tc.b) {
t.Errorf("%+v >= %+v", tc.a, tc.b)
}
if tc.a.LesserEqual(tc.b) {
t.Errorf("%+v <= %+v", tc.a, tc.b)
}
}
}
}

View File

@@ -1,134 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
import "testing"
func TestUpdate(t *testing.T) {
var v Vector
// Append
v = v.Update(42)
expected := Vector{Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Insert at front
v = v.Update(36)
expected = Vector{Counter{36, 1}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Insert in middle
v = v.Update(37)
expected = Vector{Counter{36, 1}, Counter{37, 1}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
// Update existing
v = v.Update(37)
expected = Vector{Counter{36, 1}, Counter{37, 2}, Counter{42, 1}}
if v.Compare(expected) != Equal {
t.Errorf("Update error, %+v != %+v", v, expected)
}
}
func TestCopy(t *testing.T) {
v0 := Vector{Counter{42, 1}}
v1 := v0.Copy()
v1.Update(42)
if v0.Compare(v1) != Lesser {
t.Errorf("Copy error, %+v should be ancestor of %+v", v0, v1)
}
}
func TestMerge(t *testing.T) {
testcases := []struct {
a, b, m Vector
}{
// No-ops
{
Vector{},
Vector{},
Vector{},
},
{
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Appends
{
Vector{},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
{
Vector{Counter{22, 1}},
Vector{Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
{
Vector{Counter{22, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Insert
{
Vector{Counter{22, 1}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
},
{
Vector{Counter{42, 1}},
Vector{Counter{22, 1}},
Vector{Counter{22, 1}, Counter{42, 1}},
},
// Update
{
Vector{Counter{22, 1}, Counter{42, 2}},
Vector{Counter{22, 2}, Counter{42, 1}},
Vector{Counter{22, 2}, Counter{42, 2}},
},
// All of the above
{
Vector{Counter{10, 1}, Counter{20, 2}, Counter{30, 1}},
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 1}, Counter{25, 1}, Counter{35, 1}},
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 2}, Counter{25, 1}, Counter{30, 1}, Counter{35, 1}},
},
}
for i, tc := range testcases {
if m := tc.a.Merge(tc.b); m.Compare(tc.m) != Equal {
t.Errorf("%d: %+v.Merge(%+v) == %+v (expected %+v)", i, tc.a, tc.b, m, tc.m)
}
}
}
func TestCounterValue(t *testing.T) {
v0 := Vector{Counter{42, 1}, Counter{64, 5}}
if v0.Counter(42) != 1 {
t.Errorf("Counter error, %d != %d", v0.Counter(42), 1)
}
if v0.Counter(64) != 5 {
t.Errorf("Counter error, %d != %d", v0.Counter(64), 5)
}
if v0.Counter(72) != 0 {
t.Errorf("Counter error, %d != %d", v0.Counter(72), 0)
}
}

View File

@@ -1,38 +0,0 @@
// Copyright (C) 2015 The Protocol Authors.
package protocol
// This stuff is hacked up manually because genxdr doesn't support 'type
// Vector []Counter' declarations and it was tricky when I tried to add it...
type xdrWriter interface {
WriteUint32(uint32) (int, error)
WriteUint64(uint64) (int, error)
}
type xdrReader interface {
ReadUint32() uint32
ReadUint64() uint64
}
// EncodeXDRInto encodes the vector as an XDR object into the given XDR
// encoder.
func (v Vector) EncodeXDRInto(w xdrWriter) (int, error) {
w.WriteUint32(uint32(len(v)))
for i := range v {
w.WriteUint64(v[i].ID)
w.WriteUint64(v[i].Value)
}
return 4 + 16*len(v), nil
}
// DecodeXDRFrom decodes the XDR objects from the given reader into itself.
func (v *Vector) DecodeXDRFrom(r xdrReader) error {
l := int(r.ReadUint32())
n := make(Vector, l)
for i := range n {
n[i].ID = r.ReadUint64()
n[i].Value = r.ReadUint64()
}
*v = n
return nil
}
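Since each Counter is two uint64s, the encoding above is exactly a 4-byte length prefix plus 16 bytes per counter, which is what the hardcoded return value asserts. A quick check, assuming the calmh/xdr writer used throughout this comparison:

package main

import (
    "bytes"
    "fmt"

    "github.com/calmh/xdr" // assumed: the XDR helper used by this repo
    "github.com/syncthing/protocol"
)

func main() {
    v := protocol.Vector{{ID: 1, Value: 1}, {ID: 2, Value: 3}}

    var buf bytes.Buffer
    n, _ := v.EncodeXDRInto(xdr.NewWriter(&buf))
    fmt.Println(n, buf.Len()) // 36 36: 4-byte length prefix + 2 × 16-byte counters
}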

View File

@@ -12,10 +12,6 @@ type wireFormatConnection struct {
next Connection
}
func (c wireFormatConnection) Start() {
c.next.Start()
}
func (c wireFormatConnection) ID() DeviceID {
return c.next.ID()
}
@@ -24,7 +20,7 @@ func (c wireFormatConnection) Name() string {
return c.next.Name()
}
func (c wireFormatConnection) Index(folder string, fs []FileInfo, flags uint32, options []Option) error {
func (c wireFormatConnection) Index(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@@ -32,10 +28,10 @@ func (c wireFormatConnection) Index(folder string, fs []FileInfo, flags uint32,
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.Index(folder, myFs, flags, options)
return c.next.Index(folder, myFs)
}
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo, flags uint32, options []Option) error {
func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo) error {
var myFs = make([]FileInfo, len(fs))
copy(myFs, fs)
@@ -43,12 +39,12 @@ func (c wireFormatConnection) IndexUpdate(folder string, fs []FileInfo, flags ui
myFs[i].Name = norm.NFC.String(filepath.ToSlash(myFs[i].Name))
}
return c.next.IndexUpdate(folder, myFs, flags, options)
return c.next.IndexUpdate(folder, myFs)
}
func (c wireFormatConnection) Request(folder, name string, offset int64, size int, hash []byte, flags uint32, options []Option) ([]byte, error) {
func (c wireFormatConnection) Request(folder, name string, offset int64, size int) ([]byte, error) {
name = norm.NFC.String(filepath.ToSlash(name))
return c.next.Request(folder, name, offset, size, hash, flags, options)
return c.next.Request(folder, name, offset, size)
}
func (c wireFormatConnection) ClusterConfig(config ClusterConfigMessage) {
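This is the other half of the normalization story from the Darwin model above: regardless of what the local filesystem uses, names cross the wire in NFC with forward slashes. Illustrated standalone:

package main

import (
    "fmt"
    "path/filepath"

    "golang.org/x/text/unicode/norm"
)

func main() {
    local := "docs/re\u0301sume\u0301.txt" // decomposed (NFD), as HFS+ reports it
    wire := norm.NFC.String(filepath.ToSlash(local))

    fmt.Println(len(local), len(wire)) // 19 17: each e + accent re-composed to é
}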

View File

@@ -63,14 +63,13 @@ type DB struct {
journalAckC chan error
// Compaction.
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compWriteLocking bool
compStats []cStats
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
compErrC chan error
compPerErrC chan error
compErrSetC chan error
compStats []cStats
// Close.
closeW sync.WaitGroup
@@ -109,44 +108,28 @@ func openDB(s *session) (*DB, error) {
closeC: make(chan struct{}),
}
// Read-only mode.
readOnly := s.o.GetReadOnly()
if err := db.recoverJournal(); err != nil {
return nil, err
}
if readOnly {
// Recover journals (read-only mode).
if err := db.recoverJournalRO(); err != nil {
return nil, err
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
} else {
// Recover journals.
if err := db.recoverJournal(); err != nil {
return nil, err
}
// Remove any obsolete files.
if err := db.checkAndCleanFiles(); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
}
return nil, err
}
return nil, err
}
// Doesn't need to be included in the wait group.
go db.compactionError()
go db.mpoolDrain()
if readOnly {
db.SetReadOnly()
} else {
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
}
db.closeW.Add(3)
go db.tCompaction()
go db.mCompaction()
go db.jWriter()
s.logf("db@open done T·%v", time.Since(start))
@@ -292,7 +275,7 @@ func recoverTable(s *session, o *opt.Options) error {
// We will drop corrupted table.
strict = o.GetStrict(opt.StrictRecovery)
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: o.GetNumLevel()}
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
@@ -467,136 +450,132 @@ func recoverTable(s *session, o *opt.Options) error {
}
func (db *DB) recoverJournal() error {
// Get all journals and sort it by file number.
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
// Get all tables and sort it by file number.
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
files(allJournalFiles).sort()
journalFiles := files(journalFiles_)
journalFiles.sort()
// Journals that will be recovered.
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
// Discard older journal.
prev := -1
for i, file := range journalFiles {
if file.Num() >= db.s.stJournalNum {
if prev >= 0 {
i--
journalFiles[i] = journalFiles[prev]
}
journalFiles = journalFiles[i:]
break
} else if file.Num() == db.s.stPrevJournalNum {
prev = i
}
}
var (
of storage.File // Obsolete file.
rec = &sessionRecord{}
)
var jr *journal.Reader
var of storage.File
var mem *memdb.DB
batch := new(Batch)
cm := newCMem(db.s)
buf := new(util.Buffer)
// Options.
strict := db.s.o.GetStrict(opt.StrictJournal)
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer := db.s.o.GetWriteBuffer()
recoverJournal := func(file storage.File) error {
db.logf("journal@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
defer reader.Close()
// Recover journals.
if len(recJournalFiles) > 0 {
db.logf("journal@recovery F·%d", len(recJournalFiles))
// Mark file number as used.
db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
jr *journal.Reader
mdb = memdb.New(db.s.icmp, writeBuffer)
buf = &util.Buffer{}
batch = &Batch{}
)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())
fr, err := jf.Open()
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}
// Flush memdb and remove obsolete journal file.
if of != nil {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
fr.Close()
return err
}
}
rec.setJournalNum(jf.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
fr.Close()
return err
}
rec.resetAddedTables()
of.Remove()
of = nil
}
// Replay journal to memdb.
mdb.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFile(err, jf)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is the error returned due to corruption, with strict == false.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mdb.Size() >= writeBuffer {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
fr.Close()
return err
}
mdb.Reset()
}
}
fr.Close()
of = jf
// Create/reset journal reader instance.
if jr == nil {
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
} else {
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
}
// Flush the last memdb.
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
// Flush memdb and remove obsolete journal file.
if of != nil {
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
if err := cm.commit(file.Num(), db.seq); err != nil {
return err
}
cm.reset()
of.Remove()
of = nil
}
// Replay journal to memdb.
mem.Reset()
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
return errors.SetFile(err, file)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is the error returned due to corruption, with strict == false.
continue
} else {
return errors.SetFile(err, file)
}
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
if strict || !errors.IsCorrupted(err) {
return errors.SetFile(err, file)
} else {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
// Flush it if large enough.
if mem.Size() >= writeBuffer {
if err := cm.flush(mem, 0); err != nil {
return err
}
mem.Reset()
}
}
of = file
return nil
}
// Recover all journals.
if len(journalFiles) > 0 {
db.logf("journal@recovery F·%d", len(journalFiles))
// Mark file number as used.
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
mem = memdb.New(db.s.icmp, writeBuffer)
for _, file := range journalFiles {
if err := recoverJournal(file); err != nil {
return err
}
}
// Flush the last journal.
if mem.Len() > 0 {
if err := cm.flush(mem, 0); err != nil {
return err
}
}
@@ -608,10 +587,8 @@ func (db *DB) recoverJournal() error {
}
// Commit.
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
// Close journal on error.
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
// Close journal.
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -627,103 +604,6 @@ func (db *DB) recoverJournal() error {
return nil
}
func (db *DB) recoverJournalRO() error {
// Get all journals and sort it by file number.
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
files(allJournalFiles).sort()
// Journals that will be recovered.
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
}
}
var (
// Options.
strict = db.s.o.GetStrict(opt.StrictJournal)
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
writeBuffer = db.s.o.GetWriteBuffer()
mdb = memdb.New(db.s.icmp, writeBuffer)
)
// Recover journals.
if len(recJournalFiles) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
var (
jr *journal.Reader
buf = &util.Buffer{}
batch = &Batch{}
)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())
fr, err := jf.Open()
if err != nil {
return err
}
// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}
// Replay journal to memdb.
for {
r, err := jr.Next()
if err != nil {
if err == io.EOF {
break
}
fr.Close()
return errors.SetFile(err, jf)
}
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if err == io.ErrUnexpectedEOF {
// This is the error returned due to corruption when strict == false.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
db.s.logf("journal error: %v (skipped)", err)
// We won't apply sequence number as it might be corrupted.
continue
}
fr.Close()
return errors.SetFile(err, jf)
}
// Save sequence number.
db.seq = batch.seq + uint64(batch.Len())
}
fr.Close()
}
}
// Set memDB.
db.mem = &memDB{db: db, DB: mdb, ref: 1}
return nil
}
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIkey(key, seq, ktSeek)
@@ -734,7 +614,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
}
defer m.decref()
mk, mv, me := m.Find(ikey)
mk, mv, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
@@ -772,7 +652,7 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er
}
defer m.decref()
mk, _, me := m.Find(ikey)
mk, _, me := m.mdb.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
@@ -904,7 +784,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
const prefix = "leveldb."
if !strings.HasPrefix(name, prefix) {
return "", ErrNotFound
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
}
p := name[len(prefix):]
@@ -918,7 +798,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
var rest string
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = ErrNotFound
err = errors.New("leveldb: GetProperty: invalid property: " + name)
} else {
value = fmt.Sprint(v.tLen(int(level)))
}
@@ -957,7 +837,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
case p == "aliveiters":
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
default:
err = ErrNotFound
err = errors.New("leveldb: GetProperty: unknown property: " + name)
}
return
@@ -1020,9 +900,6 @@ func (db *DB) Close() error {
var err error
select {
case err = <-db.compErrC:
if err == ErrReadOnly {
err = nil
}
default:
}

View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
@@ -61,8 +62,58 @@ func (p *cStatsStaging) stopTimer() {
}
}
type cMem struct {
s *session
level int
rec *sessionRecord
}
func newCMem(s *session) *cMem {
return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
}
func (c *cMem) flush(mem *memdb.DB, level int) error {
s := c.s
// Write memdb to table.
iter := mem.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return err
}
// Pick level.
if level < 0 {
v := s.version()
level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
v.release()
}
c.rec.addTableFile(level, t)
s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
c.level = level
return nil
}
func (c *cMem) reset() {
c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
}
func (c *cMem) commit(journal, seq uint64) error {
c.rec.setJournalNum(journal)
c.rec.setSeqNum(seq)
// Commit changes.
return c.s.commit(c.rec)
}
func (db *DB) compactionError() {
var err error
var (
err error
wlocked bool
)
noerr:
// No error.
for {
@@ -70,7 +121,7 @@ noerr:
case err = <-db.compErrSetC:
switch {
case err == nil:
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
goto haserr
@@ -88,7 +139,7 @@ haserr:
switch {
case err == nil:
goto noerr
case err == ErrReadOnly, errors.IsCorrupted(err):
case errors.IsCorrupted(err):
goto hasperr
default:
}
@@ -104,9 +155,9 @@ hasperr:
case db.compPerErrC <- err:
case db.writeLockC <- struct{}{}:
// Hold write lock, so that write won't pass-through.
db.compWriteLocking = true
wlocked = true
case _, _ = <-db.closeC:
if db.compWriteLocking {
if wlocked {
// We should release the lock or Close will hang.
<-db.writeLockC
}
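
The noerr/haserr/hasperr labels above form a small state machine over compaction errors. The sketch below, with illustrative names, captures the classification it performs: a nil error clears the state, corruption is persistent (the write lock stays held from then on), and anything else is transient:

package main

import (
	"errors"
	"fmt"
)

var errCorrupted = errors.New("corrupted")

// classify walks a stream of compaction errors the way the
// noerr/haserr/hasperr loop does: nil recovers, corruption is terminal.
func classify(events []error) string {
	state := "noerr"
	for _, err := range events {
		switch {
		case err == nil:
			state = "noerr"
		case errors.Is(err, errCorrupted):
			return "hasperr" // persistent: hold the write lock from here on
		default:
			state = "haserr"
		}
	}
	return state
}

func main() {
	fmt.Println(classify([]error{errors.New("io timeout"), nil})) // noerr
	fmt.Println(classify([]error{errors.New("io timeout")}))      // haserr
	fmt.Println(classify([]error{errCorrupted}))                  // hasperr
}
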
@@ -236,18 +287,21 @@ func (db *DB) compactionExitTransact() {
}
func (db *DB) memCompaction() {
mdb := db.getFrozenMem()
if mdb == nil {
mem := db.getFrozenMem()
if mem == nil {
return
}
defer mdb.decref()
defer mem.decref()
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
c := newCMem(db.s)
stats := new(cStatsStaging)
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
// Don't compact empty memdb.
if mdb.Len() == 0 {
db.logf("memdb@flush skipping")
// drop frozen memdb
if mem.mdb.Len() == 0 {
db.logf("mem@flush skipping")
// drop frozen mem
db.dropFrozenMem()
return
}
@@ -263,20 +317,13 @@ func (db *DB) memCompaction() {
return
}
var (
rec = &sessionRecord{}
stats = &cStatsStaging{}
flushLevel int
)
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
stats.stopTimer()
return
defer stats.stopTimer()
return c.flush(mem.mdb, -1)
}, func() error {
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
for _, r := range c.rec.addedTables {
db.logf("mem@flush revert @%d", r.num)
f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
@@ -285,23 +332,20 @@ func (db *DB) memCompaction() {
return nil
})
db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.frozenSeq)
err = db.s.commit(rec)
stats.stopTimer()
return
defer stats.stopTimer()
return c.commit(db.journalFile.Num(), db.frozenSeq)
}, nil)
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
for _, r := range rec.addedTables {
for _, r := range c.rec.addedTables {
stats.write += r.size
}
db.compStats[flushLevel].add(stats)
db.compStats[c.level].add(stats)
// Drop frozen memdb.
// Drop frozen mem.
db.dropFrozenMem()
// Resume table compaction.
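
compactionTransactFunc pairs a transaction with a revert callback that undoes partial effects, for example removing table files created by a failed flush before it could be committed. A minimal sketch of that pairing, assuming a bounded retry count where the real code retries with backoff; all names are illustrative:

package main

import (
	"errors"
	"fmt"
)

// transact runs fn and, if it cannot succeed, calls revert to undo
// partial effects, such as deleting tables created by a failed flush.
func transact(name string, fn func() error, revert func() error) error {
	const maxRetries = 3 // the real code retries with backoff instead
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fn(); err == nil {
			return nil
		}
		fmt.Printf("%s: attempt %d failed: %v\n", name, i+1, err)
	}
	if revert != nil {
		if rerr := revert(); rerr != nil {
			return fmt.Errorf("%s: revert failed: %v (after %v)", name, rerr, err)
		}
	}
	return err
}

func main() {
	created := []string{"t1.sst"}
	err := transact("mem@flush",
		func() error { return errors.New("disk full") },
		func() error { fmt.Println("removing", created); return nil },
	)
	fmt.Println("result:", err)
}
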
@@ -513,7 +557,7 @@ func (b *tableCompactionBuilder) revert() error {
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()
rec := &sessionRecord{}
rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
rec.addCompPtr(c.level, c.imax)
if !noTrivial && c.trivial() {

View File

@@ -8,7 +8,6 @@ package leveldb
import (
"errors"
"math/rand"
"runtime"
"sync"
"sync/atomic"
@@ -40,11 +39,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
emi := em.NewIterator(slice)
emi := em.mdb.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
i = append(i, emi)
if fm != nil {
fmi := fm.NewIterator(slice)
fmi := fm.mdb.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
i = append(i, fmi)
}
@@ -81,10 +80,6 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
return iter
}
func (db *DB) iterSamplingRate() int {
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
}
type dir int
const (
@@ -103,21 +98,11 @@ type dbIter struct {
seq uint64
strict bool
smaplingGap int
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) sampleSeek() {
ikey := i.iter.Key()
i.smaplingGap -= len(ikey) + len(i.iter.Value())
for i.smaplingGap < 0 {
i.smaplingGap += i.db.iterSamplingRate()
i.db.sampleSeek(ikey)
}
dir dir
key []byte
value []byte
err error
releaser util.Releaser
}
func (i *dbIter) setErr(err error) {
@@ -190,7 +175,6 @@ func (i *dbIter) Seek(key []byte) bool {
func (i *dbIter) next() bool {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
switch kt {
case ktDel:
@@ -241,7 +225,6 @@ func (i *dbIter) prev() bool {
if i.iter.Valid() {
for {
if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if seq <= i.seq {
if !del && i.icmp.uCompare(ukey, i.key) < 0 {
return true
@@ -283,7 +266,6 @@ func (i *dbIter) Prev() bool {
case dirForward:
for i.iter.Prev() {
if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
i.sampleSeek()
if i.icmp.uCompare(ukey, i.key) < 0 {
goto cont
}

View File

@@ -15,8 +15,8 @@ import (
)
type memDB struct {
db *DB
*memdb.DB
db *DB
mdb *memdb.DB
ref int32
}
@@ -27,12 +27,12 @@ func (m *memDB) incref() {
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back memdb with std capacity.
if m.Capacity() == m.db.s.o.GetWriteBuffer() {
m.Reset()
m.db.mpoolPut(m.DB)
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
}
m.db = nil
m.DB = nil
m.mdb = nil
} else if ref < 0 {
panic("negative memdb ref")
}
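
The refactor above replaces the embedded *memdb.DB with a named mdb field, but the reference-counting discipline is unchanged: the last decref resets the memdb and returns it to a pool, and a negative count is a bug. A standalone sketch of that discipline with an illustrative refBuf type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refBuf is a refcounted buffer that returns itself to a pool when the
// last reference is dropped, the same discipline memDB.decref follows.
type refBuf struct {
	buf []byte
	ref int32
}

var pool = sync.Pool{New: func() interface{} { return &refBuf{buf: make([]byte, 0, 1024)} }}

func (b *refBuf) incref() { atomic.AddInt32(&b.ref, 1) }

func (b *refBuf) decref() {
	switch ref := atomic.AddInt32(&b.ref, -1); {
	case ref == 0:
		b.buf = b.buf[:0] // reset before pooling, like mdb.Reset()
		pool.Put(b)
	case ref < 0:
		panic("negative ref")
	}
}

func main() {
	b := pool.Get().(*refBuf)
	b.incref() // the DB's reference
	b.incref() // a reader's reference
	b.decref() // reader done
	b.decref() // DB done: the buffer goes back to the pool here
	fmt.Println("released")
}
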
@@ -48,15 +48,6 @@ func (db *DB) addSeq(delta uint64) {
atomic.AddUint64(&db.seq, delta)
}
func (db *DB) sampleSeek(ikey iKey) {
v := db.s.version()
if v.sampleSeek(ikey) {
// Trigger table compaction.
db.compSendTrigger(db.tcompCmdC)
}
v.release()
}
func (db *DB) mpoolPut(mem *memdb.DB) {
defer func() {
recover()
@@ -126,7 +117,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
}
mem = &memDB{
db: db,
DB: mdb,
mdb: mdb,
ref: 2,
}
db.mem = mem

View File

@@ -405,21 +405,19 @@ func (h *dbHarness) compactRange(min, max string) {
t.Log("DB range compaction done")
}
func (h *dbHarness) sizeOf(start, limit string) uint64 {
sz, err := h.db.SizeOf([]util.Range{
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
t := h.t
db := h.db
s, err := db.SizeOf([]util.Range{
{[]byte(start), []byte(limit)},
})
if err != nil {
h.t.Error("SizeOf: got error: ", err)
t.Error("SizeOf: got error: ", err)
}
return sz.Sum()
}
func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
sz := h.sizeOf(start, limit)
if sz < low || sz > hi {
h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, sz)
if s.Sum() < low || s.Sum() > hi {
t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
shorten(start), shorten(limit), low, hi, s.Sum())
}
}
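
The sizeAssert helper builds on the public SizeOf API. Below is a hedged usage sketch against goleveldb's public API as of this vintage; note that SizeOf only accounts for table files, so the sketch compacts the memdb first to get a nonzero answer:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// In-memory storage keeps the example self-contained.
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	for _, k := range []string{"a", "b", "c"} {
		if err := db.Put([]byte(k), make([]byte, 10*1024), nil); err != nil {
			panic(err)
		}
	}
	// SizeOf only accounts for table files, so compact the memdb first.
	if err := db.CompactRange(util.Range{}); err != nil {
		panic(err)
	}

	sizes, err := db.SizeOf([]util.Range{{Start: []byte("a"), Limit: []byte("z")}})
	if err != nil {
		panic(err)
	}
	fmt.Println("approximate on-disk size:", sizes.Sum())
}
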
@@ -2445,7 +2443,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
if err != nil {
t.Fatal(err)
}
rec := &sessionRecord{}
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
rec.addTableFile(i, tf)
if err := s.commit(rec); err != nil {
t.Fatal(err)
@@ -2455,7 +2453,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build grandparent.
v := s.version()
c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec := &sessionRecord{}
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
b := &tableCompactionBuilder{
s: s,
c: c,
@@ -2479,7 +2477,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build level-1.
v = s.version()
c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
b = &tableCompactionBuilder{
s: s,
c: c,
@@ -2523,7 +2521,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Compaction with transient error.
v = s.version()
c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
b = &tableCompactionBuilder{
s: s,
c: c,
@@ -2579,123 +2577,3 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
}
v.release()
}
func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
const (
vSize = 200 * opt.KiB
tSize = 100 * opt.MiB
mIter = 100
n = tSize / vSize
)
h := newDbHarnessWopt(t, &opt.Options{
Compression: opt.NoCompression,
DisableBlockCache: true,
})
defer h.close()
key := func(x int) string {
return fmt.Sprintf("v%06d", x)
}
// Fill.
value := strings.Repeat("x", vSize)
for i := 0; i < n; i++ {
h.put(key(i), value)
}
h.compactMem()
// Delete all.
for i := 0; i < n; i++ {
h.delete(key(i))
}
h.compactMem()
var (
limit = n / limitDiv
startKey = key(0)
limitKey = key(limit)
maxKey = key(n)
slice = &util.Range{Limit: []byte(limitKey)}
initialSize0 = h.sizeOf(startKey, limitKey)
initialSize1 = h.sizeOf(limitKey, maxKey)
)
t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
for r := 0; true; r++ {
if r >= mIter {
t.Fatal("taking too long to compact")
}
// Iterates.
iter := h.db.NewIterator(slice, h.ro)
for iter.Next() {
}
if err := iter.Error(); err != nil {
t.Fatalf("Iter err: %v", err)
}
iter.Release()
// Wait compaction.
h.waitCompaction()
// Check size.
size0 := h.sizeOf(startKey, limitKey)
size1 := h.sizeOf(limitKey, maxKey)
t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
if size0 < initialSize0/10 {
break
}
}
if initialSize1 > 0 {
h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
}
}
func TestDB_IterTriggeredCompaction(t *testing.T) {
testDB_IterTriggeredCompaction(t, 1)
}
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
testDB_IterTriggeredCompaction(t, 2)
}
func TestDB_ReadOnly(t *testing.T) {
h := newDbHarness(t)
defer h.close()
h.put("foo", "v1")
h.put("bar", "v2")
h.compactMem()
h.put("xfoo", "v1")
h.put("xbar", "v2")
t.Log("Trigger read-only")
if err := h.db.SetReadOnly(); err != nil {
h.close()
t.Fatalf("SetReadOnly error: %v", err)
}
h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)
ro := func(key, value, wantValue string) {
if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
t.Fatalf("unexpected error: %v", err)
}
h.getVal(key, wantValue)
}
ro("foo", "vx", "v1")
h.o.ReadOnly = true
h.reopenDB()
ro("foo", "vx", "v1")
ro("bar", "vx", "v2")
h.assertNumKeys(4)
}

View File

@@ -63,24 +63,24 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
return
}
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
delayed := false
flush := func() (retry bool) {
v := db.s.version()
defer v.release()
mdb = db.getEffectiveMem()
mem = db.getEffectiveMem()
defer func() {
if retry {
mdb.decref()
mdb = nil
mem.decref()
mem = nil
}
}()
mdbFree = mdb.Free()
nn = mem.mdb.Free()
switch {
case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
delayed = true
time.Sleep(time.Millisecond)
case mdbFree >= n:
case nn >= n:
return false
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
delayed = true
@@ -90,15 +90,15 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
}
default:
// Allow memdb to grow if it has no entry.
if mdb.Len() == 0 {
mdbFree = n
if mem.mdb.Len() == 0 {
nn = n
} else {
mdb.decref()
mdb, err = db.rotateMem(n)
mem.decref()
mem, err = db.rotateMem(n)
if err == nil {
mdbFree = mdb.Free()
nn = mem.mdb.Free()
} else {
mdbFree = 0
nn = 0
}
}
return false
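
The switch above throttles writers based on the number of level-0 tables: below the slowdown trigger writes proceed, between the triggers each write is delayed once by a millisecond, and at the pause trigger writes must wait for compaction to catch up. A standalone sketch of that decision with illustrative names; 8 and 12 are the defaults visible further down in this diff:

package main

import (
	"fmt"
	"time"
)

// throttle mirrors the decision in flush: below the slowdown trigger a
// write proceeds, between the triggers it is delayed once by 1ms, and at
// the pause trigger it must wait for table compaction.
func throttle(level0Tables, slowdown, pause int, delayed *bool) string {
	switch {
	case level0Tables >= pause:
		return "pause: wait for table compaction"
	case level0Tables >= slowdown && !*delayed:
		*delayed = true
		time.Sleep(time.Millisecond)
		return "delayed 1ms, retry"
	default:
		return "proceed"
	}
}

func main() {
	delayed := false
	// 8 and 12 are DefaultWriteL0SlowdownTrigger / DefaultWriteL0PauseTrigger.
	fmt.Println(throttle(4, 8, 12, &delayed))
	fmt.Println(throttle(9, 8, 12, &delayed))
	fmt.Println(throttle(9, 8, 12, &delayed)) // already delayed once: proceed
	fmt.Println(throttle(12, 8, 12, &delayed))
}
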
@@ -157,18 +157,18 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
}
}()
mdb, mdbFree, err := db.flush(b.size())
mem, memFree, err := db.flush(b.size())
if err != nil {
return
}
defer mdb.decref()
defer mem.decref()
// Calculate maximum size of the batch.
m := 1 << 20
if x := b.size(); x <= 128<<10 {
m = x + (128 << 10)
}
m = minInt(m, mdbFree)
m = minInt(m, memFree)
// Merge with other batch.
drain:
@@ -197,7 +197,7 @@ drain:
select {
case db.journalC <- b:
// Write into memdb
if berr := b.memReplay(mdb.DB); berr != nil {
if berr := b.memReplay(mem.mdb); berr != nil {
panic(berr)
}
case err = <-db.compPerErrC:
@@ -211,7 +211,7 @@ drain:
case err = <-db.journalAckC:
if err != nil {
// Revert memdb if error detected
if berr := b.revertMemReplay(mdb.DB); berr != nil {
if berr := b.revertMemReplay(mem.mdb); berr != nil {
panic(berr)
}
return
@@ -225,7 +225,7 @@ drain:
if err != nil {
return
}
if berr := b.memReplay(mdb.DB); berr != nil {
if berr := b.memReplay(mem.mdb); berr != nil {
panic(berr)
}
}
@@ -233,7 +233,7 @@ drain:
// Set last seq number.
db.addSeq(uint64(b.Len()))
if b.size() >= mdbFree {
if b.size() >= memFree {
db.rotateMem(0)
}
return
@@ -249,7 +249,8 @@ func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
return db.Write(b, wo)
}
// Delete deletes the value for the given key.
// Delete deletes the value for the given key. It returns ErrNotFound if
// the DB does not contain the key.
//
// It is safe to modify the contents of the arguments after Delete returns.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
@@ -289,9 +290,9 @@ func (db *DB) CompactRange(r util.Range) error {
}
// Check for overlaps in memdb.
mdb := db.getEffectiveMem()
defer mdb.decref()
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
mem := db.getEffectiveMem()
defer mem.decref()
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
// Memdb compaction.
if _, err := db.rotateMem(0); err != nil {
<-db.writeLockC
@@ -308,31 +309,3 @@ func (db *DB) CompactRange(r util.Range) error {
// Table compaction.
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
func (db *DB) SetReadOnly() error {
if err := db.ok(); err != nil {
return err
}
// Lock writer.
select {
case db.writeLockC <- struct{}{}:
db.compWriteLocking = true
case err := <-db.compPerErrC:
return err
case _, _ = <-db.closeC:
return ErrClosed
}
// Set compaction read-only.
select {
case db.compErrSetC <- ErrReadOnly:
case perr := <-db.compPerErrC:
return perr
case _, _ = <-db.closeC:
return ErrClosed
}
return nil
}
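
SetReadOnly acquires the write lock through the same channel selects the rest of the write path uses, so a closed DB is always detected instead of deadlocking. A minimal sketch of that locking pattern with illustrative channel names:

package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("leveldb: closed")

// acquire takes a one-slot write lock unless the DB is closed,
// mirroring the select pattern SetReadOnly uses to lock out writers.
func acquire(writeLockC chan struct{}, closeC chan struct{}) error {
	select {
	case writeLockC <- struct{}{}:
		return nil
	case <-closeC:
		return errClosed
	}
}

func main() {
	writeLockC := make(chan struct{}, 1)
	closeC := make(chan struct{})

	fmt.Println(acquire(writeLockC, closeC)) // <nil>: lock held

	close(closeC)
	// The slot is still occupied, so the next caller observes the close.
	fmt.Println(acquire(writeLockC, closeC)) // leveldb: closed
}
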

View File

@@ -12,7 +12,6 @@ import (
var (
ErrNotFound = errors.ErrNotFound
ErrReadOnly = errors.New("leveldb: read-only mode")
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
ErrIterReleased = errors.New("leveldb: iterator released")
ErrClosed = errors.New("leveldb: closed")

View File

@@ -206,7 +206,6 @@ func (p *DB) randHeight() (h int) {
return
}
// Must hold RW-lock if prev == true, as it uses the shared prevNode slice.
func (p *DB) findGE(key []byte, prev bool) (int, bool) {
node := 0
h := p.maxHeight - 1
@@ -303,7 +302,7 @@ func (p *DB) Put(key []byte, value []byte) error {
node := len(p.nodeData)
p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
for i, n := range p.prevNode[:h] {
m := n + nNext + i
m := n + 4 + i
p.nodeData = append(p.nodeData, p.nodeData[m])
p.nodeData[m] = node
}
@@ -435,22 +434,20 @@ func (p *DB) Len() int {
// Reset resets the DB to the initial empty state. This allows the buffer to be reused.
func (p *DB) Reset() {
p.mu.Lock()
p.rnd = rand.New(rand.NewSource(0xdeadbeef))
p.maxHeight = 1
p.n = 0
p.kvSize = 0
p.kvData = p.kvData[:0]
p.nodeData = p.nodeData[:nNext+tMaxHeight]
p.nodeData = p.nodeData[:4+tMaxHeight]
p.nodeData[nKV] = 0
p.nodeData[nKey] = 0
p.nodeData[nVal] = 0
p.nodeData[nHeight] = tMaxHeight
for n := 0; n < tMaxHeight; n++ {
p.nodeData[nNext+n] = 0
p.nodeData[4+n] = 0
p.prevNode[n] = 0
}
p.mu.Unlock()
}
// New creates a new initialized in-memory key/value DB. The capacity
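
The memdb is a skiplist; node heights are drawn from a geometric distribution so that higher levels thin out exponentially, which is why Reset only has to zero the first nNext+tMaxHeight slots of nodeData. A standalone sketch of the height draw, assuming the branching factor of 4 that goleveldb's memdb uses; the histogram check is illustrative:

package main

import (
	"fmt"
	"math/rand"
)

const tMaxHeight = 12

// randHeight draws a geometrically distributed node height with a
// branching factor of 4, the classic skiplist scheme memdb uses.
func randHeight(rnd *rand.Rand) int {
	const branching = 4
	h := 1
	for h < tMaxHeight && rnd.Int()%branching == 0 {
		h++
	}
	return h
}

func main() {
	rnd := rand.New(rand.NewSource(0xdeadbeef)) // the seed Reset uses
	counts := make([]int, tMaxHeight+1)
	for i := 0; i < 100000; i++ {
		counts[randHeight(rnd)]++
	}
	fmt.Println(counts[1:5]) // roughly 3/4 of nodes end up at height 1
}
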

View File

@@ -34,11 +34,10 @@ var (
DefaultCompactionTotalSize = 10 * MiB
DefaultCompactionTotalSizeMultiplier = 10.0
DefaultCompressionType = SnappyCompression
DefaultIteratorSamplingRate = 1 * MiB
DefaultMaxMemCompationLevel = 2
DefaultNumLevel = 7
DefaultOpenFilesCacher = LRUCacher
DefaultOpenFilesCacheCapacity = 500
DefaultMaxMemCompationLevel = 2
DefaultNumLevel = 7
DefaultWriteBuffer = 4 * MiB
DefaultWriteL0PauseTrigger = 12
DefaultWriteL0SlowdownTrigger = 8
@@ -154,7 +153,7 @@ type Options struct {
BlockCacher Cacher
// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
// Use -1 for zero, this has same effect as specifying NoCacher to BlockCacher.
// Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher.
//
// The default value is 8MiB.
BlockCacheCapacity int
@@ -250,11 +249,6 @@ type Options struct {
// The default value (DefaultCompression) uses snappy compression.
Compression Compression
// DisableBufferPool allows disabling the use of util.BufferPool functionality.
//
// The default value is false.
DisableBufferPool bool
// DisableBlockCache allows disabling the use of cache.Cache functionality on
// 'sorted table' block.
//
@@ -294,13 +288,6 @@ type Options struct {
// The default value is nil.
Filter filter.Filter
// IteratorSamplingRate defines approximate gap (in bytes) between read
// sampling of an iterator. The samples will be used to determine when
// compaction should be triggered.
//
// The default is 1MiB.
IteratorSamplingRate int
// MaxMemCompationLevel defines the maximum level a newly compacted 'memdb'
// will be pushed into if it doesn't create overlap. This should be less than
// NumLevel. Use -1 for level-0.
@@ -321,16 +308,11 @@ type Options struct {
OpenFilesCacher Cacher
// OpenFilesCacheCapacity defines the capacity of the open files caching.
// Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher.
// Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher.
//
// The default value is 500.
OpenFilesCacheCapacity int
// If true then opens DB in read-only mode.
//
// The default value is false.
ReadOnly bool
// Strict defines the DB strict level.
Strict Strict
@@ -373,9 +355,9 @@ func (o *Options) GetBlockCacher() Cacher {
}
func (o *Options) GetBlockCacheCapacity() int {
if o == nil || o.BlockCacheCapacity == 0 {
if o == nil || o.BlockCacheCapacity <= 0 {
return DefaultBlockCacheCapacity
} else if o.BlockCacheCapacity < 0 {
} else if o.BlockCacheCapacity == -1 {
return 0
}
return o.BlockCacheCapacity
@@ -482,20 +464,6 @@ func (o *Options) GetCompression() Compression {
return o.Compression
}
func (o *Options) GetDisableBufferPool() bool {
if o == nil {
return false
}
return o.DisableBufferPool
}
func (o *Options) GetDisableBlockCache() bool {
if o == nil {
return false
}
return o.DisableBlockCache
}
func (o *Options) GetDisableCompactionBackoff() bool {
if o == nil {
return false
@@ -524,19 +492,12 @@ func (o *Options) GetFilter() filter.Filter {
return o.Filter
}
func (o *Options) GetIteratorSamplingRate() int {
if o == nil || o.IteratorSamplingRate <= 0 {
return DefaultIteratorSamplingRate
}
return o.IteratorSamplingRate
}
func (o *Options) GetMaxMemCompationLevel() int {
level := DefaultMaxMemCompationLevel
if o != nil {
if o.MaxMemCompationLevel > 0 {
level = o.MaxMemCompationLevel
} else if o.MaxMemCompationLevel < 0 {
} else if o.MaxMemCompationLevel == -1 {
level = 0
}
}
@@ -564,21 +525,14 @@ func (o *Options) GetOpenFilesCacher() Cacher {
}
func (o *Options) GetOpenFilesCacheCapacity() int {
if o == nil || o.OpenFilesCacheCapacity == 0 {
if o == nil || o.OpenFilesCacheCapacity <= 0 {
return DefaultOpenFilesCacheCapacity
} else if o.OpenFilesCacheCapacity < 0 {
} else if o.OpenFilesCacheCapacity == -1 {
return 0
}
return o.OpenFilesCacheCapacity
}
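
The getters above encode a small convention: a zero capacity means "use the default", and -1 (any negative value, on the newer side of this diff) means "disabled", equivalent to passing NoCacher. A sketch of that convention with an illustrative capacity helper:

package main

import "fmt"

const defaultBlockCacheCapacity = 8 << 20 // 8 MiB

// capacity mirrors the getter convention on the newer side of this diff:
// zero means "use the default", any negative value means "disabled".
func capacity(configured int) int {
	switch {
	case configured == 0:
		return defaultBlockCacheCapacity
	case configured < 0:
		return 0 // disabled, same effect as NoCacher
	default:
		return configured
	}
}

func main() {
	fmt.Println(capacity(0))       // 8388608: default
	fmt.Println(capacity(-1))      // 0: disabled
	fmt.Println(capacity(1 << 20)) // 1048576: as configured
}
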
func (o *Options) GetReadOnly() bool {
if o == nil {
return false
}
return o.ReadOnly
}
func (o *Options) GetStrict(strict Strict) bool {
if o == nil || o.Strict == 0 {
return DefaultStrict&strict != 0

View File

@@ -11,8 +11,10 @@ import (
"io"
"os"
"sync"
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/journal"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
@@ -125,16 +127,11 @@ func (s *session) recover() (err error) {
return
}
defer reader.Close()
strict := s.o.GetStrict(opt.StrictManifest)
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
var (
// Options.
numLevel = s.o.GetNumLevel()
strict = s.o.GetStrict(opt.StrictManifest)
jr = journal.NewReader(reader, dropper{s, m}, strict, true)
rec = &sessionRecord{}
staging = s.stVersion.newStaging()
)
staging := s.stVersion.newStaging()
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
for {
var r io.Reader
r, err = jr.Next()
@@ -146,7 +143,7 @@ func (s *session) recover() (err error) {
return errors.SetFile(err, m)
}
err = rec.decode(r, numLevel)
err = rec.decode(r)
if err == nil {
// save compact pointers
for _, r := range rec.compPtrs {
@@ -209,3 +206,250 @@ func (s *session) commit(r *sessionRecord) (err error) {
return
}
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}
return newCompaction(s, v, level, t0)
}
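
pickCompaction chooses between two triggers: a level whose size score has reached 1 wins; failing that, a table nominated through cSeek (it exhausted its seek allowance) is compacted. A standalone sketch of that choice with illustrative inputs:

package main

import "fmt"

// pick mirrors the two compaction triggers in pickCompaction: a level
// whose size score reached 1 wins; otherwise a table nominated through
// cSeek, having exhausted its seek allowance, is compacted.
func pick(cScore float64, cLevel int, seekTable string) string {
	if cScore >= 1 {
		return fmt.Sprintf("size-triggered compaction at level %d", cLevel)
	}
	if seekTable != "" {
		return "seek-triggered compaction of " + seekTable
	}
	return "nothing to compact"
}

func main() {
	fmt.Println(pick(1.2, 1, ""))
	fmt.Println(pick(0.4, 0, "L2@17.sst"))
	fmt.Println(pick(0.4, 0, ""))
}
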
// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}
// Avoid compacting too much in one shot in case the range is large.
// But we cannot do this for level-0 since level-0 files can overlap
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
if total >= limit {
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
t0 = t0[:i+1]
break
}
}
}
return newCompaction(s, v, level, t0)
}
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}
// compaction represents a compaction state.
type compaction struct {
s *session
v *version
level int
tables [2]tFiles
maxGPOverlaps uint64
gp tFiles
gpi int
seenKey bool
gpOverlappedBytes uint64
imin, imax iKey
tPtrs []int
released bool
snapGPI int
snapSeenKey bool
snapGPOverlappedBytes uint64
snapTPtrs []int
}
func (c *compaction) save() {
c.snapGPI = c.gpi
c.snapSeenKey = c.seenKey
c.snapGPOverlappedBytes = c.gpOverlappedBytes
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}
func (c *compaction) restore() {
c.gpi = c.snapGPI
c.seenKey = c.snapSeenKey
c.gpOverlappedBytes = c.snapGPOverlappedBytes
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}
func (c *compaction) release() {
if !c.released {
c.released = true
c.v.release()
}
}
// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
t0, t1 := c.tables[0], c.tables[1]
imin, imax := t0.getRange(c.s.icmp)
// We expand t0 here just in case ukey hops across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
if len(t0) != len(c.tables[0]) {
imin, imax = t0.getRange(c.s.icmp)
}
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
// Get entire range covered by compaction.
amin, amax := append(t0, t1...).getRange(c.s.icmp)
// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
xmin, xmax := exp0.getRange(c.s.icmp)
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
imin, imax = xmin, xmax
t0, t1 = exp0, exp1
amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}
// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
if c.level+2 < c.s.o.GetNumLevel() {
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}
c.tables[0], c.tables[1] = t0, t1
c.imin, c.imax = imin, imax
}
// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}
func (c *compaction) baseLevelForKey(ukey []byte) bool {
for level, tables := range c.v.tables[c.level+2:] {
for c.tPtrs[level] < len(tables) {
t := tables[c.tPtrs[level]]
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
// We've advanced far enough.
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level.
return false
}
break
}
c.tPtrs[level]++
}
}
return true
}
func (c *compaction) shouldStopBefore(ikey iKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true
if c.gpOverlappedBytes > c.maxGPOverlaps {
// Too much overlap for current output; start new output.
c.gpOverlappedBytes = 0
return true
}
return false
}
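
shouldStopBefore bounds how much grandparent data a single output table may overlap: it accumulates the sizes of passed grandparent tables and cuts a new output file once the allowance is exceeded. A minimal standalone sketch of that accounting:

package main

import "fmt"

// stop mirrors shouldStopBefore: accumulate the sizes of grandparent
// tables the current output has overlapped and cut a new output file
// once the total exceeds the allowance.
func stop(overlapped *uint64, gpSize, maxOverlap uint64) bool {
	*overlapped += gpSize
	if *overlapped > maxOverlap {
		*overlapped = 0 // start a new output file
		return true
	}
	return false
}

func main() {
	var acc uint64
	for _, size := range []uint64{10, 20, 40, 5} {
		fmt.Println(stop(&acc, size, 50)) // false false true false
	}
}
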
// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
// Creates iterator slice.
icap := len(c.tables)
if c.level == 0 {
// Special case for level-0
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)
// Options.
ro := &opt.ReadOptions{
DontFillCache: true,
Strict: opt.StrictOverride,
}
strict := c.s.o.GetStrict(opt.StrictCompaction)
if strict {
ro.Strict |= opt.StrictReader
}
for i, tables := range c.tables {
if len(tables) == 0 {
continue
}
// Level-0 tables are not sorted and may overlap each other.
if c.level+i == 0 {
for _, t := range tables {
its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}
return iterator.NewMergedIterator(its, c.s.icmp, strict)
}

View File

@@ -1,287 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func (s *session) pickMemdbLevel(umin, umax []byte) int {
v := s.version()
defer v.release()
return v.pickMemdbLevel(umin, umax)
}
func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
// Create sorted table.
iter := mdb.NewIterator(nil)
defer iter.Release()
t, n, err := s.tops.createFrom(iter)
if err != nil {
return level, err
}
// Pick level and add to record.
if level < 0 {
level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
}
rec.addTableFile(level, t)
s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
return level, nil
}
// Pick a compaction based on current state; need external synchronization.
func (s *session) pickCompaction() *compaction {
v := s.version()
var level int
var t0 tFiles
if v.cScore >= 1 {
level = v.cLevel
cptr := s.stCompPtrs[level]
tables := v.tables[level]
for _, t := range tables {
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
t0 = append(t0, t)
break
}
}
if len(t0) == 0 {
t0 = append(t0, tables[0])
}
} else {
if p := atomic.LoadPointer(&v.cSeek); p != nil {
ts := (*tSet)(p)
level = ts.level
t0 = append(t0, ts.table)
} else {
v.release()
return nil
}
}
return newCompaction(s, v, level, t0)
}
// Create compaction from given level and range; need external synchronization.
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
v := s.version()
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
if len(t0) == 0 {
v.release()
return nil
}
// Avoid compacting too much in one shot in case the range is large.
// But we cannot do this for level-0 since level-0 files can overlap
// and we must not pick one file and drop another older file if the
// two files overlap.
if level > 0 {
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
total := uint64(0)
for i, t := range t0 {
total += t.size
if total >= limit {
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
t0 = t0[:i+1]
break
}
}
}
return newCompaction(s, v, level, t0)
}
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
c := &compaction{
s: s,
v: v,
level: level,
tables: [2]tFiles{t0, nil},
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
tPtrs: make([]int, s.o.GetNumLevel()),
}
c.expand()
c.save()
return c
}
// compaction represents a compaction state.
type compaction struct {
s *session
v *version
level int
tables [2]tFiles
maxGPOverlaps uint64
gp tFiles
gpi int
seenKey bool
gpOverlappedBytes uint64
imin, imax iKey
tPtrs []int
released bool
snapGPI int
snapSeenKey bool
snapGPOverlappedBytes uint64
snapTPtrs []int
}
func (c *compaction) save() {
c.snapGPI = c.gpi
c.snapSeenKey = c.seenKey
c.snapGPOverlappedBytes = c.gpOverlappedBytes
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
}
func (c *compaction) restore() {
c.gpi = c.snapGPI
c.seenKey = c.snapSeenKey
c.gpOverlappedBytes = c.snapGPOverlappedBytes
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
}
func (c *compaction) release() {
if !c.released {
c.released = true
c.v.release()
}
}
// Expand compacted tables; need external synchronization.
func (c *compaction) expand() {
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
t0, t1 := c.tables[0], c.tables[1]
imin, imax := t0.getRange(c.s.icmp)
// We expand t0 here just in case ukey hops across tables.
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
if len(t0) != len(c.tables[0]) {
imin, imax = t0.getRange(c.s.icmp)
}
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
// Get entire range covered by compaction.
amin, amax := append(t0, t1...).getRange(c.s.icmp)
// See if we can grow the number of inputs in "level" without
// changing the number of "level+1" files we pick up.
if len(t1) > 0 {
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
xmin, xmax := exp0.getRange(c.s.icmp)
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
if len(exp1) == len(t1) {
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
imin, imax = xmin, xmax
t0, t1 = exp0, exp1
amin, amax = append(t0, t1...).getRange(c.s.icmp)
}
}
}
// Compute the set of grandparent files that overlap this compaction
// (parent == level+1; grandparent == level+2)
if c.level+2 < c.s.o.GetNumLevel() {
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
}
c.tables[0], c.tables[1] = t0, t1
c.imin, c.imax = imin, imax
}
// Check whether compaction is trivial.
func (c *compaction) trivial() bool {
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
}
func (c *compaction) baseLevelForKey(ukey []byte) bool {
for level, tables := range c.v.tables[c.level+2:] {
for c.tPtrs[level] < len(tables) {
t := tables[c.tPtrs[level]]
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
// We've advanced far enough.
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
// Key falls in this file's range, so definitely not base level.
return false
}
break
}
c.tPtrs[level]++
}
}
return true
}
func (c *compaction) shouldStopBefore(ikey iKey) bool {
for ; c.gpi < len(c.gp); c.gpi++ {
gp := c.gp[c.gpi]
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
break
}
if c.seenKey {
c.gpOverlappedBytes += gp.size
}
}
c.seenKey = true
if c.gpOverlappedBytes > c.maxGPOverlaps {
// Too much overlap for current output; start new output.
c.gpOverlappedBytes = 0
return true
}
return false
}
// Creates an iterator.
func (c *compaction) newIterator() iterator.Iterator {
// Creates iterator slice.
icap := len(c.tables)
if c.level == 0 {
// Special case for level-0.
icap = len(c.tables[0]) + 1
}
its := make([]iterator.Iterator, 0, icap)
// Options.
ro := &opt.ReadOptions{
DontFillCache: true,
Strict: opt.StrictOverride,
}
strict := c.s.o.GetStrict(opt.StrictCompaction)
if strict {
ro.Strict |= opt.StrictReader
}
for i, tables := range c.tables {
if len(tables) == 0 {
continue
}
// Level-0 tables are not sorted and may overlap each other.
if c.level+i == 0 {
for _, t := range tables {
its = append(its, c.s.tops.newIterator(t, nil, ro))
}
} else {
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
its = append(its, it)
}
}
return iterator.NewMergedIterator(its, c.s.icmp, strict)
}

View File

@@ -52,6 +52,8 @@ type dtRecord struct {
}
type sessionRecord struct {
numLevel int
hasRec int
comparer string
journalNum uint64
@@ -228,7 +230,7 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
return x
}
func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
if p.err != nil {
return 0
}
@@ -236,14 +238,14 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) i
if p.err != nil {
return 0
}
if x >= uint64(numLevel) {
if x >= uint64(p.numLevel) {
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
return 0
}
return int(x)
}
func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
func (p *sessionRecord) decode(r io.Reader) error {
br, ok := r.(byteReader)
if !ok {
br = bufio.NewReader(r)
@@ -284,13 +286,13 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
p.setSeqNum(x)
}
case recCompPtr:
level := p.readLevel("comp-ptr.level", br, numLevel)
level := p.readLevel("comp-ptr.level", br)
ikey := p.readBytes("comp-ptr.ikey", br)
if p.err == nil {
p.addCompPtr(level, iKey(ikey))
}
case recAddTable:
level := p.readLevel("add-table.level", br, numLevel)
level := p.readLevel("add-table.level", br)
num := p.readUvarint("add-table.num", br)
size := p.readUvarint("add-table.size", br)
imin := p.readBytes("add-table.imin", br)
@@ -299,7 +301,7 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
p.addTable(level, num, size, imin, imax)
}
case recDelTable:
level := p.readLevel("del-table.level", br, numLevel)
level := p.readLevel("del-table.level", br)
num := p.readUvarint("del-table.num", br)
if p.err == nil {
p.delTable(level, num)

View File

@@ -19,8 +19,8 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
if err != nil {
return
}
v2 := &sessionRecord{}
err = v.decode(b, opt.DefaultNumLevel)
v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
err = v.decode(b)
if err != nil {
return
}
@@ -34,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
func TestSessionRecord_EncodeDecode(t *testing.T) {
big := uint64(1) << 50
v := &sessionRecord{}
v := &sessionRecord{numLevel: opt.DefaultNumLevel}
i := uint64(0)
test := func() {
res, err := decodeEncode(v)

View File

@@ -182,7 +182,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
defer v.release()
}
if rec == nil {
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
}
s.fillRecord(rec, true)
v.fillRecord(rec)

View File

@@ -42,8 +42,6 @@ type tsOp uint
const (
tsOpOpen tsOp = iota
tsOpCreate
tsOpReplace
tsOpRemove
tsOpRead
tsOpReadAt
tsOpWrite
@@ -243,10 +241,6 @@ func (tf tsFile) Replace(newfile storage.File) (err error) {
if err != nil {
return
}
if tf.shouldErr(tsOpReplace) {
err = errors.New("leveldb.testStorage: emulated create error")
return
}
err = tf.File.Replace(newfile.(tsFile).File)
if err != nil {
ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
@@ -264,10 +258,6 @@ func (tf tsFile) Remove() (err error) {
if err != nil {
return
}
if tf.shouldErr(tsOpRemove) {
err = errors.New("leveldb.testStorage: emulated create error")
return
}
err = tf.File.Remove()
if err != nil {
ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)

View File

@@ -441,26 +441,22 @@ func newTableOps(s *session) *tOps {
var (
cacher cache.Cacher
bcache *cache.Cache
bpool *util.BufferPool
)
if s.o.GetOpenFilesCacheCapacity() > 0 {
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
}
if !s.o.GetDisableBlockCache() {
if !s.o.DisableBlockCache {
var bcacher cache.Cacher
if s.o.GetBlockCacheCapacity() > 0 {
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
}
bcache = cache.NewCache(bcacher)
}
if !s.o.GetDisableBufferPool() {
bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
}
return &tOps{
s: s,
cache: cache.NewCache(cacher),
bcache: bcache,
bpool: bpool,
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
}
}

View File

@@ -14,7 +14,7 @@ import (
"strings"
"sync"
"github.com/golang/snappy"
"github.com/syndtr/gosnappy/snappy"
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/comparer"

View File

@@ -12,7 +12,7 @@ import (
"fmt"
"io"
"github.com/golang/snappy"
"github.com/syndtr/gosnappy/snappy"
"github.com/syndtr/goleveldb/leveldb/comparer"
"github.com/syndtr/goleveldb/leveldb/filter"
@@ -167,7 +167,11 @@ func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh b
if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
w.compressionScratch = make([]byte, n)
}
compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
var compressed []byte
compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes())
if err != nil {
return
}
n := len(compressed)
b = compressed[:n+blockTrailerLen]
b[n] = blockTypeSnappyCompression

View File

@@ -136,8 +136,9 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
if !tseek {
if tset == nil {
tset = &tSet{level, t}
} else {
} else if tset.table.consumeSeek() <= 0 {
tseek = true
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
}
@@ -202,28 +203,6 @@ func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byt
return true
})
if tseek && tset.table.consumeSeek() <= 0 {
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
return
}
func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
var tset *tSet
v.walkOverlapping(ikey, func(level int, t *tFile) bool {
if tset == nil {
tset = &tSet{level, t}
return true
} else {
if tset.table.consumeSeek() <= 0 {
tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
}
return false
}
}, nil)
return
}
@@ -300,7 +279,7 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
return
}
func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
func (v *version) pickLevel(umin, umax []byte) (level int) {
if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
var overlaps tFiles
maxLevel := v.s.o.GetMaxMemCompationLevel()

View File

@@ -0,0 +1,124 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
)
// ErrCorrupt reports that the input is invalid.
var ErrCorrupt = errors.New("snappy: corrupt input")
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n == 0 {
return 0, 0, ErrCorrupt
}
if uint64(int(v)) != v {
return 0, 0, errors.New("snappy: decoded block is too large")
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s += 1
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}
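
Decode first parses the uvarint length header via decodedLen, then walks the literal/copy tags. A small runnable illustration of the header step using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A snappy block starts with the uvarint-encoded length of the
	// decoded data; decodedLen parses it before the tag loop runs.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 70000)
	blockLen, headerLen := binary.Uvarint(buf[:n])
	fmt.Printf("decoded length %d, header occupies %d bytes\n", blockLen, headerLen)
}
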

View File

@@ -6,7 +6,6 @@ package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
@@ -79,7 +78,7 @@ func emitCopy(dst []byte, offset, length int) int {
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
func Encode(dst, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
@@ -92,7 +91,7 @@ func Encode(dst, src []byte) []byte {
if len(src) != 0 {
d += emitLiteral(dst[d:], src)
}
return dst[:d]
return dst[:d], nil
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
@@ -145,7 +144,7 @@ func Encode(dst, src []byte) []byte {
if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return dst[:d]
return dst[:d], nil
}
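
This hunk swaps Encode between the two snappy packages: github.com/golang/snappy's Encode cannot fail and returns only []byte, while the gosnappy variant returns ([]byte, error), which is why every caller in this diff gains or loses an error check. A hedged roundtrip sketch against the golang/snappy API:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hello "), 64)
	enc := snappy.Encode(nil, src) // never fails in golang/snappy
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
		len(src), len(enc), bytes.Equal(dec, src))
}
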
// MaxEncodedLen returns the maximum length of a snappy block, given its
@@ -173,82 +172,3 @@ func MaxEncodedLen(srcLen int) int {
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
buf [checksumSize + chunkHeaderSize]byte
wroteHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody := Encode(w.enc, uncompressed)
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err := w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}

View File

@@ -5,13 +5,9 @@
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@@ -40,29 +36,3 @@ const (
tagCopy2 = 0x02
tagCopy4 = 0x03
)
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}

View File

@@ -18,13 +18,14 @@ import (
"testing"
)
var (
download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
testdata = flag.String("testdata", "testdata", "Directory containing the test data")
)
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
func roundtrip(b, ebuf, dbuf []byte) error {
d, err := Decode(dbuf, Encode(ebuf, b))
e, err := Encode(ebuf, b)
if err != nil {
return fmt.Errorf("encoding error: %v", err)
}
d, err := Decode(dbuf, e)
if err != nil {
return fmt.Errorf("decoding error: %v", err)
}
@@ -54,11 +55,11 @@ func TestSmallCopy(t *testing.T) {
}
func TestSmallRand(t *testing.T) {
rng := rand.New(rand.NewSource(27354294))
rand.Seed(27354294)
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(rng.Uint32())
for i, _ := range b {
b[i] = uint8(rand.Uint32())
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
@@ -69,7 +70,7 @@ func TestSmallRand(t *testing.T) {
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
for i, _ := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
@@ -78,132 +79,11 @@ func TestSmallRegular(t *testing.T) {
}
}
func TestInvalidVarint(t *testing.T) {
data := []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00")
if _, err := DecodedLen(data); err != ErrCorrupt {
t.Errorf("DecodedLen: got %v, want ErrCorrupt", err)
}
if _, err := Decode(nil, data); err != ErrCorrupt {
t.Errorf("Decode: got %v, want ErrCorrupt", err)
}
}
func cmp(a, b []byte) error {
if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
}
for i := range a {
if a[i] != b[i] {
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
}
}
return nil
}
func TestFramingFormat(t *testing.T) {
// src is comprised of alternating 1e5-sized sequences of random
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
// because it is larger than maxUncompressedChunkLen (64k).
src := make([]byte, 1e6)
rng := rand.New(rand.NewSource(1))
for i := 0; i < 10; i++ {
if i%2 == 0 {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(rng.Intn(256))
}
} else {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(i)
}
}
}
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(src); err != nil {
t.Fatalf("Write: encoding: %v", err)
}
dst, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Fatalf("ReadAll: decoding: %v", err)
}
if err := cmp(dst, src); err != nil {
t.Fatal(err)
}
}
func TestReaderReset(t *testing.T) {
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(gold); err != nil {
t.Fatalf("Write: %v", err)
}
encoded, invalid, partial := buf.String(), "invalid", "partial"
r := NewReader(nil)
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
if s == partial {
r.Reset(strings.NewReader(encoded))
if _, err := r.Read(make([]byte, 101)); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
continue
}
r.Reset(strings.NewReader(s))
got, err := ioutil.ReadAll(r)
switch s {
case encoded:
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if err := cmp(got, gold); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
case invalid:
if err == nil {
t.Errorf("#%d: got nil error, want non-nil", i)
continue
}
}
}
}
func TestWriterReset(t *testing.T) {
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
var gots, wants [][]byte
const n = 20
w, failed := NewWriter(nil), false
for i := 0; i <= n; i++ {
buf := new(bytes.Buffer)
w.Reset(buf)
want := gold[:len(gold)*i/n]
if _, err := w.Write(want); err != nil {
t.Errorf("#%d: Write: %v", i, err)
failed = true
continue
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("#%d: ReadAll: %v", i, err)
failed = true
continue
}
gots = append(gots, got)
wants = append(wants, want)
}
if failed {
return
}
for i := range gots {
if err := cmp(gots[i], wants[i]); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
}
func benchDecode(b *testing.B, src []byte) {
encoded := Encode(nil, src)
encoded, err := Encode(nil, src)
if err != nil {
b.Fatal(err)
}
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
b.ResetTimer()
@@ -222,10 +102,10 @@ func benchEncode(b *testing.B, src []byte) {
}
}
func readFile(b testing.TB, filename string) []byte {
func readFile(b *testing.B, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Skipf("skipping benchmark: %v", err)
b.Fatalf("failed reading %s: %s", filename, err)
}
if len(src) == 0 {
b.Fatalf("%s has zero length", filename)
@@ -264,7 +144,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
@@ -272,36 +152,29 @@ var testFiles = []struct {
}{
{"html", "html"},
{"urls", "urls.10K"},
{"jpg", "fireworks.jpeg"},
{"jpg_200", "fireworks.jpeg"},
{"pdf", "paper-100k.pdf"},
{"jpg", "house.jpg"},
{"pdf", "mapreduce-osdi-1.pdf"},
{"html4", "html_x_4"},
{"cp", "cp.html"},
{"c", "fields.c"},
{"lsp", "grammar.lsp"},
{"xls", "kennedy.xls"},
{"txt1", "alice29.txt"},
{"txt2", "asyoulik.txt"},
{"txt3", "lcet10.txt"},
{"txt4", "plrabn12.txt"},
{"bin", "ptt5"},
{"sum", "sum"},
{"man", "xargs.1"},
{"pb", "geo.protodata"},
{"gaviota", "kppkn.gtb"},
}
// The test data files are present at this canonical URL.
const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
func downloadTestdata(b *testing.B, basename string) (errRet error) {
filename := filepath.Join(*testdata, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil
}
if !*download {
b.Skipf("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create testdata: %s", err)
}
const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
func downloadTestdata(basename string) (errRet error) {
filename := filepath.Join("testdata", basename)
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -312,27 +185,36 @@ func downloadTestdata(b *testing.B, basename string) (errRet error) {
os.Remove(filename)
}
}()
url := baseURL + basename
resp, err := http.Get(url)
resp, err := http.Get(baseURL + basename)
if err != nil {
return fmt.Errorf("failed to download %s: %s", url, err)
return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
}
defer resp.Body.Close()
if s := resp.StatusCode; s != http.StatusOK {
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
}
_, err = io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
return fmt.Errorf("failed to write %s: %s", filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
if err := downloadTestdata(b, testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
filename := filepath.Join("testdata", testFiles[n].filename)
if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
if !*download {
b.Fatal("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
b.Fatalf("failed to create testdata: %s", err)
}
for _, tf := range testFiles {
if err := downloadTestdata(tf.filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
}
}
data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
data := readFile(b, filename)
if decode {
benchDecode(b, data)
} else {
@@ -353,6 +235,12 @@ func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
@@ -365,3 +253,9 @@ func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }

View File

@@ -1,7 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- tip

View File

@@ -1,19 +0,0 @@
Copyright (c) 2014-2015 Barracuda Networks, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,45 +0,0 @@
Suture
======
[![Build Status](https://travis-ci.org/thejerf/suture.png?branch=master)](https://travis-ci.org/thejerf/suture)
Suture provides Erlang-ish supervisor trees for Go. "Supervisor trees" ->
"sutree" -> "suture" -> holds your code together when it's trying to die.
This is intended to be a production-quality library going into code that I
will be very early on the phone tree to support when it goes down. However,
it has not been deployed into something quite that serious yet. (I will
update this statement when that changes.)
It is intended to deal gracefully with the real failure cases that can
occur with supervision trees (such as burning all your CPU time endlessly
restarting dead services), while also making no unnecessary demands on the
"service" code, and providing hooks to perform adequate logging with in a
production environment.
[A blog post describing the design decisions](http://www.jerf.org/iri/post/2930)
is available.
This module is fully covered with [godoc](http://godoc.org/github.com/thejerf/suture),
including an example, usage, and everything else you might expect from a
README.md on GitHub. (DRY.)
This is not currently tagged with particular git tags for Go as this is
currently considered to be alpha code. As I move this into production and
feel more confident about it, I'll give it relevant tags.
Code Signing
------------
Starting with the commit after ac7cf8591b, I will be signing this repository
with the ["jerf" keybase account](https://keybase.io/jerf).
Aspiration
----------
One of the big wins the Erlang community has with their pervasive OTP
support is that it makes it easy for them to distribute libraries that
easily fit into the OTP paradigm. It ought to someday be considered a good
idea to distribute libraries that provide some sort of supervisor tree
functionality out of the box. It is possible to provide this functionality
without explicitly depending on the Suture library.

View File

@@ -1,12 +0,0 @@
#!/bin/bash
GOLINTOUT=$(golint *go)
if [ ! -z "$GOLINTOUT" -o "$?" != 0 ]; then
echo golint failed:
echo $GOLINTOUT
exit 1
fi
go test

View File

@@ -1,671 +0,0 @@
/*
Package suture provides Erlang-like supervisor trees.
This implements Erlang-esque supervisor trees, as adapted for Go. This is
intended to be an industrial-strength implementation, but it has not yet
been deployed in a hostile environment. (It's headed there, though.)
Supervisor Tree -> SuTree -> suture -> holds your code together when it's
trying to fall apart.
Why use Suture?
* You want to write bullet-resistant services that will remain available
despite unforeseen failure.
* You need the code to be smart enough not to consume 100% of the CPU
restarting things.
* You want to easily compose multiple such services in one program.
* You want the Erlang programmers to stop lording their supervision
trees over you.
Suture has 100% test coverage, and is golint clean. This doesn't prove it
free of bugs, but it shows I care.
A blog post describing the design decisions is available at
http://www.jerf.org/iri/post/2930 .
Using Suture
To idiomatically use Suture, create a Supervisor which is your top level
"application" supervisor. This will often occur in your program's "main"
function.
Create "Service"s, which implement the Service interface. .Add() them
to your Supervisor. Supervisors are also services, so you can create a
tree structure here, depending on the exact combination of restarts
you want to create.
As a special case, when adding Supervisors to Supervisors, the "sub"
supervisor will have the "super" supervisor's Log function copied.
This allows you to set one log function on the "top" supervisor, and
have it propagate down to all the sub-supervisors. This also allows
libraries or modules to provide Supervisors without having to commit
their users to a particular logging method.
Finally, as what is probably the last line of your main() function, call
.Serve() on your top level supervisor. This will start all the services
you've defined.
See the Example for an example, using a simple service that serves out
incrementing integers.
*/
package suture
import (
"errors"
"fmt"
"log"
"math"
"runtime"
"sync/atomic"
"time"
)
const (
notRunning = iota
normal
paused
)
type supervisorID uint32
type serviceID uint32
var currentSupervisorID uint32
// ErrWrongSupervisor is returned by the (*Supervisor).Remove method
// if you pass a ServiceToken from the wrong Supervisor.
var ErrWrongSupervisor = errors.New("wrong supervisor for this service token, no service removed")
// ServiceToken is an opaque identifier that can be used to terminate a service that
// has been Add()ed to a Supervisor.
type ServiceToken struct {
id uint64
}
/*
Supervisor is the core type of the module that represents a Supervisor.
Supervisors should be constructed either by New or NewSimple.
Once constructed, a Supervisor should be started in one of three ways:
1. Calling .Serve().
2. Calling .ServeBackground().
3. Adding it to an existing Supervisor.
Calling Serve will cause the supervisor to run until it is shut down by
an external user calling Stop() on it. If that never happens, it simply
runs forever. I suggest creating your services in Supervisors, then making
the Serve() call on your top-level Supervisor the last line of your main
func.
Calling ServeBackground will CORRECTLY start the supervisor running in a
new goroutine. You do not want to just:
go supervisor.Serve()
because that will briefly create a race condition as it starts up, if you
try to .Add() services immediately afterward.
*/
type Supervisor struct {
Name string
id supervisorID
failureDecay float64
failureThreshold float64
failureBackoff time.Duration
timeout time.Duration
log func(string)
services map[serviceID]Service
lastFail time.Time
failures float64
restartQueue []serviceID
state uint8
serviceCounter serviceID
control chan supervisorMessage
resumeTimer <-chan time.Time
// The testing uses the ability to grab these individual logging functions
// and get inside of suture's handling at a deep level.
// If you ever come up with some need to get into these, submit a pull
// request to make them public and some smidge of justification, and
// I'll happily do it.
// But since I've now changed the signature on these once, I'm glad I
// didn't start with them public... :)
logBadStop func(*Supervisor, Service)
logFailure func(supervisor *Supervisor, service Service, currentFailures float64, failureThreshold float64, restarting bool, error interface{}, stacktrace []byte)
logBackoff func(*Supervisor, bool)
// avoid a dependency on github.com/thejerf/abtime by just implementing
// a minimal chunk.
getNow func() time.Time
getResume func(time.Duration) <-chan time.Time
}
// Spec is used to pass arguments to the New function to create a
// supervisor. See the New function for full documentation.
type Spec struct {
Log func(string)
FailureDecay float64
FailureThreshold float64
FailureBackoff time.Duration
Timeout time.Duration
}
/*
New is the full constructor function for a supervisor.
The name is a friendly human name for the supervisor, used in logging. Suture
does not care if this is unique, but it is good for your sanity if it is.
If not set, the following values are used:
* Log: A function is created that uses log.Print.
* FailureDecay: 30 seconds
* FailureThreshold: 5 failures
* FailureBackoff: 15 seconds
* Timeout: 10 seconds
The Log function will be called when errors occur. Suture will log the
following:
* When a service has failed, with a descriptive message about the
current backoff status, and whether it was immediately restarted
* When the supervisor has gone into its backoff mode, and when it
exits it
* When a service fails to stop
The FailureDecay, FailureThreshold, and FailureBackoff settings control how
failures are handled, in order to avoid the supervisor failure case where
the program does nothing but restart failed services. If you do not
care how failures behave, the default values should be fine for the
vast majority of services, but if you want the details:
The supervisor tracks the number of failures that have occurred, with an
exponential decay on the count. Every FailureDecay seconds, the number of
failures that have occurred is cut in half. (This is done smoothly with an
exponential function.) When a failure occurs, the number of failures
is incremented by one. When the number of failures passes the
FailureThreshold, the entire service waits for FailureBackoff seconds
before attempting any further restarts, at which point it resets its
failure count to zero.
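As a worked example of the decay arithmetic (this mirrors the update in
handleFailedService; it is not additional API): with FailureDecay = 30, a
failure arriving t seconds after the previous one updates the count as
failures = failures*0.5^(t/30) + 1, so a count of 2 followed by another
failure 30 seconds later becomes 2*0.5 + 1 = 2.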
Timeout is how long Suture will wait for a service to properly terminate.
*/
func New(name string, spec Spec) (s *Supervisor) {
s = new(Supervisor)
s.Name = name
s.id = supervisorID(atomic.AddUint32(&currentSupervisorID, 1))
if spec.Log == nil {
s.log = func(msg string) {
log.Printf("Supervisor %s: %s", s.Name, msg)
}
} else {
s.log = spec.Log
}
if spec.FailureDecay == 0 {
s.failureDecay = 30
} else {
s.failureDecay = spec.FailureDecay
}
if spec.FailureThreshold == 0 {
s.failureThreshold = 5
} else {
s.failureThreshold = spec.FailureThreshold
}
if spec.FailureBackoff == 0 {
s.failureBackoff = time.Second * 15
} else {
s.failureBackoff = spec.FailureBackoff
}
if spec.Timeout == 0 {
s.timeout = time.Second * 10
} else {
s.timeout = spec.Timeout
}
// overriding these allows for testing the threshold behavior
s.getNow = time.Now
s.getResume = time.After
s.control = make(chan supervisorMessage)
s.services = make(map[serviceID]Service)
s.restartQueue = make([]serviceID, 0, 1)
s.resumeTimer = make(chan time.Time)
// set up the default logging handlers
s.logBadStop = func(supervisor *Supervisor, service Service) {
s.log(fmt.Sprintf("%s: Service %s failed to terminate in a timely manner", serviceName(supervisor), serviceName(service)))
}
s.logFailure = func(supervisor *Supervisor, service Service, failures float64, threshold float64, restarting bool, err interface{}, st []byte) {
var errString string
e, canError := err.(error)
if canError {
errString = e.Error()
} else {
errString = fmt.Sprintf("%#v", err)
}
s.log(fmt.Sprintf("%s: Failed service '%s' (%f failures of %f), restarting: %#v, error: %s, stacktrace: %s", serviceName(supervisor), serviceName(service), failures, threshold, restarting, errString, string(st)))
}
s.logBackoff = func(s *Supervisor, entering bool) {
if entering {
s.log("Entering the backoff state.")
} else {
s.log("Exiting backoff state.")
}
}
return
}
func serviceName(service Service) (serviceName string) {
stringer, canStringer := service.(fmt.Stringer)
if canStringer {
serviceName = stringer.String()
} else {
serviceName = fmt.Sprintf("%#v", service)
}
return
}
// NewSimple is a convenience function to create a service with just a name
// and the sensible defaults.
func NewSimple(name string) *Supervisor {
return New(name, Spec{})
}
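// A minimal wiring sketch of the lifecycle described above. MyService is an
// illustrative type assumed to implement Service; it is not part of this
// package.
//
//	sup := NewSimple("app")        // supervisor with the default Spec
//	token := sup.Add(&MyService{}) // queued now, started once the supervisor serves
//	sup.ServeBackground()          // runs Serve in its own goroutine; Add is safe afterward
//	...
//	_ = sup.Remove(token) // stop and remove just this one service
//	sup.Stop()            // stop the supervisor and every remaining service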
/*
Service is the interface that describes a service to a Supervisor.
Serve Method
The Serve method is called by a Supervisor to start the service.
The service should execute within the goroutine that this is
called in. If this function either returns or panics, the Supervisor
will call it again.
A Serve method SHOULD do as much cleanup of the state as possible,
to prevent any corruption in the previous state from crashing the
service again.
Stop Method
This method is used by the supervisor to stop the service. Calling this
directly on a Service given to a Supervisor will simply result in the
Service being restarted; use the Supervisor's .Remove(ServiceToken) method
to stop a service. A supervisor will call .Stop() only once. Thus, it may
be as destructive as it likes to get the service to stop.
Once Stop has been called on a Service, the Service SHOULD NOT be
reused in any other supervisor! Because of the impossibility of
guaranteeing that the service has actually stopped in Go, you can't
prove that you won't be starting two goroutines using the exact
same memory to store state, causing completely unpredictable behavior.
Stop should not return until the service has actually stopped.
"Stopped" here is defined as "the service will stop servicing any
further requests in the future". For instance, a common implementation
is to receive a message on a dedicated "stop" channel and return
immediately. Once the stop command has been processed, the service is
stopped.
Another common Stop implementation is to forcibly close an open socket
or other resource, which will cause detectable errors to manifest in the
service code. Bear in mind that to perfectly correctly use this
approach requires a bit more work to handle the chance of a Stop
command coming in before the resource has been created.
If a service does not Stop within the supervisor's timeout duration, a log
entry will be made with a descriptive string to that effect. This does
not guarantee that the service is hung; it may still get around to being
properly stopped in the future. Until the service is fully stopped,
both the service and the spawned goroutine trying to stop it will be
"leaked".
Stringer Interface
It is not mandatory to implement the fmt.Stringer interface on your
service, but if your Service does happen to implement that, the log
messages that describe your service will use that when naming the
service. Otherwise, you'll see the GoString of your service object,
obtained via fmt.Sprintf("%#v", service).
*/
type Service interface {
Serve()
Stop()
}
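// As a sketch of the Stop contract described above (illustrative, not part
// of the package): a service that owns an unbuffered stop channel can make
// Stop block until Serve has really finished.
//
//	type ticker struct {
//		stop chan struct{} // construct with make(chan struct{}); unbuffered
//	}
//
//	func (t *ticker) Serve() {
//		for {
//			select {
//			case <-time.After(time.Second):
//				// periodic work goes here
//			case <-t.stop:
//				return // no further requests will be serviced
//			}
//		}
//	}
//
//	func (t *ticker) Stop() {
//		// The unbuffered send completes only once Serve has received it,
//		// so Stop does not return until the service has actually stopped.
//		t.stop <- struct{}{}
//	}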
/*
Add adds a service to this supervisor.
If the supervisor is currently running, the service will be started
immediately. If the supervisor is not currently running, the service
will be started when the supervisor is.
The returned ServiceID may be passed to the Remove method of the Supervisor
to terminate the service.
As a special behavior, if the service added is itself a supervisor, the
supervisor being added will copy the Log function from the Supervisor it
is being added to. This allows factoring out providing a Supervisor
from its logging.
*/
func (s *Supervisor) Add(service Service) ServiceToken {
if s == nil {
panic("can't add service to nil *suture.Supervisor")
}
if supervisor, isSupervisor := service.(*Supervisor); isSupervisor {
supervisor.logBadStop = s.logBadStop
supervisor.logFailure = s.logFailure
supervisor.logBackoff = s.logBackoff
}
if s.state == notRunning {
id := s.serviceCounter
s.serviceCounter++
s.services[id] = service
s.restartQueue = append(s.restartQueue, id)
return ServiceToken{uint64(s.id)<<32 | uint64(id)}
}
response := make(chan serviceID)
s.control <- addService{service, response}
return ServiceToken{uint64(s.id)<<32 | uint64(<-response)}
}
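// (The shifts above pack the 32-bit supervisor ID into the high half of the
// token and the service ID into the low half; Remove recovers them with
// id>>32 and id&0xffffffff, which is how a token from the wrong supervisor
// is detected.)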
// ServeBackground starts running a supervisor in its own goroutine. This
// method does not return until it is safe to use .Add() on the Supervisor.
func (s *Supervisor) ServeBackground() {
go s.Serve()
s.sync()
}
/*
Serve starts the supervisor. You should call this on the top-level supervisor,
but nothing else.
*/
func (s *Supervisor) Serve() {
if s == nil {
panic("Can't serve with a nil *suture.Supervisor")
}
if s.id == 0 {
panic("Can't call Serve on an incorrectly-constructed *suture.Supervisor")
}
defer func() {
s.state = notRunning
}()
if s.state != notRunning {
// FIXME: Don't explain why I don't need a semaphore, just use one
// This doesn't use a semaphore because it's just a sanity check.
panic("Running a supervisor while it is already running?")
}
s.state = normal
// for all the services I currently know about, start them
for _, id := range s.restartQueue {
service, present := s.services[id]
if present {
s.runService(service, id)
}
}
s.restartQueue = make([]serviceID, 0, 1)
for {
select {
case m := <-s.control:
switch msg := m.(type) {
case serviceFailed:
s.handleFailedService(msg.id, msg.err, msg.stacktrace)
case serviceEnded:
service, monitored := s.services[msg.id]
if monitored {
s.handleFailedService(msg.id, fmt.Sprintf("%s returned unexpectedly", service), []byte("[unknown stack trace]"))
}
case addService:
id := s.serviceCounter
s.serviceCounter++
s.services[id] = msg.service
s.runService(msg.service, id)
msg.response <- id
case removeService:
s.removeService(msg.id)
case stopSupervisor:
for id := range s.services {
s.removeService(id)
}
return
case listServices:
services := []Service{}
for _, service := range s.services {
services = append(services, service)
}
msg.c <- services
case syncSupervisor:
// this does nothing on purpose; its sole purpose is to
// introduce a sync point via the channel receive
case panicSupervisor:
// used only by tests
panic("Panicking as requested!")
}
case _ = <-s.resumeTimer:
// We're resuming normal operation after a pause due to
// excessive thrashing
// FIXME: Ought to permit some spacing of these functions, rather
// than simply hammering through them
s.state = normal
s.failures = 0
s.logBackoff(s, false)
for _, id := range s.restartQueue {
service, present := s.services[id]
if present {
s.runService(service, id)
}
}
s.restartQueue = make([]serviceID, 0, 1)
}
}
}
func (s *Supervisor) handleFailedService(id serviceID, err interface{}, stacktrace []byte) {
now := s.getNow()
if s.lastFail.IsZero() {
s.lastFail = now
s.failures = 1.0
} else {
sinceLastFail := now.Sub(s.lastFail).Seconds()
intervals := sinceLastFail / s.failureDecay
s.failures = s.failures*math.Pow(.5, intervals) + 1
}
if s.failures > s.failureThreshold {
s.state = paused
s.logBackoff(s, true)
s.resumeTimer = s.getResume(s.failureBackoff)
}
s.lastFail = now
failedService, monitored := s.services[id]
// It is possible for a service to be no longer monitored
// by the time we get here. In that case, just ignore it.
if monitored {
if s.state == normal {
s.runService(failedService, id)
s.logFailure(s, failedService, s.failures, s.failureThreshold, true, err, stacktrace)
} else {
// FIXME: When restarting, check that the service still
// exists (it may have been stopped in the meantime)
s.restartQueue = append(s.restartQueue, id)
s.logFailure(s, failedService, s.failures, s.failureThreshold, false, err, stacktrace)
}
}
}
func (s *Supervisor) runService(service Service, id serviceID) {
go func() {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 65535)
written := runtime.Stack(buf, false)
buf = buf[:written]
s.fail(id, r, buf)
}
}()
service.Serve()
s.serviceEnded(id)
}()
}
func (s *Supervisor) removeService(id serviceID) {
service, present := s.services[id]
if present {
delete(s.services, id)
go func() {
successChan := make(chan bool)
go func() {
service.Stop()
successChan <- true
}()
failChan := s.getResume(s.timeout)
select {
case <-successChan:
// Life is good!
case <-failChan:
s.logBadStop(s, service)
}
}()
}
}
// String implements the fmt.Stringer interface.
func (s *Supervisor) String() string {
return s.Name
}
// sum type pattern for type-safe message passing; see
// http://www.jerf.org/iri/post/2917
type supervisorMessage interface {
isSupervisorMessage()
}
/*
Remove will remove the given service from the Supervisor, and attempt to Stop() it.
The ServiceID token comes from the Add() call.
*/
func (s *Supervisor) Remove(id ServiceToken) error {
sID := supervisorID(id.id >> 32)
if sID != s.id {
return ErrWrongSupervisor
}
s.control <- removeService{serviceID(id.id & 0xffffffff)}
return nil
}
/*
Services returns a []Service containing a snapshot of the services this
Supervisor is managing.
*/
func (s *Supervisor) Services() []Service {
ls := listServices{make(chan []Service)}
s.control <- ls
return <-ls.c
}
type listServices struct {
c chan []Service
}
func (ls listServices) isSupervisorMessage() {}
type removeService struct {
id serviceID
}
func (rs removeService) isSupervisorMessage() {}
func (s *Supervisor) sync() {
s.control <- syncSupervisor{}
}
type syncSupervisor struct {
}
func (ss syncSupervisor) isSupervisorMessage() {}
func (s *Supervisor) fail(id serviceID, err interface{}, stacktrace []byte) {
s.control <- serviceFailed{id, err, stacktrace}
}
type serviceFailed struct {
id serviceID
err interface{}
stacktrace []byte
}
func (sf serviceFailed) isSupervisorMessage() {}
func (s *Supervisor) serviceEnded(id serviceID) {
s.control <- serviceEnded{id}
}
type serviceEnded struct {
id serviceID
}
func (s serviceEnded) isSupervisorMessage() {}
// added by the Add() method
type addService struct {
service Service
response chan serviceID
}
func (as addService) isSupervisorMessage() {}
// Stop stops the Supervisor.
func (s *Supervisor) Stop() {
s.control <- stopSupervisor{}
}
type stopSupervisor struct {
}
func (ss stopSupervisor) isSupervisorMessage() {}
func (s *Supervisor) panic() {
s.control <- panicSupervisor{}
}
type panicSupervisor struct {
}
func (ps panicSupervisor) isSupervisorMessage() {}

View File

@@ -1,49 +0,0 @@
package suture
import "fmt"
type Incrementor struct {
current int
next chan int
stop chan bool
}
func (i *Incrementor) Stop() {
fmt.Println("Stopping the service")
i.stop <- true
}
func (i *Incrementor) Serve() {
for {
select {
case i.next <- i.current:
i.current += 1
case <-i.stop:
// We sync here just to guarantee the output of "Stopping the service",
// so this passes the test reliably.
// Most services would simply "return" here.
i.stop <- true
return
}
}
}
func ExampleNew_simple() {
supervisor := NewSimple("Supervisor")
service := &Incrementor{0, make(chan int), make(chan bool)}
supervisor.Add(service)
go supervisor.ServeBackground()
fmt.Println("Got:", <-service.next)
fmt.Println("Got:", <-service.next)
supervisor.Stop()
// We sync here just to guarantee the output of "Stopping the service"
<-service.stop
// Output:
// Got: 0
// Got: 1
// Stopping the service
}

View File

@@ -1,600 +0,0 @@
package suture
import (
"errors"
"fmt"
"reflect"
"sync"
"testing"
"time"
)
const (
Happy = iota
Fail
Panic
Hang
UseStopChan
)
var everMultistarted = false
// Test that supervisors work perfectly when everything is hunky dory.
func TestTheHappyCase(t *testing.T) {
t.Parallel()
s := NewSimple("A")
if s.String() != "A" {
t.Fatal("Can't get name from a supervisor")
}
service := NewService("B")
s.Add(service)
go s.Serve()
<-service.started
// If we stop the service, it just gets restarted
service.Stop()
<-service.started
// And it is shut down when we stop the supervisor
service.take <- UseStopChan
s.Stop()
<-service.stop
}
// Test that adding to a running supervisor does indeed start the service.
func TestAddingToRunningSupervisor(t *testing.T) {
t.Parallel()
s := NewSimple("A1")
s.ServeBackground()
defer s.Stop()
service := NewService("B1")
s.Add(service)
<-service.started
services := s.Services()
if !reflect.DeepEqual([]Service{service}, services) {
t.Fatal("Can't get list of services as expected.")
}
}
// Test what happens when services fail.
func TestFailures(t *testing.T) {
t.Parallel()
s := NewSimple("A2")
s.failureThreshold = 3.5
go s.Serve()
defer func() {
// to avoid deadlocks during shutdown, we have to not try to send
// things out on channels while we're shutting down (this undoes the
// logFailure override about 25 lines down)
s.logFailure = func(*Supervisor, Service, float64, float64, bool, interface{}, []byte) {}
s.Stop()
}()
s.sync()
service1 := NewService("B2")
service2 := NewService("C2")
s.Add(service1)
<-service1.started
s.Add(service2)
<-service2.started
nowFeeder := NewNowFeeder()
pastVal := time.Unix(1000000, 0)
nowFeeder.appendTimes(pastVal)
s.getNow = nowFeeder.getter
resumeChan := make(chan time.Time)
s.getResume = func(d time.Duration) <-chan time.Time {
return resumeChan
}
failNotify := make(chan bool)
// use this to synchronize on here
s.logFailure = func(supervisor *Supervisor, s Service, cf float64, ft float64, r bool, error interface{}, stacktrace []byte) {
failNotify <- r
}
// All that setup was for this: Service1, please return now.
service1.take <- Fail
restarted := <-failNotify
<-service1.started
if !restarted || s.failures != 1 || s.lastFail != pastVal {
t.Fatal("Did not fail in the expected manner")
}
// Getting past this means the service was restarted.
service1.take <- Happy
// Service2, your turn.
service2.take <- Fail
nowFeeder.appendTimes(pastVal)
restarted = <-failNotify
<-service2.started
if !restarted || s.failures != 2 || s.lastFail != pastVal {
t.Fatal("Did not fail in the expected manner")
}
// And you're back. (That is, the correct service was restarted.)
service2.take <- Happy
// Now, one failureDecay later, is everything working correctly?
oneDecayLater := time.Unix(1000030, 0)
nowFeeder.appendTimes(oneDecayLater)
service2.take <- Fail
restarted = <-failNotify
<-service2.started
// playing a bit fast and loose here with floating point, but...
// we get 2 by taking the current failure value of 2, decaying it
// by one interval, which cuts it in half to 1, then adding 1 again,
// all of which "should" be precise
if !restarted || s.failures != 2 || s.lastFail != oneDecayLater {
t.Fatal("Did not decay properly", s.lastFail, oneDecayLater)
}
// For a change of pace, service1 would you be so kind as to panic?
nowFeeder.appendTimes(oneDecayLater)
service1.take <- Panic
restarted = <-failNotify
<-service1.started
if !restarted || s.failures != 3 || s.lastFail != oneDecayLater {
t.Fatal("Did not correctly recover from a panic")
}
nowFeeder.appendTimes(oneDecayLater)
backingoff := make(chan bool)
s.logBackoff = func(s *Supervisor, backingOff bool) {
backingoff <- backingOff
}
// And with this failure, we trigger the backoff code.
service1.take <- Fail
backoff := <-backingoff
restarted = <-failNotify
if !backoff || restarted || s.failures != 4 {
t.Fatal("Broke past the threshold but did not log correctly", s.failures)
}
if service1.existing != 0 {
t.Fatal("service1 still exists according to itself?")
}
// service2 is still running, because we don't shut anything down in a
// backoff, we just stop restarting.
service2.take <- Happy
var correct bool
timer := time.NewTimer(time.Millisecond * 10)
// verify the service has not been restarted
// hard to get around race conditions here without simply using a timer...
select {
case service1.take <- Happy:
correct = false
case <-timer.C:
correct = true
}
if !correct {
t.Fatal("Restarted the service during the backoff interval")
}
// tell the supervisor the restart interval has passed
resumeChan <- time.Time{}
backoff = <-backingoff
<-service1.started
s.sync()
if s.failures != 0 {
t.Fatal("Did not reset failure count after coming back from timeout.")
}
nowFeeder.appendTimes(oneDecayLater)
service1.take <- Fail
restarted = <-failNotify
<-service1.started
if !restarted || backoff {
t.Fatal("For some reason, got that we were backing off again.", restarted, backoff)
}
}
func TestRunningAlreadyRunning(t *testing.T) {
t.Parallel()
s := NewSimple("A3")
go s.Serve()
defer s.Stop()
// ensure the supervisor has made it to its main loop
s.sync()
var errored bool
func() {
defer func() {
if r := recover(); r != nil {
errored = true
}
}()
s.Serve()
}()
if !errored {
t.Fatal("Supervisor failed to prevent itself from double-running.")
}
}
func TestFullConstruction(t *testing.T) {
t.Parallel()
s := New("Moo", Spec{
Log: func(string) {},
FailureDecay: 1,
FailureThreshold: 2,
FailureBackoff: 3,
Timeout: time.Second * 29,
})
if s.String() != "Moo" || s.failureDecay != 1 || s.failureThreshold != 2 || s.failureBackoff != 3 || s.timeout != time.Second*29 {
t.Fatal("Full construction failed somehow")
}
}
// This is mostly for coverage testing.
func TestDefaultLogging(t *testing.T) {
t.Parallel()
s := NewSimple("A4")
service := NewService("B4")
s.Add(service)
s.failureThreshold = .5
s.failureBackoff = time.Millisecond * 25
go s.Serve()
s.sync()
<-service.started
resumeChan := make(chan time.Time)
s.getResume = func(d time.Duration) <-chan time.Time {
return resumeChan
}
service.take <- UseStopChan
service.take <- Fail
<-service.stop
resumeChan <- time.Time{}
<-service.started
service.take <- Happy
serviceName(&BarelyService{})
s.logBadStop(s, service)
s.logFailure(s, service, 1, 1, true, errors.New("test error"), []byte{})
s.Stop()
}
func TestNestedSupervisors(t *testing.T) {
t.Parallel()
super1 := NewSimple("Top5")
super2 := NewSimple("Nested5")
service := NewService("Service5")
super2.logBadStop = func(*Supervisor, Service) {
panic("Failed to copy logBadStop")
}
super1.Add(super2)
super2.Add(service)
// test the functions got copied from super1; if this panics, it didn't
// get copied
super2.logBadStop(super2, service)
go super1.Serve()
super1.sync()
<-service.started
service.take <- Happy
super1.Stop()
}
func TestStoppingSupervisorStopsServices(t *testing.T) {
t.Parallel()
s := NewSimple("Top6")
service := NewService("Service 6")
s.Add(service)
go s.Serve()
s.sync()
<-service.started
service.take <- UseStopChan
s.Stop()
<-service.stop
}
func TestStoppingStillWorksWithHungServices(t *testing.T) {
t.Parallel()
s := NewSimple("Top7")
service := NewService("Service WillHang7")
s.Add(service)
go s.Serve()
<-service.started
service.take <- UseStopChan
service.take <- Hang
resumeChan := make(chan time.Time)
s.getResume = func(d time.Duration) <-chan time.Time {
return resumeChan
}
failNotify := make(chan struct{})
s.logBadStop = func(supervisor *Supervisor, s Service) {
failNotify <- struct{}{}
}
s.Stop()
resumeChan <- time.Time{}
<-failNotify
service.release <- true
<-service.stop
}
func TestRemoveService(t *testing.T) {
t.Parallel()
s := NewSimple("Top")
service := NewService("ServiceToRemove8")
id := s.Add(service)
go s.Serve()
<-service.started
service.take <- UseStopChan
err := s.Remove(id)
if err != nil {
t.Fatal("Removing service somehow failed")
}
<-service.stop
err = s.Remove(ServiceToken{1<<36 + 1})
if err != ErrWrongSupervisor {
t.Fatal("Did not detect that the ServiceToken was wrong")
}
}
func TestFailureToConstruct(t *testing.T) {
t.Parallel()
var s *Supervisor
panics(func() {
s.Serve()
})
s = new(Supervisor)
panics(func() {
s.Serve()
})
}
func TestFailingSupervisors(t *testing.T) {
t.Parallel()
// This is a bit of a complicated test, so let me explain what
// all this is doing:
// 1. Set up a top-level supervisor with a hair-trigger backoff.
// 2. Add a supervisor to that.
// 3. To that supervisor, add a service.
// 4. Panic the supervisor in the middle, sending the top-level into
// backoff.
// 5. Kill the lower level service too.
// 6. Verify that when the top-level service comes out of backoff,
// the service ends up restarted as expected.
// Ultimately, we can't have more than a best-effort recovery here.
// A panic'ed supervisor can't really be trusted to have consistent state,
// and without *that*, we can't trust it to do anything sensible with
// the children it may have been running. So unlike Erlang, we can't
// really expect to be able to safely restart them or anything.
// Really, the "correct" answer is that the Supervisor must never panic,
// but in the event that it does, this verifies that it at least tries
// to get on with life.
// This also tests that if a Supervisor itself panics, and one of its
// monitored services goes down in the meantime, that the monitored
// service also gets correctly restarted when the supervisor does.
s1 := NewSimple("Top9")
s2 := NewSimple("Nested9")
service := NewService("Service9")
s1.Add(s2)
s2.Add(service)
go s1.Serve()
<-service.started
s1.failureThreshold = .5
// let us control precisely when s1 comes back
resumeChan := make(chan time.Time)
s1.getResume = func(d time.Duration) <-chan time.Time {
return resumeChan
}
failNotify := make(chan string)
// use this to synchronize on here
s1.logFailure = func(supervisor *Supervisor, s Service, cf float64, ft float64, r bool, error interface{}, stacktrace []byte) {
failNotify <- fmt.Sprintf("%s", s)
}
s2.panic()
failing := <-failNotify
// that's enough sync to guarantee this:
if failing != "Nested9" || s1.state != paused {
t.Fatal("Top-level supervisor did not go into backoff as expected")
}
service.take <- Fail
resumeChan <- time.Time{}
<-service.started
}
func TestNilSupervisorAdd(t *testing.T) {
t.Parallel()
var s *Supervisor
defer func() {
if r := recover(); r == nil {
t.Fatal("did not panic as expected on nil add")
}
}()
s.Add(s)
}
// http://golangtutorials.blogspot.com/2011/10/gotest-unit-testing-and-benchmarking-go.html
// claims test functions are run in the same order as the source file...
// I'm not sure if this is part of the contract, though. Especially in the
// face of "t.Parallel()"...
func TestEverMultistarted(t *testing.T) {
if everMultistarted {
t.Fatal("Seem to have multistarted a service at some point, bummer.")
}
}
// A test service that can be induced to fail, panic, or hang on demand.
func NewService(name string) *FailableService {
return &FailableService{name, make(chan bool), make(chan int),
make(chan bool, 1), make(chan bool), make(chan bool), 0}
}
type FailableService struct {
name string
started chan bool
take chan int
shutdown chan bool
release chan bool
stop chan bool
existing int
}
func (s *FailableService) Serve() {
if s.existing != 0 {
everMultistarted = true
panic("Multi-started the same service! " + s.name)
}
s.existing += 1
s.started <- true
useStopChan := false
for {
select {
case val := <-s.take:
switch val {
case Happy:
// Do nothing on purpose. Life is good!
case Fail:
s.existing -= 1
if useStopChan {
s.stop <- true
}
return
case Panic:
s.existing -= 1
panic("Panic!")
case Hang:
// or more specifically, "hang until I release you"
<-s.release
case UseStopChan:
useStopChan = true
}
case <-s.shutdown:
s.existing -= 1
if useStopChan {
s.stop <- true
}
return
}
}
}
func (s *FailableService) String() string {
return s.name
}
func (s *FailableService) Stop() {
s.shutdown <- true
}
type NowFeeder struct {
values []time.Time
getter func() time.Time
m sync.Mutex
}
// This is used to test serviceName; it's a service without a Stringer.
type BarelyService struct{}
func (bs *BarelyService) Serve() {}
func (bs *BarelyService) Stop() {}
func NewNowFeeder() (nf *NowFeeder) {
nf = new(NowFeeder)
nf.getter = func() time.Time {
nf.m.Lock()
defer nf.m.Unlock()
if len(nf.values) > 0 {
ret := nf.values[0]
nf.values = nf.values[1:]
return ret
}
panic("Ran out of values for NowFeeder")
}
return
}
func (nf *NowFeeder) appendTimes(t ...time.Time) {
nf.m.Lock()
defer nf.m.Unlock()
nf.values = append(nf.values, t...)
}
func panics(doesItPanic func()) (panics bool) {
defer func() {
if r := recover(); r != nil {
panics = true
}
}()
doesItPanic()
return
}

View File

@@ -0,0 +1,23 @@
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
maketables: maketables.go triegen.go
go build $^
normregtest: normregtest.go
go build $^
tables: maketables
./maketables > tables.go
gofmt -w tables.go
# Downloads from www.unicode.org, so not part
# of standard test scripts.
test: testtables regtest
testtables: maketables
./maketables -test > data_test.go && go test -tags=test
regtest: normregtest
./normregtest

View File

@@ -16,17 +16,20 @@ import (
"fmt"
"io"
"log"
"net/http"
"os"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
func main() {
gen.Init()
flag.Parse()
loadUnicodeData()
compactCCC()
loadCompositionExclusions()
@@ -43,18 +46,24 @@ func main() {
}
}
var (
tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
)
var url = flag.String("url",
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/",
"URL of Unicode database directory")
var tablelist = flag.String("tables",
"all",
"comma-separated list of which tables to generate; "+
"can be 'decomp', 'recomp', 'info' and 'all'")
var test = flag.Bool("test",
false,
"test existing tables against DerivedNormalizationProps and generate test data for regression testing")
var verbose = flag.Bool("verbose",
false,
"write data to stdout as it is parsed")
var localFiles = flag.Bool("local",
false,
"data files have been copied to the current directory; for debugging only")
var logger = log.New(os.Stderr, "", log.Lshortfile)
const MaxChar = 0x10FFFF // anything above this shouldn't exist
@@ -180,6 +189,27 @@ func (f FormInfo) String() string {
type Decomposition []rune
func openReader(file string) (input io.ReadCloser) {
if *localFiles {
f, err := os.Open(file)
if err != nil {
logger.Fatal(err)
}
input = f
} else {
path := *url + file
resp, err := http.Get(path)
if err != nil {
logger.Fatal(err)
}
if resp.StatusCode != 200 {
logger.Fatal("bad GET status for "+file, resp.Status)
}
input = resp.Body
}
return
}
func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
decomp := strings.Split(s, " ")
if len(decomp) > 0 && skipfirst {
@@ -196,7 +226,7 @@ func parseDecomposition(s string, skipfirst bool) (a []rune, err error) {
}
func loadUnicodeData() {
f := gen.OpenUCDFile("UnicodeData.txt")
f := openReader("UnicodeData.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
@@ -212,7 +242,7 @@ func loadUnicodeData() {
if len(decmap) > 0 {
exp, err = parseDecomposition(decmap, true)
if err != nil {
log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
logger.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err)
}
isCompat = true
}
@@ -231,7 +261,7 @@ func loadUnicodeData() {
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
logger.Fatal(err)
}
}
@@ -266,18 +296,18 @@ func compactCCC() {
// 0958 # ...
// See http://unicode.org/reports/tr44/ for full explanation
func loadCompositionExclusions() {
f := gen.OpenUCDFile("CompositionExclusions.txt")
f := openReader("CompositionExclusions.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
c := &chars[p.Rune(0)]
if c.excludeInComp {
log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
logger.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint)
}
c.excludeInComp = true
}
if e := p.Err(); e != nil {
log.Fatal(e)
logger.Fatal(e)
}
}
@@ -471,22 +501,29 @@ func computeNonStarterCounts() {
if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 {
runes = exp
}
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, r := range runes {
if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
if chars[r].ccc == 0 {
break
}
c.nLeadingNonStarters++
}
for i := len(runes) - 1; i >= 0; i-- {
if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward {
if chars[runes[i]].ccc == 0 {
break
}
c.nTrailingNonStarters++
}
if c.nTrailingNonStarters > 3 {
log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes)
// We consider runes that combine backwards to be non-starters for the
// purpose of Stream-Safe Text Processing.
for _, f := range c.forms {
if c.ccc == 0 && f.combinesBackward {
if len(c.forms[FCompatibility].expandedDecomp) > 0 {
log.Fatalf("%U: CCC==0 modifier with an expansion is not supported.", i)
}
c.nTrailingNonStarters = 1
c.nLeadingNonStarters = 1
}
}
if isHangul(rune(i)) {
@@ -505,19 +542,19 @@ func computeNonStarterCounts() {
}
}
func printBytes(w io.Writer, b []byte, name string) {
fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b))
fmt.Fprintf(w, "var %s = [...]byte {", name)
func printBytes(b []byte, name string) {
fmt.Printf("// %s: %d bytes\n", name, len(b))
fmt.Printf("var %s = [...]byte {", name)
for i, c := range b {
switch {
case i%64 == 0:
fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63)
fmt.Printf("\n// Bytes %x - %x\n", i, i+63)
case i%8 == 0:
fmt.Fprintf(w, "\n")
fmt.Printf("\n")
}
fmt.Fprintf(w, "0x%.2X, ", c)
fmt.Printf("0x%.2X, ", c)
}
fmt.Fprint(w, "\n}\n\n")
fmt.Print("\n}\n\n")
}
// See forminfo.go for format.
@@ -573,13 +610,13 @@ func (m *decompSet) insert(key int, s string) {
m[key][s] = true
}
func printCharInfoTables(w io.Writer) int {
func printCharInfoTables() int {
mkstr := func(r rune, f *FormInfo) (int, string) {
d := f.expandedDecomp
s := string([]rune(d))
if max := 1 << 6; len(s) >= max {
const msg = "%U: too many bytes in decomposition: %d >= %d"
log.Fatalf(msg, r, len(s), max)
logger.Fatalf(msg, r, len(s), max)
}
head := uint8(len(s))
if f.quickCheck[MComposed] != QCYes {
@@ -594,11 +631,11 @@ func printCharInfoTables(w io.Writer) int {
tccc := ccc(d[len(d)-1])
cc := ccc(r)
if cc != 0 && lccc == 0 && tccc == 0 {
log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
logger.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc)
}
if tccc < lccc && lccc != 0 {
const msg = "%U: lccc (%d) must be <= tcc (%d)"
log.Fatalf(msg, r, lccc, tccc)
logger.Fatalf(msg, r, lccc, tccc)
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
@@ -615,13 +652,13 @@ func printCharInfoTables(w io.Writer) int {
if lccc > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
logger.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)
}
index = firstLeadingCCC
}
if cc != lccc {
if cc != 0 {
log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
logger.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc)
}
index = firstCCCZeroExcept
}
@@ -643,7 +680,7 @@ func printCharInfoTables(w io.Writer) int {
continue
}
if f.combinesBackward {
log.Fatalf("%U: combinesBackward and decompose", c.codePoint)
logger.Fatalf("%U: combinesBackward and decompose", c.codePoint)
}
index, s := mkstr(c.codePoint, &f)
decompSet.insert(index, s)
@@ -654,7 +691,7 @@ func printCharInfoTables(w io.Writer) int {
size := 0
positionMap := make(map[string]uint16)
decompositions.WriteString("\000")
fmt.Fprintln(w, "const (")
fmt.Println("const (")
for i, m := range decompSet {
sa := []string{}
for s := range m {
@@ -667,13 +704,13 @@ func printCharInfoTables(w io.Writer) int {
positionMap[s] = uint16(p)
}
if cname[i] != "" {
fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len())
fmt.Printf("%s = 0x%X\n", cname[i], decompositions.Len())
}
}
fmt.Fprintln(w, "maxDecomp = 0x8000")
fmt.Fprintln(w, ")")
fmt.Println("maxDecomp = 0x8000")
fmt.Println(")")
b := decompositions.Bytes()
printBytes(w, b, "decomps")
printBytes(b, "decomps")
size += len(b)
varnames := []string{"nfc", "nfkc"}
@@ -689,7 +726,7 @@ func printCharInfoTables(w io.Writer) int {
if c.ccc != ccc(d[0]) {
// We assume the lead ccc of a decomposition !=0 in this case.
if ccc(d[0]) == 0 {
log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
logger.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc)
}
}
} else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward {
@@ -700,9 +737,9 @@ func printCharInfoTables(w io.Writer) int {
trie.Insert(c.codePoint, uint64(0x8000|v))
}
}
sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]}))
sz, err := trie.Gen(os.Stdout, triegen.Compact(&normCompacter{name: varnames[i]}))
if err != nil {
log.Fatal(err)
logger.Fatal(err)
}
size += sz
}
@@ -718,9 +755,30 @@ func contains(sa []string, s string) bool {
return false
}
func makeTables() {
w := &bytes.Buffer{}
// Extract the version number from the URL.
func version() string {
// From http://www.unicode.org/standard/versions/#Version_Numbering:
// for the later Unicode versions, data files are located in
// versioned directories.
fields := strings.Split(*url, "/")
for _, f := range fields {
if match, _ := regexp.MatchString(`[0-9]\.[0-9]\.[0-9]`, f); match {
return f
}
}
logger.Fatal("unknown version")
return "Unknown"
}
const fileHeader = `// Generated by running
// maketables --tables=%s --url=%s
// DO NOT EDIT
package norm
`
func makeTables() {
size := 0
if *tablelist == "" {
return
@@ -729,6 +787,7 @@ func makeTables() {
if *tablelist == "all" {
list = []string{"recomp", "info"}
}
fmt.Printf(fileHeader, *tablelist, *url)
// Compute maximum decomposition size.
max := 0
@@ -738,30 +797,30 @@ func makeTables() {
}
}
fmt.Fprintln(w, "const (")
fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.")
fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion())
fmt.Fprintln(w)
fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that")
fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Fprintln(w, ")\n")
fmt.Println("const (")
fmt.Println("\t// Version is the Unicode edition from which the tables are derived.")
fmt.Printf("\tVersion = %q\n", version())
fmt.Println()
fmt.Println("\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform")
fmt.Println("\t// may need to write atomically for any Form. Making a destination buffer at")
fmt.Println("\t// least this size ensures that Transform can always make progress and that")
fmt.Println("\t// the user does not need to grow the buffer on an ErrShortDst.")
fmt.Printf("\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max)
fmt.Println(")\n")
// Print the CCC remap table.
size += len(cccMap)
fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap))
fmt.Printf("var ccc = [%d]uint8{", len(cccMap))
for i := 0; i < len(cccMap); i++ {
if i%8 == 0 {
fmt.Fprintln(w)
fmt.Println()
}
fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)])
fmt.Printf("%3d, ", cccMap[uint8(i)])
}
fmt.Fprintln(w, "\n}\n")
fmt.Println("\n}\n")
if contains(list, "info") {
size += printCharInfoTables(w)
size += printCharInfoTables()
}
if contains(list, "recomp") {
@@ -783,21 +842,20 @@ func makeTables() {
}
sz := nrentries * 8
size += sz
fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz)
fmt.Fprintln(w, "var recompMap = map[uint32]rune{")
fmt.Printf("// recompMap: %d bytes (entries only)\n", sz)
fmt.Println("var recompMap = map[uint32]rune{")
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
if !f.isOneWay && len(d) > 0 {
key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1]))
fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i)
fmt.Printf("0x%.8X: 0x%.4X,\n", key, i)
}
}
fmt.Fprintf(w, "}\n\n")
fmt.Printf("}\n\n")
}
fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
gen.WriteGoFile("tables.go", "norm", w.Bytes())
fmt.Printf("// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size)
}
func printChars() {
@@ -832,16 +890,10 @@ func verifyComputed() {
continue
}
if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b {
// We accept these runes to be treated differently (it only affects
// segment breaking in iteration, most likely on improper use), but
// We accept these two runes to be treated differently (it only affects
// segment breaking in iteration, most likely on inproper use), but
// reconsider if more characters are added.
// U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;;
// U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;;
// U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;;
// U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;;
// U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;;
// U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;;
if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) {
if i != 0xFF9E && i != 0xFF9F {
log.Fatalf("%U: nLead was %v; want %v", i, a, b)
}
}
@@ -849,7 +901,7 @@ func verifyComputed() {
nfc := c.forms[FCanonical]
nfkc := c.forms[FCompatibility]
if nfc.combinesBackward != nfkc.combinesBackward {
log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
logger.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint)
}
}
}
@@ -861,7 +913,7 @@ func verifyComputed() {
// 0374 ; NFD_QC; N # ...
// See http://unicode.org/reports/tr44/ for full explanation
func testDerived() {
f := gen.OpenUCDFile("DerivedNormalizationProps.txt")
f := openReader("DerivedNormalizationProps.txt")
defer f.Close()
p := ucd.New(f)
for p.Next() {
@@ -894,12 +946,12 @@ func testDerived() {
log.Fatalf(`Unexpected quick check value "%s"`, p.String(2))
}
if got := c.forms[ftype].quickCheck[mode]; got != qr {
log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
logger.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr)
}
c.forms[ftype].verified[mode] = true
}
if err := p.Err(); err != nil {
log.Fatal(err)
logger.Fatal(err)
}
// Any unspecified value must be QCYes. Verify this.
for i, c := range chars {
@@ -907,14 +959,20 @@ func testDerived() {
for k, qr := range fd.quickCheck {
if !fd.verified[k] && qr != QCYes {
m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n"
log.Printf(m, i, j, k, qr, c.name)
logger.Printf(m, i, j, k, qr, c.name)
}
}
}
}
}
var testHeader = `const (
var testHeader = `// Generated by running
// maketables --test --url=%s
// +build test
package norm
const (
Yes = iota
No
Maybe
@@ -952,10 +1010,8 @@ func printTestdata() {
nTrail uint8
f string
}
last := lastInfo{}
w := &bytes.Buffer{}
fmt.Fprintf(w, testHeader)
fmt.Printf(testHeader, *url)
for r, c := range chars {
f := c.forms[FCanonical]
qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp)
@@ -969,10 +1025,9 @@ func printTestdata() {
}
current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s}
if last != current {
fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
fmt.Printf("\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s)
last = current
}
}
fmt.Fprintln(w, "}")
gen.WriteGoFile("data_test.go", "norm", w.Bytes())
fmt.Println("}")
}

View File

@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run maketables.go triegen.go
//go:generate go run maketables.go triegen.go -test
// Package norm contains types and functions for normalizing Unicode strings.
package norm

View File

@@ -113,25 +113,7 @@ var decomposeSegmentTests = []PositionTest{
{"\u00C0b", 2, "A\u0300"},
// long
{grave(31), 60, grave(30) + cgj},
{"a" + grave(31), 61, "a" + grave(30) + cgj},
// Stability tests: see http://www.unicode.org/review/pr-29.html.
// U+0300 COMBINING GRAVE ACCENT;Mn;230;NSM;;;;;N;NON-SPACING GRAVE;;;;
// U+0B47 ORIYA VOWEL SIGN E;Mc;0;L;;;;;N;;;;;
// U+0B3E ORIYA VOWEL SIGN AA;Mc;0;L;;;;;N;;;;;
// U+1100 HANGUL CHOSEONG KIYEOK;Lo;0;L;;;;;N;;;;;
// U+1161 HANGUL JUNGSEONG A;Lo;0;L;;;;;N;;;;;
{"\u0B47\u0300\u0B3E", 8, "\u0B47\u0300\u0B3E"},
{"\u1100\u0300\u1161", 8, "\u1100\u0300\u1161"},
{"\u0B47\u0B3E", 6, "\u0B47\u0B3E"},
{"\u1100\u1161", 6, "\u1100\u1161"},
// U+0D4A MALAYALAM VOWEL SIGN O;Mc;0;L;0D46 0D3E;;;;N;;;;;
// Sequence of decomposing characters that are starters and modifiers.
{"\u0d4a" + strings.Repeat("\u0d3e", 31), 90, "\u0d46" + strings.Repeat("\u0d3e", 30) + cgj},
{grave(30), 60, grave(30)},
// U+FF9E is a starter, but decomposes to U+3099, which is not.
{grave(30) + "\uff9e", 60, grave(30) + cgj},
// ends with incomplete UTF-8 encoding
{"\xCC", 0, ""},
@@ -570,44 +552,6 @@ var appendTestsNFC = []AppendTest{
"a" + rep(0x0305, maxNonStarters+4) + "\u0316",
"a" + rep(0x0305, maxNonStarters) + cgj + "\u0316" + rep(0x305, 4),
},
{ // Combine across non-blocking non-starters.
// U+0327 COMBINING CEDILLA;Mn;202;NSM;;;;;N;NON-SPACING CEDILLA;;;;
// U+0325 COMBINING RING BELOW;Mn;220;NSM;;;;;N;NON-SPACING RING BELOW;;;;
"", "a\u0327\u0325", "\u1e01\u0327",
},
{ // Jamo V+T does not combine.
"",
"\u1161\u11a8",
"\u1161\u11a8",
},
// Stability tests: see http://www.unicode.org/review/pr-29.html.
{"", "\u0b47\u0300\u0b3e", "\u0b47\u0300\u0b3e"},
{"", "\u1100\u0300\u1161", "\u1100\u0300\u1161"},
{"", "\u0b47\u0b3e", "\u0b4b"},
{"", "\u1100\u1161", "\uac00"},
// U+0D4A MALAYALAM VOWEL SIGN O;Mc;0;L;0D46 0D3E;;;;N;;;;;
{ // 0d4a starts a new segment.
"",
"\u0d4a" + strings.Repeat("\u0d3e", 15) + "\u0d4a" + strings.Repeat("\u0d3e", 15),
"\u0d4a" + strings.Repeat("\u0d3e", 15) + "\u0d4a" + strings.Repeat("\u0d3e", 15),
},
{ // Split combining characters.
// TODO: don't insert CGJ before starters.
"",
"\u0d46" + strings.Repeat("\u0d3e", 31),
"\u0d4a" + strings.Repeat("\u0d3e", 29) + cgj + "\u0d3e",
},
{ // Split combining characters.
"",
"\u0d4a" + strings.Repeat("\u0d3e", 30),
"\u0d4a" + strings.Repeat("\u0d3e", 29) + cgj + "\u0d3e",
},
}
var appendTestsNFD = []AppendTest{

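The segment-overflow tests above exercise the 30-non-starter limit: when a run of combining characters grows past maxNonStarters, the normalizer breaks the segment by inserting U+034F COMBINING GRAPHEME JOINER. A small sketch of that behavior, assuming the public golang.org/x/text/unicode/norm package:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// 31 combining grave accents exceed the 30-non-starter limit, so
	// NFC inserts a CGJ (U+034F) to keep segments bounded, as the
	// {grave(31), ...} test cases above expect.
	s := "a" + strings.Repeat("\u0300", 31)
	out := norm.NFC.String(s)
	fmt.Println(strings.Contains(out, "\u034f")) // expected: true
}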

@@ -2,37 +2,52 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
// +build ignore
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"log"
"net/http"
"os"
"path"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"unicode"
"unicode/utf8"
"golang.org/x/text/internal/gen"
"golang.org/x/text/unicode/norm"
)
var long = flag.Bool("long", false,
"run time-consuming tests, such as tests that fetch data online")
var once sync.Once
func skipShort(t *testing.T) {
if !gen.IsLocal() && !*long {
t.Skip("skipping test to prevent downloading; to run use -long or use -local to specify a local source")
func main() {
flag.Parse()
loadTestData()
CharacterByCharacterTests()
StandardTests()
PerformanceTest()
if errorCount == 0 {
fmt.Println("PASS")
}
once.Do(func() { loadTestData(t) })
}
const file = "NormalizationTest.txt"
var url = flag.String("url",
"http://www.unicode.org/Public/"+unicode.Version+"/ucd/"+file,
"URL of Unicode database directory")
var localFiles = flag.Bool("local",
false,
"data files have been copied to the current directory; for debugging only")
var logger = log.New(os.Stderr, "", log.Lshortfile)
// This regression test runs the test set in NormalizationTest.txt
// (taken from http://www.unicode.org/Public/<unicode.Version>/ucd/).
//
@@ -109,8 +124,22 @@ var testRe = regexp.MustCompile(`^` + strings.Repeat(`([\dA-F ]+);`, 5) + ` # (.
var counter int
// Load the data from NormalizationTest.txt
func loadTestData(t *testing.T) {
f := gen.OpenUCDFile("NormalizationTest.txt")
func loadTestData() {
if *localFiles {
pwd, _ := os.Getwd()
*url = "file://" + path.Join(pwd, file)
}
t := &http.Transport{}
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
c := &http.Client{Transport: t}
resp, err := c.Get(*url)
if err != nil {
logger.Fatal(err)
}
if resp.StatusCode != 200 {
logger.Fatal("bad GET status for "+file, resp.Status)
}
f := resp.Body
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
@@ -121,11 +150,11 @@ func loadTestData(t *testing.T) {
m := partRe.FindStringSubmatch(line)
if m != nil {
if len(m) < 3 {
t.Fatal("Failed to parse Part: ", line)
logger.Fatal("Failed to parse Part: ", line)
}
i, err := strconv.Atoi(m[1])
if err != nil {
t.Fatal(err)
logger.Fatal(err)
}
name := m[2]
part = append(part, Part{name: name[:len(name)-1], number: i})
@@ -133,7 +162,7 @@ func loadTestData(t *testing.T) {
}
m = testRe.FindStringSubmatch(line)
if m == nil || len(m) < 7 {
t.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
logger.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
}
test := Test{name: m[6], partnr: len(part) - 1, number: counter}
counter++
@@ -141,7 +170,7 @@ func loadTestData(t *testing.T) {
for _, split := range strings.Split(m[j], " ") {
r, err := strconv.ParseUint(split, 16, 64)
if err != nil {
t.Fatal(err)
logger.Fatal(err)
}
if test.r == 0 {
// save for CharacterByCharacterTests
@@ -156,38 +185,50 @@ func loadTestData(t *testing.T) {
part.tests = append(part.tests, test)
}
if scanner.Err() != nil {
t.Fatal(scanner.Err())
logger.Fatal(scanner.Err())
}
}
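The rewritten loader above replaces gen.OpenUCDFile with a plain HTTP fetch, registering a file:// handler on the transport so the same client can also read local copies when -local is given. The trick in isolation (the path below is a stand-in):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Teach an http.Client to fetch file:// URLs alongside http://, so
	// local and remote data sources share one code path.
	t := &http.Transport{}
	t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
	c := &http.Client{Transport: t}

	resp, err := c.Get("file:///etc/hostname") // hypothetical local path
	if err != nil {
		fmt.Println("get:", err)
		return
	}
	defer resp.Body.Close()
	bs, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("read %d bytes, status %s\n", len(bs), resp.Status)
}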
func cmpResult(t *testing.T, tc *Test, name string, f Form, gold, test, result string) {
var fstr = []string{"NFC", "NFD", "NFKC", "NFKD"}
var errorCount int
func cmpResult(t *Test, name string, f norm.Form, gold, test, result string) {
if gold != result {
t.Errorf("%s:%s: %s(%+q)=%+q; want %+q: %s",
tc.Name(), name, fstr[f], test, result, gold, tc.name)
errorCount++
if errorCount > 20 {
return
}
logger.Printf("%s:%s: %s(%+q)=%+q; want %+q: %s",
t.Name(), name, fstr[f], test, result, gold, t.name)
}
}
func cmpIsNormal(t *testing.T, tc *Test, name string, f Form, test string, result, want bool) {
func cmpIsNormal(t *Test, name string, f norm.Form, test string, result, want bool) {
if result != want {
t.Errorf("%s:%s: %s(%+q)=%v; want %v", tc.Name(), name, fstr[f], test, result, want)
errorCount++
if errorCount > 20 {
return
}
logger.Printf("%s:%s: %s(%+q)=%v; want %v", t.Name(), name, fstr[f], test, result, want)
}
}
func doTest(t *testing.T, tc *Test, f Form, gold, test string) {
func doTest(t *Test, f norm.Form, gold, test string) {
testb := []byte(test)
result := f.Bytes(testb)
cmpResult(t, tc, "Bytes", f, gold, test, string(result))
cmpResult(t, "Bytes", f, gold, test, string(result))
sresult := f.String(test)
cmpResult(t, tc, "String", f, gold, test, sresult)
cmpResult(t, "String", f, gold, test, sresult)
acc := []byte{}
i := Iter{}
i := norm.Iter{}
i.InitString(f, test)
for !i.Done() {
acc = append(acc, i.Next()...)
}
cmpResult(t, tc, "Iter.Next", f, gold, test, string(acc))
cmpResult(t, "Iter.Next", f, gold, test, string(acc))
buf := make([]byte, 128)
acc = nil
@@ -196,33 +237,32 @@ func doTest(t *testing.T, tc *Test, f Form, gold, test string) {
acc = append(acc, buf[:nDst]...)
p += nSrc
}
cmpResult(t, tc, "Transform", f, gold, test, string(acc))
cmpResult(t, "Transform", f, gold, test, string(acc))
for i := range test {
out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
cmpResult(t, tc, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
cmpResult(t, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
}
cmpIsNormal(t, tc, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
cmpIsNormal(t, tc, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
cmpIsNormal(t, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
cmpIsNormal(t, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
}
func doConformanceTests(t *testing.T, tc *Test, partn int) {
func doConformanceTests(t *Test, partn int) {
for i := 0; i <= 2; i++ {
doTest(t, tc, NFC, tc.cols[1], tc.cols[i])
doTest(t, tc, NFD, tc.cols[2], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
doTest(t, norm.NFC, t.cols[1], t.cols[i])
doTest(t, norm.NFD, t.cols[2], t.cols[i])
doTest(t, norm.NFKC, t.cols[3], t.cols[i])
doTest(t, norm.NFKD, t.cols[4], t.cols[i])
}
for i := 3; i <= 4; i++ {
doTest(t, tc, NFC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFD, tc.cols[4], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
doTest(t, norm.NFC, t.cols[3], t.cols[i])
doTest(t, norm.NFD, t.cols[4], t.cols[i])
doTest(t, norm.NFKC, t.cols[3], t.cols[i])
doTest(t, norm.NFKD, t.cols[4], t.cols[i])
}
}
func TestCharacterByCharacter(t *testing.T) {
skipShort(t)
func CharacterByCharacterTests() {
tests := part[1].tests
var last rune = 0
for i := 0; i <= len(tests); i++ { // last one is special case
@@ -234,39 +274,37 @@ func TestCharacterByCharacter(t *testing.T) {
}
for last++; last < r; last++ {
// Check all characters that were not explicitly listed in the test.
tc := &Test{partnr: 1, number: -1}
t := &Test{partnr: 1, number: -1}
char := string(last)
doTest(t, tc, NFC, char, char)
doTest(t, tc, NFD, char, char)
doTest(t, tc, NFKC, char, char)
doTest(t, tc, NFKD, char, char)
doTest(t, norm.NFC, char, char)
doTest(t, norm.NFD, char, char)
doTest(t, norm.NFKC, char, char)
doTest(t, norm.NFKD, char, char)
}
if i < len(tests) {
doConformanceTests(t, &tests[i], 1)
doConformanceTests(&tests[i], 1)
}
}
}
func TestStandardTests(t *testing.T) {
skipShort(t)
func StandardTests() {
for _, j := range []int{0, 2, 3} {
for _, test := range part[j].tests {
doConformanceTests(t, &test, j)
doConformanceTests(&test, j)
}
}
}
// TestPerformance verifies that normalization is O(n). If any of the
// PerformanceTest verifies that normalization is O(n). If any of the
// code does not properly check for maxCombiningChars, normalization
// may exhibit O(n**2) behavior.
func TestPerformance(t *testing.T) {
skipShort(t)
func PerformanceTest() {
runtime.GOMAXPROCS(2)
success := make(chan bool, 1)
go func() {
buf := bytes.Repeat([]byte("\u035D"), 1024*1024)
buf = append(buf, "\u035B"...)
NFC.Append(nil, buf...)
norm.NFC.Append(nil, buf...)
success <- true
}()
timeout := time.After(1 * time.Second)
@@ -274,6 +312,7 @@ func TestPerformance(t *testing.T) {
case <-success:
// test completed before the timeout
case <-timeout:
t.Errorf(`unexpectedly long time to complete PerformanceTest`)
errorCount++
logger.Printf(`unexpectedly long time to complete PerformanceTest`)
}
}

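PerformanceTest above bounds the O(n) check with the usual goroutine-versus-timer race. The pattern reduced to its core (the sleep stands in for the normalization work):

package main

import (
	"fmt"
	"time"
)

func main() {
	success := make(chan bool, 1)
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for the real work
		success <- true
	}()
	select {
	case <-success:
		fmt.Println("completed before the timeout")
	case <-time.After(1 * time.Second):
		fmt.Println("unexpectedly long time to complete")
	}
}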

File diff suppressed because it is too large

17
NICKS

@@ -3,8 +3,6 @@
AudriusButkevicius <audrius.butkevicius@gmail.com>
Cathryne <cathryne.linenweaver@gmail.com> <Cathryne@users.noreply.github.com>
KayoticSully <kayoticsully@gmail.com>
LordLandon <lordlandon@gmail.com>
Moter8 <moter8@gmail.com>
Nutomic <me@nutomic.com>
Rewt0r <rewt0r@gmx.com> <Rewt0r@users.noreply.github.com>
Vilbrekin <vilbrekin@gmail.com>
@@ -14,25 +12,16 @@ andrew-d <andrew@du.nham.ca>
asdil12 <dominik@heidler.eu>
bencurthoys <ben@bencurthoys.com>
bigbear2nd <bigbear2nd@gmail.com>
brbecker <brbecker@gmail.com>
brendanlong <self@brendanlong.com>
brgmnn <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
bsidhom <bsidhom@gmail.com>
calmh <jakob@nym.se>
canton7 <antony.male@gmail.com>
cdata <chris@scriptolo.gy>
cdhowie <me@chrishowie.com>
ceh <emil@hessman.se>
cqcallaw <enlightened.despot@gmail.com>
dva <denisva@gmail.com>
dzarda <dzardacz@gmail.com>
facastagnini <federico.castagnini@gmail.com>
filoozoom <philippe@schommers.be>
frioux <frew@afoolishmanifesto.com> <frioux@gmail.com>
fti7 <frank@isemann.name>
gillisig <gilli@vx.is>
hadogenes <szafar@linux.pl>
jarlebring <jarlebring@gmail.com>
jedie <github.com@jensdiemer.de> <git@jensdiemer.de>
jpjp <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
kamadak <kamada@nanohz.org>
@@ -41,7 +30,6 @@ kozec <kozec@kozec.com>
krozycki <rozycki.karol@gmail.com>
marcindziadus <dziadus.marcin@gmail.com>
marclaporte <marc@marclaporte.com>
mogwa1 <devriesb@gmail.com>
moshen <moshen.colin@gmail.com>
mvdan <mvdan@mvdan.cc>
pascalj <github@pascalj.com> <mail@pascal-jungblut.com>
@@ -50,9 +38,8 @@ philips <brandon@ifup.org>
piobpl <piotrb10@gmail.com>
pluby <phill.luby@newredo.com>
pyfisch <pyfisch@gmail.com>
qbit <qbit@deftly.net>
ralder <ralder@yandex.ru>
rumpelsepp <stefan@sevenbyte.org>
qbit <qbit@deftly.net>
sciurius <jvromans@squirrel.nl>
seehuhn <voss@seehuhn.de>
snnd <dw@risu.io>
@@ -61,5 +48,3 @@ tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
uok <ueomkail@gmail.com> <uok@users.noreply.github.com>
veeti <veeti.paananen@rojekti.fi>
wsgcsysadmin <e.meitner@willystreet.coo>
zukoo <fxgsell@gmail.com>


@@ -1,58 +1,60 @@
Syncthing
syncthing
=========
[![Latest Build](http://img.shields.io/jenkins/s/http/build.syncthing.net/syncthing.svg?style=flat-square)](http://build.syncthing.net/job/syncthing/lastBuild/)
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](http://godoc.org/github.com/syncthing/syncthing)
[![MPLv2 License](http://img.shields.io/badge/license-MPLv2-blue.svg?style=flat-square)](https://www.mozilla.org/MPL/2.0/)
This is the Syncthing project which pursues the following goals:
This is the `syncthing` project. The following are the project goals:
1. Define a protocol for synchronization of a folder between a number of
collaborating devices. This protocol should be well defined, unambiguous,
collaborating devices. The protocol should be well defined, unambiguous,
easily understood, free to use, efficient, secure and language neutral.
This is called the [Block Exchange
This is the [Block Exchange
Protocol](https://github.com/syncthing/specs/blob/master/BEPv1.md).
2. Provide the reference implementation to demonstrate the usability of
said protocol. This is the `syncthing` utility. We hope that
alternative, compatible implementations of the protocol will arise.
said protocol. This is the `syncthing` utility. It is the hope that
alternative, compatible implementations of the protocol will come to
exist.
The two are evolving together; the protocol is not to be considered
stable until Syncthing 1.0 is released, at which point it is locked down
stable until syncthing 1.0 is released, at which point it is locked down
for incompatible changes.
Getting Started
---------------
Take a look at the [getting started
guide](http://docs.syncthing.net/intro/getting-started.html).
guide](https://github.com/syncthing/syncthing/wiki/Getting-Started).
There are a few examples for keeping Syncthing running in the background
There are a few examples for keeping syncthing running in the background
on your system in [the etc directory](https://github.com/syncthing/syncthing/blob/master/etc).
There is an IRC channel, `#syncthing` on Freenode, for talking directly
to developers and users.
to developers and users (when awake and present, etc.).
Building
--------
Building Syncthing from source is easy, and there's a
[guide](http://docs.syncthing.net/dev/building.html)
that describes it for both Unix and Windows systems.
[guide](https://github.com/syncthing/syncthing/wiki/Building)
that describes it for both Unix and Windows.
Signed Releases
---------------
As of v0.10.15 and onwards, git tags and release binaries are GPG signed
with the key D26E6ED000654A3E (see https://syncthing.net/security.html).
with the key D26E6ED000654A3E (see http://syncthing.net/security.html).
For release binaries, MD5 and SHA1 checksums are calculated and signed,
available in the md5sum.txt.asc and sha1sum.txt.asc files.
Documentation
=============
Please see the [Syncthing
documentation site](http://docs.syncthing.net/).
The [syncthing
documentation](https://github.com/syncthing/syncthing/wiki/) is on the
Github wiki.
All code is licensed under the
[MPLv2 License](https://github.com/syncthing/syncthing/blob/master/LICENSE).
[MPLv2](https://github.com/syncthing/syncthing/blob/master/LICENSE).


Binary file not shown. (Before: 3.6 KiB, After: 3.7 KiB)


Binary file not shown. (Before: 1.7 KiB, After: 1.8 KiB)


Binary file not shown. (Before: 3.7 KiB, After: 3.8 KiB)


@@ -1,58 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build ignore
// Generates the list of contributors in gui/index.html based on contents of
// AUTHORS.
package main
import (
"io/ioutil"
"log"
"os"
"regexp"
"sort"
"strings"
)
func main() {
bs := readAll("AUTHORS")
lines := strings.Split(string(bs), "\n")
nameRe := regexp.MustCompile(`(.+?)\s+<`)
authors := make([]string, 0, len(lines))
for _, line := range lines {
if m := nameRe.FindStringSubmatch(line); len(m) == 2 {
authors = append(authors, " <li class=\"auto-generated\">"+m[1]+"</li>")
}
}
sort.Strings(authors)
replacement := strings.Join(authors, "\n")
authorsRe := regexp.MustCompile(`(?s)id="contributor-list">.*?</ul>`)
bs = readAll("gui/index.html")
bs = authorsRe.ReplaceAll(bs, []byte("id=\"contributor-list\">\n"+replacement+"\n </ul>"))
if err := ioutil.WriteFile("gui/index.html", bs, 0644); err != nil {
log.Fatal(err)
}
}
func readAll(path string) []byte {
fd, err := os.Open(path)
if err != nil {
log.Fatal(err)
}
defer fd.Close()
bs, err := ioutil.ReadAll(fd)
if err != nil {
log.Fatal(err)
}
return bs
}


@@ -1,50 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build ignore
// Neatly format benchmarking output which otherwise looks like crap.
package main
import (
"bufio"
"bytes"
"fmt"
"os"
"regexp"
"text/tabwriter"
)
var (
benchRe = regexp.MustCompile(`^Bench`)
spacesRe = regexp.MustCompile(`\s+`)
numbersRe = regexp.MustCompile(`\b[\d\.]+\b`)
)
func main() {
tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0)
br := bufio.NewScanner(os.Stdin)
n := 0
for br.Scan() {
line := br.Bytes()
if benchRe.Match(line) {
n++
line = spacesRe.ReplaceAllLiteral(line, []byte("\t"))
line = numbersRe.ReplaceAllFunc(line, func(n []byte) []byte {
return []byte(fmt.Sprintf("%12s", n))
})
tw.Write(line)
tw.Write([]byte("\n"))
} else if n > 0 && bytes.HasPrefix(line, []byte("ok")) {
n = 0
tw.Flush()
fmt.Printf("%s\n\n", line)
}
}
tw.Flush()
}

162
build.go

@@ -78,11 +78,6 @@ func main() {
tags = []string{"noupgrade"}
}
install("./cmd/...", tags)
vet("./cmd/syncthing")
vet("./internal/...")
lint("./cmd/syncthing")
lint("./internal/...")
return
}
@@ -108,10 +103,8 @@ func main() {
build(pkg, tags)
case "test":
test("./...")
case "bench":
bench("./...")
pkg := "./..."
test(pkg)
case "assets":
assets()
@@ -134,20 +127,9 @@ func main() {
case "zip":
buildZip()
case "deb":
buildDeb()
case "clean":
clean()
case "vet":
vet("./cmd/syncthing")
vet("./internal/...")
case "lint":
lint("./cmd/syncthing")
lint("./internal/...")
default:
log.Fatalf("Unknown command %q", cmd)
}
@@ -185,11 +167,6 @@ func test(pkg string) {
runPrint("go", "test", "-short", "-timeout", "60s", pkg)
}
func bench(pkg string) {
setBuildEnv()
runPrint("go", "test", "-run", "NONE", "-bench", ".", pkg)
}
func install(pkg string, tags []string) {
os.Setenv("GOBIN", "./bin")
args := []string{"install", "-v", "-ldflags", ldflags()}
@@ -283,97 +260,6 @@ func buildZip() {
log.Println(filename)
}
func buildDeb() {
os.RemoveAll("deb")
// "goarch" here is set to whatever the Debian packages expect. We correct
// "it to what we actually know how to build and keep the Debian variant
// "name in "debarch".
debarch := goarch
switch goarch {
case "i386":
goarch = "386"
case "armel", "armhf":
goarch = "arm"
}
build("./cmd/syncthing", []string{"noupgrade"})
files := []archiveFile{
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "deb/usr/share/doc/syncthing/AUTHORS.txt", perm: 0644},
{src: "syncthing", dst: "deb/usr/bin/syncthing", perm: 0755},
{src: "man/syncthing.1", dst: "deb/usr/share/man/man1/syncthing.1", perm: 0644},
{src: "man/syncthing-config.5", dst: "deb/usr/share/man/man5/syncthing-config.5", perm: 0644},
{src: "man/syncthing-stignore.5", dst: "deb/usr/share/man/man5/syncthing-stignore.5", perm: 0644},
{src: "man/syncthing-device-ids.7", dst: "deb/usr/share/man/man7/syncthing-device-ids.7", perm: 0644},
{src: "man/syncthing-event-api.7", dst: "deb/usr/share/man/man7/syncthing-event-api.7", perm: 0644},
{src: "man/syncthing-faq.7", dst: "deb/usr/share/man/man7/syncthing-faq.7", perm: 0644},
{src: "man/syncthing-networking.7", dst: "deb/usr/share/man/man7/syncthing-networking.7", perm: 0644},
{src: "man/syncthing-rest-api.7", dst: "deb/usr/share/man/man7/syncthing-rest-api.7", perm: 0644},
{src: "man/syncthing-security.7", dst: "deb/usr/share/man/man7/syncthing-security.7", perm: 0644},
{src: "man/syncthing-versioning.7", dst: "deb/usr/share/man/man7/syncthing-versioning.7", perm: 0644},
{src: "etc/linux-systemd/system/syncthing@.service", dst: "deb/lib/systemd/system/syncthing@.service", perm: 0644},
{src: "etc/linux-systemd/user/syncthing.service", dst: "deb/usr/lib/systemd/user/syncthing.service", perm: 0644},
}
for _, file := range listFiles("extra") {
files = append(files, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
for _, af := range files {
if err := copyFile(af.src, af.dst, af.perm); err != nil {
log.Fatal(err)
}
}
control := `Package: syncthing
Architecture: {{arch}}
Depends: libc6
Version: {{version}}
Maintainer: Syncthing Release Management <release@syncthing.net>
Description: Open Source Continuous File Synchronization
Syncthing does bidirectional synchronization of files between two or
more computers.
`
changelog := `syncthing ({{version}}); urgency=medium
* Packaging of {{version}}.
-- Jakob Borg <jakob@nym.se> {{date}}
`
control = strings.Replace(control, "{{arch}}", debarch, -1)
control = strings.Replace(control, "{{version}}", version[1:], -1)
changelog = strings.Replace(changelog, "{{arch}}", debarch, -1)
changelog = strings.Replace(changelog, "{{version}}", version[1:], -1)
changelog = strings.Replace(changelog, "{{date}}", time.Now().Format(time.RFC1123), -1)
os.MkdirAll("deb/DEBIAN", 0755)
ioutil.WriteFile("deb/DEBIAN/control", []byte(control), 0644)
ioutil.WriteFile("deb/DEBIAN/compat", []byte("9\n"), 0644)
ioutil.WriteFile("deb/DEBIAN/changelog", []byte(changelog), 0644)
}
func copyFile(src, dst string, perm os.FileMode) error {
dstDir := filepath.Dir(dst)
os.MkdirAll(dstDir, 0755) // ignore error
srcFd, err := os.Open(src)
if err != nil {
return err
}
defer srcFd.Close()
dstFd, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
if err != nil {
return err
}
defer dstFd.Close()
_, err = io.Copy(dstFd, srcFd)
return err
}
func listFiles(dir string) []string {
var res []string
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
@@ -551,7 +437,10 @@ func run(cmd string, args ...string) []byte {
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
return bytes.TrimSpace(bs), err
if err != nil {
return nil, err
}
return bytes.TrimSpace(bs), nil
}
func runPrint(cmd string, args ...string) {
@@ -582,9 +471,8 @@ func runPipe(file, cmd string, args ...string) {
}
type archiveFile struct {
src string
dst string
perm os.FileMode
src string
dst string
}
func tarGz(out string, files []archiveFile) {
@@ -727,37 +615,3 @@ func md5File(file string) error {
return out.Close()
}
func vet(pkg string) {
bs, err := runError("go", "vet", pkg)
if err != nil && err.Error() == "exit status 3" || bytes.Contains(bs, []byte("no such tool \"vet\"")) {
// Go said there is no go vet
log.Println(`- No go vet, no vetting. Try "go get -u golang.org/x/tools/cmd/vet".`)
return
}
falseAlarmComposites := regexp.MustCompile("composite literal uses unkeyed fields")
exitStatus := regexp.MustCompile("exit status 1")
for _, line := range bytes.Split(bs, []byte("\n")) {
if falseAlarmComposites.Match(line) || exitStatus.Match(line) {
continue
}
log.Printf("%s", line)
}
}
func lint(pkg string) {
bs, err := runError("golint", pkg)
if err != nil {
log.Println(`- No golint, not linting. Try "go get -u github.com/golang/lint/golint".`)
return
}
analCommentPolicy := regexp.MustCompile(`exported (function|method|const|type|var) [^\s]+ should have comment`)
for _, line := range bytes.Split(bs, []byte("\n")) {
if analCommentPolicy.Match(line) {
continue
}
log.Printf("%s", line)
}
}

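The two runError variants above differ in whether output survives a failed command: returning the (trimmed) combined output together with the error lets callers like vet() parse diagnostics from a non-zero exit. A sketch of that variant in use (the go vet invocation is illustrative):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

func runError(cmd string, args ...string) ([]byte, error) {
	ecmd := exec.Command(cmd, args...)
	bs, err := ecmd.CombinedOutput()
	// Return the output even on error; diagnostics often arrive
	// together with a non-zero exit status.
	return bytes.TrimSpace(bs), err
}

func main() {
	bs, err := runError("go", "vet", "./...")
	fmt.Printf("err=%v\n%s\n", err, bs)
}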

@@ -17,11 +17,8 @@ case "${1:-default}" in
ulimit -t 60 &>/dev/null || true
ulimit -d 512000 &>/dev/null || true
ulimit -m 512000 &>/dev/null || true
go run build.go test
;;
bench)
LOGGER_DISCARD=1 go run build.go bench | go run benchfilter.go
go run build.go "$1"
;;
tar)
@@ -44,17 +41,7 @@ case "${1:-default}" in
go run build.go "$1"
;;
prerelease)
go run build.go transifex
git add -A gui/assets/ internal/auto/
pushd man ; ./refresh.sh ; popd
git add -A man
echo
echo Changelog:
go run changelog.go
;;
deb)
transifex)
go run build.go "$1"
;;
@@ -131,9 +118,10 @@ case "${1:-default}" in
-e "STTRACE=$STTRACE" \
syncthing/build:latest \
sh -c './build.sh clean \
&& ./build.sh test-cov \
&& ./build.sh bench \
&& ./build.sh all'
&& go vet ./cmd/... ./internal/... \
&& ( golint ./cmd/... ; golint ./internal/... ) | egrep -v "comment on exported|should have comment" \
&& ./build.sh all \
&& STTRACE=all ./build.sh test-cov'
;;
docker-test)
@@ -146,29 +134,10 @@ case "${1:-default}" in
&& go run build.go -race \
&& export GOPATH=$(pwd)/Godeps/_workspace:$GOPATH \
&& cd test \
&& go test -tags integration -v -timeout 90m -short \
&& go test -tags integration -v -timeout 60m -short \
&& git clean -fxd .'
;;
docker-lint)
docker run --rm -h syncthing-builder -u $(id -u) -t \
-v $(pwd):/go/src/github.com/syncthing/syncthing \
-w /go/src/github.com/syncthing/syncthing \
-e "STTRACE=$STTRACE" \
syncthing/build:latest \
sh -euxc 'go run build.go lint'
;;
docker-vet)
docker run --rm -h syncthing-builder -u $(id -u) -t \
-v $(pwd):/go/src/github.com/syncthing/syncthing \
-w /go/src/github.com/syncthing/syncthing \
-e "STTRACE=$STTRACE" \
syncthing/build:latest \
sh -euxc 'go run build.go vet'
;;
*)
echo "Unknown build command $1"
;;


@@ -1,72 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
// +build ignore
package main
import (
"bytes"
"flag"
"fmt"
"log"
"os/exec"
"regexp"
)
var (
subjectIssues = regexp.MustCompile(`^([^(]+)\s+\((?:fixes|ref) ([^)]+)\)(?:[^\w])?$`)
issueNumbers = regexp.MustCompile(`(#\d+)`)
)
func main() {
flag.Parse()
// Display changelog since the version given on the command line, or
// figure out the last release if there were no arguments.
var prevRel string
if flag.NArg() > 0 {
prevRel = flag.Arg(0)
} else {
bs, err := runError("git", "describe", "--abbrev=0", "HEAD^")
if err != nil {
log.Fatal(err)
}
prevRel = string(bs)
}
// Get the git log with subject and author nickname
bs, err := runError("git", "log", "--reverse", "--pretty=format:%s|%aN", prevRel+"..")
if err != nil {
log.Fatal(err)
}
// Split into lines
for _, line := range bytes.Split(bs, []byte{'\n'}) {
// Split into subject and author
fields := bytes.Split(line, []byte{'|'})
subj := fields[0]
author := fields[1]
// Check if subject contains a "(fixes ...)" or "(ref ...)""
if m := subjectIssues.FindSubmatch(subj); len(m) > 0 {
// Find all issue numbers
issues := issueNumbers.FindAll(m[2], -1)
// Format a changelog entry
fmt.Printf("* %s (%s, @%s)\n", m[1], bytes.Join(issues, []byte(", ")), author)
}
}
}
func runError(cmd string, args ...string) ([]byte, error) {
ecmd := exec.Command(cmd, args...)
bs, err := ecmd.CombinedOutput()
if err != nil {
return nil, err
}
return bytes.TrimSpace(bs), nil
}

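The regular expressions in the removed changelog.go pull the subject and issue references out of each commit line. For example (the commit subject and author below are hypothetical):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	subjectIssues = regexp.MustCompile(`^([^(]+)\s+\((?:fixes|ref) ([^)]+)\)(?:[^\w])?$`)
	issueNumbers  = regexp.MustCompile(`(#\d+)`)
)

func main() {
	subj := "Fix crash on walker error (fixes #1507)"
	if m := subjectIssues.FindStringSubmatch(subj); len(m) > 0 {
		issues := issueNumbers.FindAllString(m[2], -1)
		// Prints: * Fix crash on walker error (#1507, @calmh)
		fmt.Printf("* %s (%s, @%s)\n", m[1], strings.Join(issues, ", "), "calmh")
	}
}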
11
changelog.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/bash
since="$1"
if [[ -z $since ]] ; then
since="$(git describe --abbrev=0 HEAD^).."
fi
git log --reverse --pretty=format:'* %s, @%aN)' "$since" | egrep 'fixes #\d|ref #\d' | sed 's/)[,. ]*,/,/' | sed 's/fixes #/#/g' | sed 's/ref #/#/g'
git diff "$since" -- AUTHORS


@@ -17,10 +17,7 @@ no-docs-typos() {
grep -v a9339d0627fff439879d157c75077f02c9fac61b |\
grep -v 254c63763a3ad42fd82259f1767db526cff94a14 |\
grep -v 4b76ec40c07078beaa2c5e250ed7d9bd6276a718 |\
grep -v ffc39dfbcb34eacc3ea12327a02b6e7741a2c207 |\
grep -v 32a76901a91ff0f663db6f0830e0aedec946e4d0 |\
grep -v af3288043a49bcc28f8ae3060852a09de552fe5f |\
grep -v 3626003f680bad3e63677982576d3a05421e88e9
grep -v ffc39dfbcb34eacc3ea12327a02b6e7741a2c207
}
print-missing-authors() {
@@ -30,7 +27,7 @@ print-missing-authors() {
}
print-missing-copyright() {
find . -name \*.go | xargs egrep -L 'Copyright|automatically generated' | grep -v Godeps | grep -v internal/auto/
find . -name \*.go | xargs egrep -L 'Copyright \(C\)|automatically generated' | grep -v Godeps | grep -v internal/auto/
}
authors=$(print-missing-authors)


@@ -15,31 +15,33 @@ import (
"flag"
"go/format"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"text/template"
"time"
)
var tpl = template.Must(template.New("assets").Parse(`package auto
import (
"bytes"
"compress/gzip"
"encoding/base64"
)
const (
AssetsBuildDate = "{{.BuildDate}}"
"io/ioutil"
)
func Assets() map[string][]byte {
var assets = make(map[string][]byte, {{.Assets | len}})
{{range $asset := .Assets}}
assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}"){{end}}
var assets = make(map[string][]byte, {{.assets | len}})
var bs []byte
var gr *gzip.Reader
{{range $asset := .assets}}
bs, _ = base64.StdEncoding.DecodeString("{{$asset.Data}}")
gr, _ = gzip.NewReader(bytes.NewReader(bs))
bs, _ = ioutil.ReadAll(gr)
assets["{{$asset.Name}}"] = bs
{{end}}
return assets
}
`))
type asset struct {
@@ -84,20 +86,12 @@ func walkerFor(basePath string) filepath.WalkFunc {
}
}
type templateVars struct {
Assets []asset
BuildDate string
}
func main() {
flag.Parse()
filepath.Walk(flag.Arg(0), walkerFor(flag.Arg(0)))
var buf bytes.Buffer
tpl.Execute(&buf, templateVars{
Assets: assets,
BuildDate: time.Now().UTC().Format(http.TimeFormat),
})
tpl.Execute(&buf, map[string][]asset{"assets": assets})
bs, err := format.Source(buf.Bytes())
if err != nil {
panic(err)

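The older template above stores each asset gzip-compressed and base64-encoded, then unpacks everything once at startup. Both halves of that round trip in one self-contained sketch:

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

func main() {
	// Generator side: gzip the asset, then base64 it so it can be
	// embedded in a Go string literal.
	var gz bytes.Buffer
	gw := gzip.NewWriter(&gz)
	gw.Write([]byte("<html>hello</html>"))
	gw.Close()
	embedded := base64.StdEncoding.EncodeToString(gz.Bytes())

	// Runtime side, as in the template: decode, gunzip, keep the bytes.
	bs, _ := base64.StdEncoding.DecodeString(embedded)
	gr, _ := gzip.NewReader(bytes.NewReader(bs))
	asset, _ := ioutil.ReadAll(gr)
	fmt.Printf("%s\n", asset)
}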

@@ -27,7 +27,7 @@ func main() {
log.SetOutput(os.Stdout)
log.SetFlags(0)
target := flag.String("target", "localhost:8384", "Target Syncthing instance")
target := flag.String("target", "localhost:8080", "Target Syncthing instance")
apikey := flag.String("apikey", "", "Syncthing API key")
flag.Parse()


@@ -7,7 +7,6 @@
package main
import (
"encoding/binary"
"flag"
"fmt"
"log"
@@ -16,71 +15,41 @@ import (
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/db"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
folder := flag.String("folder", "default", "Folder ID")
device := flag.String("device", "", "Device ID (blank for global)")
flag.Parse()
ldb, err := leveldb.OpenFile(flag.Arg(0), &opt.Options{
ErrorIfMissing: true,
Strict: opt.StrictAll,
OpenFilesCacheCapacity: 100,
})
ldb, err := leveldb.OpenFile(flag.Arg(0), nil)
if err != nil {
log.Fatal(err)
}
it := ldb.NewIterator(nil, nil)
var dev protocol.DeviceID
for it.Next() {
key := it.Key()
switch key[0] {
case db.KeyTypeDevice:
folder := nulString(key[1 : 1+64])
devBytes := key[1+64 : 1+64+32]
name := nulString(key[1+64+32:])
copy(dev[:], devBytes)
fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)
fs := db.NewFileSet(*folder, ldb)
var f protocol.FileInfo
err := f.UnmarshalXDR(it.Value())
if err != nil {
log.Fatal(err)
}
fmt.Printf(" N:%q\n F:%#o\n M:%d\n V:%v\n S:%d\n B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))
case db.KeyTypeGlobal:
folder := nulString(key[1 : 1+64])
name := nulString(key[1+64:])
fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())
case db.KeyTypeBlock:
folder := nulString(key[1 : 1+64])
hash := key[1+64 : 1+64+32]
name := nulString(key[1+64+32:])
fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))
case db.KeyTypeDeviceStatistic:
fmt.Printf("[dstat]\n %x\n %x\n", it.Key(), it.Value())
case db.KeyTypeFolderStatistic:
fmt.Printf("[fstat]\n %x\n %x\n", it.Key(), it.Value())
default:
fmt.Printf("[???]\n %x\n %x\n", it.Key(), it.Value())
if *device == "" {
log.Printf("*** Global index for folder %q", *folder)
fs.WithGlobalTruncated(func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
fmt.Println(f)
fmt.Println("\t", fs.Availability(f.Name))
return true
})
} else {
n, err := protocol.DeviceIDFromString(*device)
if err != nil {
log.Fatal(err)
}
log.Printf("*** Have index for folder %q device %q", *folder, n)
fs.WithHaveTruncated(n, func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
fmt.Println(f)
return true
})
}
}
func nulString(bs []byte) string {
for i := range bs {
if bs[i] == 0 {
return string(bs[:i])
}
}
return string(bs)
}

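The removed low-level mode of stindex walks the raw leveldb keyspace and switches on the key-type byte. Opening the database and dumping raw pairs, reduced to a sketch (the index path is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	ldb, err := leveldb.OpenFile("/path/to/index", &opt.Options{
		ErrorIfMissing: true, // fail fast rather than creating a new db
	})
	if err != nil {
		log.Fatal(err)
	}
	defer ldb.Close()

	it := ldb.NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		// The first key byte is the key type (device, global, block, ...).
		fmt.Printf("type %d: %x = %x\n", it.Key()[0], it.Key(), it.Value())
	}
}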

@@ -1,98 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"crypto/md5"
"flag"
"fmt"
"io"
"os"
"time"
)
func getmd5(filePath string) ([]byte, error) {
var result []byte
file, err := os.Open(filePath)
if err != nil {
return result, err
}
defer file.Close()
hash := md5.New()
if _, err := io.Copy(hash, file); err != nil {
return result, err
}
return hash.Sum(result), nil
}
func main() {
period := flag.Duration("period", 200*time.Millisecond, "Sleep period between checks")
flag.Parse()
file := flag.Arg(0)
if file == "" {
fmt.Println("Expects a path as an argument")
return
}
exists := true
size := int64(0)
mtime := time.Time{}
hash := []byte{}
for {
time.Sleep(*period)
newExists := true
fi, err := os.Stat(file)
if err != nil && os.IsNotExist(err) {
newExists = false
} else if err != nil {
fmt.Println("stat:", err)
return
}
if newExists != exists {
exists = newExists
if !newExists {
fmt.Println(file, "does not exist")
} else {
fmt.Println(file, "appeared")
}
}
if !exists {
size = 0
mtime = time.Time{}
hash = []byte{}
continue
}
if fi.IsDir() {
fmt.Println(file, "is directory")
return
}
newSize := fi.Size()
newMtime := fi.ModTime()
newHash, err := getmd5(file)
if err != nil {
fmt.Println("getmd5:", err)
}
if newSize != size || newMtime != mtime || !bytes.Equal(newHash, hash) {
fmt.Println(file, "Size:", newSize, "Mtime:", newMtime, "Hash:", fmt.Sprintf("%x", newHash))
hash = newHash
size = newSize
mtime = newMtime
}
}
}


@@ -1,69 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package main
import (
"encoding/json"
"io"
"github.com/syncthing/syncthing/internal/events"
)
// The auditSvc subscribes to events and writes these in JSON format, one
// event per line, to the specified writer.
type auditSvc struct {
w io.Writer // audit destination
stop chan struct{} // signals time to stop
started chan struct{} // signals startup complete
stopped chan struct{} // signals stop complete
}
func newAuditSvc(w io.Writer) *auditSvc {
return &auditSvc{
w: w,
stop: make(chan struct{}),
started: make(chan struct{}),
stopped: make(chan struct{}),
}
}
// Serve runs the audit service.
func (s *auditSvc) Serve() {
defer close(s.stopped)
sub := events.Default.Subscribe(events.AllEvents)
defer events.Default.Unsubscribe(sub)
enc := json.NewEncoder(s.w)
// We're ready to start processing events.
close(s.started)
for {
select {
case ev := <-sub.C():
enc.Encode(ev)
case <-s.stop:
return
}
}
}
// Stop stops the audit service.
func (s *auditSvc) Stop() {
close(s.stop)
}
// WaitForStart returns once the audit service is ready to receive events, or
// immediately if it's already running.
func (s *auditSvc) WaitForStart() {
<-s.started
}
// WaitForStop returns once the audit service has stopped.
// (Needed by the tests.)
func (s *auditSvc) WaitForStop() {
<-s.stopped
}


@@ -1,54 +0,0 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package main
import (
"bytes"
"strings"
"testing"
"time"
"github.com/syncthing/syncthing/internal/events"
)
func TestAuditService(t *testing.T) {
buf := new(bytes.Buffer)
svc := newAuditSvc(buf)
// Event sent before start, will not be logged
events.Default.Log(events.Ping, "the first event")
go svc.Serve()
svc.WaitForStart()
// Event that should end up in the audit log
events.Default.Log(events.Ping, "the second event")
// We need to give the events time to arrive, since the channels are buffered etc.
time.Sleep(10 * time.Millisecond)
svc.Stop()
svc.WaitForStop()
// This event should not be logged, since we have stopped.
events.Default.Log(events.Ping, "the third event")
result := string(buf.Bytes())
t.Log(result)
if strings.Contains(result, "first event") {
t.Error("Unexpected first event")
}
if !strings.Contains(result, "second event") {
t.Error("Missing second event")
}
if strings.Contains(result, "third event") {
t.Error("Missing third event")
}
}


@@ -15,84 +15,23 @@ import (
"time"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/config"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/model"
"github.com/thejerf/suture"
)
// The connection service listens on TLS and dials configured unconnected
// devices. Successful connections are handed to the model.
type connectionSvc struct {
*suture.Supervisor
cfg *config.Wrapper
myID protocol.DeviceID
model *model.Model
tlsCfg *tls.Config
conns chan *tls.Conn
}
func listenConnect(myID protocol.DeviceID, m *model.Model, tlsCfg *tls.Config) {
var conns = make(chan *tls.Conn)
func newConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, model *model.Model, tlsCfg *tls.Config) *connectionSvc {
svc := &connectionSvc{
Supervisor: suture.NewSimple("connectionSvc"),
cfg: cfg,
myID: myID,
model: model,
tlsCfg: tlsCfg,
conns: make(chan *tls.Conn),
// Listen
for _, addr := range cfg.Options().ListenAddress {
go listenTLS(conns, addr, tlsCfg)
}
// There are several moving parts here; one routine per listening address
// to handle incoming connections, one routine to periodically attempt
// outgoing connections, and lastly one routine to do the common handling
// regardless of whether the connection was incoming or outgoing. It ends
// up as in the diagram below. We embed a Supervisor to manage the
// routines (i.e. log and restart if they crash or exit, etc).
//
// +-----------------+
// Incoming | +---------------+-+ +-----------------+
// Connections | | | | | Outgoing
// -------------->| | svc.listen | | | Connections
// | | (1 per listen | | svc.connect |-------------->
// | | address) | | |
// +-+ | | |
// +-----------------+ +-----------------+
// v v
// | |
// | |
// +------------+-----------+
// |
// | svc.conns
// v
// +-----------------+
// | |
// | |
// | svc.handle |------> model.AddConnection()
// | |
// | |
// +-----------------+
//
// TODO: Clean shutdown, and/or handling config changes on the fly. We
// partly do this now - new devices and addresses will be picked up, but
// not new listen addresses and we don't support disconnecting devices
// that are removed and so on...
// Connect
go dialTLS(m, conns, tlsCfg)
svc.Add(serviceFunc(svc.connect))
for _, addr := range svc.cfg.Options().ListenAddress {
addr := addr
listener := serviceFunc(func() {
svc.listen(addr)
})
svc.Add(listener)
}
svc.Add(serviceFunc(svc.handle))
return svc
}
func (s *connectionSvc) handle() {
next:
for conn := range s.conns {
for conn := range conns {
cs := conn.ConnectionState()
// We should have negotiated the next level protocol "bep/1.0" as part
@@ -116,7 +55,7 @@ next:
remoteID := protocol.NewDeviceID(remoteCert.Raw)
// The device ID should not be that of ourselves. It can happen
// though, especially in the presence of NAT hairpinning, multiple
// though, especially in the presense of NAT hairpinning, multiple
// clients between the same NAT gateway, and global discovery.
if remoteID == myID {
l.Infof("Connected to myself (%s) - should not happen", remoteID)
@@ -128,15 +67,15 @@ next:
// could use some better handling. If the old connection is dead but
// hasn't timed out yet we may want to drop *that* connection and keep
// this one. But in case we are two devices connecting to each other
// in parallel we don't want to do that or we end up with no
// in parallell we don't want to do that or we end up with no
// connections still established...
if s.model.ConnectedTo(remoteID) {
if m.ConnectedTo(remoteID) {
l.Infof("Connected to already connected device (%s)", remoteID)
conn.Close()
continue
}
for deviceID, deviceCfg := range s.cfg.Devices() {
for deviceID, deviceCfg := range cfg.Devices() {
if deviceID == remoteID {
// Verify the name on the certificate. By default we set it to
// "syncthing" when generating, but the user may have replaced
@@ -158,7 +97,7 @@ next:
// If rate limiting is set, and based on the address we should
// limit the connection, then we wrap it in a limiter.
limit := s.shouldLimit(conn.RemoteAddr())
limit := shouldLimit(conn.RemoteAddr())
wr := io.Writer(conn)
if limit && writeRateLimit != nil {
@@ -171,19 +110,23 @@ next:
}
name := fmt.Sprintf("%s-%s", conn.LocalAddr(), conn.RemoteAddr())
protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)
protoConn := protocol.NewConnection(remoteID, rd, wr, m, name, deviceCfg.Compression)
l.Infof("Established secure connection to %s at %s", remoteID, name)
if debugNet {
l.Debugf("cipher suite: %04X in lan: %t", conn.ConnectionState().CipherSuite, !limit)
}
events.Default.Log(events.DeviceConnected, map[string]string{
"id": remoteID.String(),
"addr": conn.RemoteAddr().String(),
})
s.model.AddConnection(conn, protoConn)
m.AddConnection(conn, protoConn)
continue next
}
}
if !s.cfg.IgnoredDevice(remoteID) {
if !cfg.IgnoredDevice(remoteID) {
events.Default.Log(events.DeviceRejected, map[string]string{
"device": remoteID.String(),
"address": conn.RemoteAddr().String(),
@@ -197,7 +140,7 @@ next:
}
}
func (s *connectionSvc) listen(addr string) {
func listenTLS(conns chan *tls.Conn, addr string, tlsCfg *tls.Config) {
if debugNet {
l.Debugln("listening on", addr)
}
@@ -223,9 +166,9 @@ func (s *connectionSvc) listen(addr string) {
}
tcpConn := conn.(*net.TCPConn)
s.setTCPOptions(tcpConn)
setTCPOptions(tcpConn)
tc := tls.Server(conn, s.tlsCfg)
tc := tls.Server(conn, tlsCfg)
err = tc.Handshake()
if err != nil {
l.Infoln("TLS handshake:", err)
@@ -233,20 +176,21 @@ func (s *connectionSvc) listen(addr string) {
continue
}
s.conns <- tc
conns <- tc
}
}
func (s *connectionSvc) connect() {
func dialTLS(m *model.Model, conns chan *tls.Conn, tlsCfg *tls.Config) {
delay := time.Second
for {
nextDevice:
for deviceID, deviceCfg := range s.cfg.Devices() {
for deviceID, deviceCfg := range cfg.Devices() {
if deviceID == myID {
continue
}
if s.model.ConnectedTo(deviceID) {
if m.ConnectedTo(deviceID) {
continue
}
@@ -294,9 +238,9 @@ func (s *connectionSvc) connect() {
continue
}
s.setTCPOptions(conn)
setTCPOptions(conn)
tc := tls.Client(conn, s.tlsCfg)
tc := tls.Client(conn, tlsCfg)
err = tc.Handshake()
if err != nil {
l.Infoln("TLS handshake:", err)
@@ -304,20 +248,20 @@ func (s *connectionSvc) connect() {
continue
}
s.conns <- tc
conns <- tc
continue nextDevice
}
}
time.Sleep(delay)
delay *= 2
if maxD := time.Duration(s.cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
if maxD := time.Duration(cfg.Options().ReconnectIntervalS) * time.Second; delay > maxD {
delay = maxD
}
}
}
func (*connectionSvc) setTCPOptions(conn *net.TCPConn) {
func setTCPOptions(conn *net.TCPConn) {
var err error
if err = conn.SetLinger(0); err != nil {
l.Infoln(err)
@@ -333,8 +277,8 @@ func (*connectionSvc) setTCPOptions(conn *net.TCPConn) {
}
}
func (s *connectionSvc) shouldLimit(addr net.Addr) bool {
if s.cfg.Options().LimitBandwidthInLan {
func shouldLimit(addr net.Addr) bool {
if cfg.Options().LimitBandwidthInLan {
return true
}
@@ -349,24 +293,3 @@ func (s *connectionSvc) shouldLimit(addr net.Addr) bool {
}
return !tcpaddr.IP.IsLoopback()
}
func (s *connectionSvc) VerifyConfiguration(from, to config.Configuration) error {
return nil
}
func (s *connectionSvc) CommitConfiguration(from, to config.Configuration) bool {
// We require a restart if a device as been removed.
newDevices := make(map[protocol.DeviceID]bool, len(to.Devices))
for _, dev := range to.Devices {
newDevices[dev.DeviceID] = true
}
for _, dev := range from.Devices {
if !newDevices[dev.DeviceID] {
return false
}
}
return true
}

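The refactored connectionSvc embeds a suture.Supervisor and registers its listen/connect/handle routines through a serviceFunc adapter. A self-contained sketch of that pattern (the adapter is assumed to look like the one used elsewhere in the package):

package main

import (
	"fmt"
	"time"

	"github.com/thejerf/suture"
)

// serviceFunc adapts a plain function to the suture.Service interface,
// so it can be supervised, restarted on crash, and logged.
type serviceFunc func()

func (f serviceFunc) Serve() { f() }
func (f serviceFunc) Stop()  {}

func main() {
	sup := suture.NewSimple("example")
	sup.Add(serviceFunc(func() {
		fmt.Println("routine running under supervision")
		time.Sleep(500 * time.Millisecond)
	}))
	sup.ServeBackground()
	time.Sleep(time.Second)
	sup.Stop()
}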

@@ -12,7 +12,5 @@ import (
)
var (
debugNet = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
debugHTTP = strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all"
debugSuture = strings.Contains(os.Getenv("STTRACE"), "suture") || os.Getenv("STTRACE") == "all"
debugNet = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
)

Some files were not shown because too many files have changed in this diff