Mirror of https://github.com/syncthing/syncthing.git, synced 2025-12-23 22:18:14 -05:00

Compare commits: 59 commits, v2.0.13-rc ... v0.12.25
| Author | SHA1 | Date |
|---|---|---|
| | 4a228697cd | |
| | b4f941784f | |
| | 62142c8ccd | |
| | 53f00ce02a | |
| | a53ec582a6 | |
| | 8b8c48900e | |
| | c0abde3157 | |
| | 94c6110f2e | |
| | 55e80c3883 | |
| | 6d280e7b64 | |
| | e6918a5857 | |
| | dca8245ba4 | |
| | 9550817078 | |
| | 7a91860735 | |
| | 0f4abac8c2 | |
| | b16050b978 | |
| | aced62fec3 | |
| | 5155e24bc7 | |
| | 3aabe3a51d | |
| | 17de015b90 | |
| | f19e71b333 | |
| | 8fea354b74 | |
| | 7a81c27cc6 | |
| | ea5808d833 | |
| | 8b045a826a | |
| | 1a6e078510 | |
| | daff8010cd | |
| | 4035930e0e | |
| | fc63a384b2 | |
| | 2f3449b651 | |
| | 31c65a39e4 | |
| | 7ba20928c3 | |
| | c482cbbe70 | |
| | 4042a3e406 | |
| | 392132dc3b | |
| | 459a3dc58c | |
| | 194a8b0922 | |
| | 8a8336ae08 | |
| | 458e0b3b8b | |
| | 9758dc6422 | |
| | e4a9fb8a27 | |
| | f0473fde17 | |
| | e2980a5210 | |
| | 764da14440 | |
| | 7da6c627fe | |
| | 0a092b5b7f | |
| | 9f8af2327d | |
| | 345e24142e | |
| | 0fdd03ddee | |
| | a0fa288cb6 | |
| | 70bac24832 | |
| | 1df40fbdeb | |
| | 91e9ffff85 | |
| | 853df14e2f | |
| | e17a772bb6 | |
| | 90e027d9a4 | |
| | fdc9a5d8b0 | |
| | 543891a0a0 | |
| | 06921443fc | |
2  .gitattributes  (vendored)

@@ -2,7 +2,7 @@
 * text=auto
 
 # Except the dependencies, which we leave alone
-vendor/** -text=auto
+Godeps/** -text=auto
 
 # Diffs on these files are meaningless
 gui.files.go -diff
4  AUTHORS

@@ -24,9 +24,7 @@ Chris Howie <me@chrishowie.com>
 Chris Joel <chris@scriptolo.gy>
 Colin Kennedy <moshen.colin@gmail.com>
-Daniel Bergmann <dan.arne.bergmann@gmail.com> <brgmnn@users.noreply.github.com>
-Daniel Harte <daniel@harte.me> <daniel@danielharte.co.uk> <norgeous@users.noreply.github.com>
 Daniel Martí <mvdan@mvdan.cc>
 David Rimmer <dinosore@dbrsoftware.co.uk>
 Denis A. <denisva@gmail.com>
 Dennis Wilson <dw@risu.io>
 Dominik Heidler <dominik@heidler.eu>
@@ -49,10 +47,8 @@ Jens Diemer <github.com@jensdiemer.de> <git@jensdiemer.de>
 Jochen Voss <voss@seehuhn.de>
 Johan Vromans <jvromans@squirrel.nl>
-Karol Różycki <rozycki.karol@gmail.com>
-Kelong Cong <kc04bc@gmx.com> <kc1212@users.noreply.github.com>
 Ken'ichi Kamada <kamada@nanohz.org>
 Kevin Allen <kma1660@gmail.com>
 Lars K.W. Gohlke <lkwg82@gmx.de>
 Laurent Etiemble <laurent.etiemble@gmail.com> <laurent.etiemble@monobjc.net>
 Lode Hoste <zillode@zillode.be>
 Lord Landon Agahnim <lordlandon@gmail.com>
90  Godeps/Godeps.json  (generated, new file)

@@ -0,0 +1,90 @@
{
    "ImportPath": "github.com/syncthing/syncthing",
    "GoVersion": "go1.5.2",
    "Packages": [
        "./cmd/..."
    ],
    "Deps": [
        {
            "ImportPath": "github.com/bkaradzic/go-lz4",
            "Rev": "74ddf82598bc4745b965729e9c6a463bedd33049"
        },
        {
            "ImportPath": "github.com/calmh/du",
            "Rev": "3c0690cca16228b97741327b1b6781397afbdb24"
        },
        {
            "ImportPath": "github.com/calmh/luhn",
            "Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
        },
        {
            "ImportPath": "github.com/calmh/xdr",
            "Rev": "9eb3e1a622d9364deb39c831f7e5f164393d7e37"
        },
        {
            "ImportPath": "github.com/golang/snappy",
            "Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
        },
        {
            "ImportPath": "github.com/juju/ratelimit",
            "Rev": "772f5c38e468398c4511514f4f6aa9a4185bc0a0"
        },
        {
            "ImportPath": "github.com/kardianos/osext",
            "Rev": "29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc"
        },
        {
            "ImportPath": "github.com/rcrowley/go-metrics",
            "Rev": "1ce93efbc8f9c568886b2ef85ce305b2217b3de3"
        },
        {
            "ImportPath": "github.com/syndtr/goleveldb/leveldb",
            "Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
        },
        {
            "ImportPath": "github.com/thejerf/suture",
            "Comment": "v1.0.1",
            "Rev": "99c1f2d613756768fc4299acd9dc621e11ed3fd7"
        },
        {
            "ImportPath": "github.com/vitrun/qart/coding",
            "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
        },
        {
            "ImportPath": "github.com/vitrun/qart/gf256",
            "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
        },
        {
            "ImportPath": "github.com/vitrun/qart/qr",
            "Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
        },
        {
            "ImportPath": "golang.org/x/crypto/bcrypt",
            "Rev": "575fdbe86e5dd89229707ebec0575ce7d088a4a6"
        },
        {
            "ImportPath": "golang.org/x/crypto/blowfish",
            "Rev": "575fdbe86e5dd89229707ebec0575ce7d088a4a6"
        },
        {
            "ImportPath": "golang.org/x/net/internal/iana",
            "Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
        },
        {
            "ImportPath": "golang.org/x/net/ipv6",
            "Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
        },
        {
            "ImportPath": "golang.org/x/net/proxy",
            "Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
        },
        {
            "ImportPath": "golang.org/x/text/transform",
            "Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
        },
        {
            "ImportPath": "golang.org/x/text/unicode/norm",
            "Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
        }
    ]
}
5  Godeps/Readme  (generated, new file)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
2  Godeps/_workspace/.gitignore  (generated, vendored, new file)

@@ -0,0 +1,2 @@
/pkg
/bin
1  Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore  (generated, vendored, new file)

@@ -0,0 +1 @@
/lz4-example/lz4-example
9  Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml  (generated, vendored, new file)

@@ -0,0 +1,9 @@
language: go

go:
  - 1.1
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - tip
1  Godeps/_workspace/src/github.com/calmh/xdr/.gitignore  (generated, vendored, new file)

@@ -0,0 +1 @@
coverage.out
19  Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml  (generated, vendored, new file)

@@ -0,0 +1,19 @@
language: go
go:
  - tip

install:
  - export PATH=$PATH:$HOME/gopath/bin
  - go get golang.org/x/tools/cover
  - go get github.com/mattn/goveralls

script:
  - ./generate.sh
  - go test -coverprofile=coverage.out

after_success:
  - goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"

env:
  global:
    secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
@@ -6,5 +6,7 @@ xdr
 [](http://godoc.org/github.com/calmh/xdr)
 [](http://opensource.org/licenses/MIT)
 
-This is an XDR marshalling/unmarshalling library. It uses code generation and
-not reflection.
+This is an XDR encoding/decoding library. It uses code generation and
+not reflection. It supports the IPDR bastardized XDR format when built
+with `-tags ipdr`.
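For orientation, a minimal sketch of how the hand-written Writer/Reader layer that the generated codecs sit on is used directly — assuming the package is importable as github.com/calmh/xdr, per the Godeps manifest above; all calls appear in the vendored writer.go/reader.go later in this diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf bytes.Buffer

	// All writes go through a sticky-error Writer; one error check suffices.
	xw := xdr.NewWriter(&buf)
	xw.WriteUint32(42)
	xw.WriteString("hello") // length-prefixed, zero-padded to a 4-byte boundary
	if err := xw.Error(); err != nil {
		panic(err)
	}

	xr := xdr.NewReader(&buf)
	fmt.Println(xr.ReadUint32()) // 42
	fmt.Println(xr.ReadString()) // hello
	fmt.Println(xr.Error())      // <nil>
}
```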
@@ -36,73 +36,31 @@ type structInfo struct {
 	Fields []fieldInfo
 }
 
-func (i structInfo) SizeExpr() string {
-	var xdrSizes = map[string]int{
-		"int8":   4,
-		"uint8":  4,
-		"int16":  4,
-		"uint16": 4,
-		"int32":  4,
-		"uint32": 4,
-		"int64":  8,
-		"uint64": 8,
-		"int":    8,
-		"bool":   4,
-	}
-
-	var terms []string
-	nl := ""
-	for _, f := range i.Fields {
-		if size := xdrSizes[f.FieldType]; size > 0 {
-			if f.IsSlice {
-				terms = append(terms, nl+"4+len(o."+f.Name+")*"+strconv.Itoa(size))
-			} else {
-				terms = append(terms, strconv.Itoa(size))
-			}
-		} else {
-			switch f.FieldType {
-			case "string", "[]byte":
-				if f.IsSlice {
-					terms = append(terms, nl+"4+xdr.SizeOfSlice(o."+f.Name+")")
-				} else {
-					terms = append(terms, nl+"4+len(o."+f.Name+")+xdr.Padding(len(o."+f.Name+"))")
-				}
-			default:
-				if f.IsSlice {
-					terms = append(terms, nl+"4+xdr.SizeOfSlice(o."+f.Name+")")
-				} else {
-					terms = append(terms, nl+"o."+f.Name+".XDRSize()")
-				}
-			}
-		}
-		nl = "\n"
-	}
-	return strings.Join(terms, "+")
-}
-
-var headerData = `// ************************************************************
+var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
 // This file is automatically generated by genxdr. Do not edit.
 // ************************************************************
 
 package {{.Package}}
 
 import (
+	"bytes"
+	"io"
 
 	"github.com/calmh/xdr"
 )
-`
+`))
 
-var encoderData = `
-func (o {{.Name}}) XDRSize() int {
-	return {{.SizeExpr}}
+var encodeTpl = template.Must(template.New("encoder").Parse(`
+func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
+	var xw = xdr.NewWriter(w)
+	return o.EncodeXDRInto(xw)
 }//+n
 
-func (o {{.Name}}) MarshalXDR() ([]byte, error) {
-	buf:= make([]byte, o.XDRSize())
-	m := &xdr.Marshaller{Data: buf}
-	return buf, o.MarshalXDRInto(m)
+func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
+	return o.AppendXDR(make([]byte, 0, 128))
 }//+n
 
-func (o {{.Name}}) MustMarshalXDR() []byte {
+func (o {{.TypeName}}) MustMarshalXDR() []byte {
 	bs, err := o.MarshalXDR()
 	if err != nil {
 		panic(err)
@@ -110,155 +68,141 @@ func (o {{.Name}}) MustMarshalXDR() []byte {
 	return bs
 }//+n
 
-func (o {{.Name}}) MarshalXDRInto(m *xdr.Marshaller) error {
-	{{range $fi := .Fields}}
-	{{if $fi.IsSlice}}
-	{{template "marshalSlice" $fi}}
-	{{else}}
-	{{template "marshalValue" $fi}}
-	{{end}}
-	{{end}}
-	return m.Error
+func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
+	var aw = xdr.AppendWriter(bs)
+	var xw = xdr.NewWriter(&aw)
+	_, err := o.EncodeXDRInto(xw)
+	return []byte(aw), err
 }//+n
 
-{{define "marshalValue"}}
-{{if ne .Convert ""}}
-m.Marshal{{.Encoder}}({{.Convert}}(o.{{.Name}}))
-{{else if .IsBasic}}
-{{if ge .Max 1}}
-if l := len(o.{{.Name}}); l > {{.Max}} {
-	return xdr.ElementSizeExceeded("{{.Name}}", l, {{.Max}})
+func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
+	{{range $fieldInfo := .Fields}}
+	{{if not $fieldInfo.IsSlice}}
+	{{if ne $fieldInfo.Convert ""}}
+	xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
+	{{else if $fieldInfo.IsBasic}}
+	{{if ge $fieldInfo.Max 1}}
+	if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
+		return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
 	}
 	{{end}}
+	xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
+	{{else}}
+	_, err := o.{{$fieldInfo.Name}}.EncodeXDRInto(xw)
+	if err != nil {
+		return xw.Tot(), err
+	}
+	{{end}}
+	{{else}}
+	{{if ge $fieldInfo.Max 1}}
+	if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
+		return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
	}
 	{{end}}
+	xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
+	for i := range o.{{$fieldInfo.Name}} {
+		{{if ne $fieldInfo.Convert ""}}
+		xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
+		{{else if $fieldInfo.IsBasic}}
+		xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
+		{{else}}
+		_, err := o.{{$fieldInfo.Name}}[i].EncodeXDRInto(xw)
+		if err != nil {
+			return xw.Tot(), err
+		}
+		{{end}}
+	}
+	{{end}}
-m.Marshal{{.Encoder}}(o.{{.Name}})
-{{else}}
-if err := o.{{.Name}}.MarshalXDRInto(m); err != nil {
-	return err
-}
-{{end}}
-{{end}}
 
-{{define "marshalSlice"}}
-{{if ge .Max 1}}
-if l := len(o.{{.Name}}); l > {{.Max}} {
-	return xdr.ElementSizeExceeded("{{.Name}}", l, {{.Max}})
-}
-{{end}}
-
-m.MarshalUint32(uint32(len(o.{{.Name}})))
-for i := range o.{{.Name}} {
-	{{if ne .Convert ""}}
-	m.Marshal{{.Encoder}}({{.Convert}}(o.{{.Name}}[i]))
-	{{else if .IsBasic}}
-	m.Marshal{{.Encoder}}(o.{{.Name}}[i])
-	{{else}}
-	if err := o.{{.Name}}[i].MarshalXDRInto(m); err != nil {
-		return err
-	}
-	{{end}}
-}
-{{end}}
-
-func (o *{{.Name}}) UnmarshalXDR(bs []byte) error {
-	u := &xdr.Unmarshaller{Data: bs}
-	return o.UnmarshalXDRFrom(u)
-}
-
-func (o *{{.Name}}) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
-	{{range $fi := .Fields}}
-	{{if $fi.IsSlice}}
-	{{template "unmarshalSlice" $fi}}
-	{{else}}
-	{{template "unmarshalValue" $fi}}
-	{{end}}
-	{{end}}
-	return u.Error
+	return xw.Tot(), xw.Error()
 }//+n
 
-{{define "unmarshalValue"}}
-{{if ne .Convert ""}}
-o.{{.Name}} = {{.FieldType}}(u.Unmarshal{{.Encoder}}())
-{{else if .IsBasic}}
-{{if ge .Max 1}}
-o.{{.Name}} = u.Unmarshal{{.Encoder}}Max({{.Max}})
-{{else}}
-o.{{.Name}} = u.Unmarshal{{.Encoder}}()
-{{end}}
-{{else}}
-(&o.{{.Name}}).UnmarshalXDRFrom(u)
-{{end}}
-{{end}}
+func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
+	xr := xdr.NewReader(r)
+	return o.DecodeXDRFrom(xr)
 }//+n
 
-{{define "unmarshalSlice"}}
-_{{.Name}}Size := int(u.UnmarshalUint32())
-if _{{.Name}}Size < 0 {
-	return xdr.ElementSizeExceeded("{{.Name}}", _{{.Name}}Size, {{.Max}})
-} else if _{{.Name}}Size == 0 {
-	o.{{.Name}} = nil
-} else {
-	{{if ge .Max 1}}
-	if _{{.Name}}Size > {{.Max}} {
-		return xdr.ElementSizeExceeded("{{.Name}}", _{{.Name}}Size, {{.Max}})
-	}
-	{{end}}
-	if _{{.Name}}Size <= len(o.{{.Name}}) {
-		{{if eq .FieldType "string"}}
-		for i := _{{.Name}}Size; i < len(o.{{.Name}}); i++ { o.{{.Name}}[i] = "" }
-		{{end}}
-		{{if eq .FieldType "[]byte"}}
-		for i := _{{.Name}}Size; i < len(o.{{.Name}}); i++ { o.{{.Name}}[i] = nil }
-		{{end}}
-		o.{{.Name}} = o.{{.Name}}[:_{{.Name}}Size]
-	} else {
-		o.{{.Name}} = make([]{{.FieldType}}, _{{.Name}}Size)
-	}
-	for i := range o.{{.Name}} {
-		{{if ne .Convert ""}}
-		o.{{.Name}}[i] = {{.FieldType}}(u.Unmarshal{{.Encoder}}())
-		{{else if .IsBasic}}
-		{{if ge .Submax 1}}
-		o.{{.Name}}[i] = u.Unmarshal{{.Encoder}}Max({{.Submax}})
+func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
+	var br = bytes.NewReader(bs)
+	var xr = xdr.NewReader(br)
+	return o.DecodeXDRFrom(xr)
+}//+n
+
+func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
+	{{range $fieldInfo := .Fields}}
+	{{if not $fieldInfo.IsSlice}}
+	{{if ne $fieldInfo.Convert ""}}
+	o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
+	{{else if $fieldInfo.IsBasic}}
+	{{if ge $fieldInfo.Max 1}}
+	o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
 	{{else}}
-		o.{{.Name}}[i] = u.Unmarshal{{.Encoder}}()
+	o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
 	{{end}}
 	{{else}}
-		(&o.{{.Name}}[i]).UnmarshalXDRFrom(u)
+	(&o.{{$fieldInfo.Name}}).DecodeXDRFrom(xr)
 	{{end}}
-	}
-}
-{{end}}
-`
-
-var (
-	encodeTpl = template.Must(template.New("encoder").Parse(encoderData))
-	headerTpl = template.Must(template.New("header").Parse(headerData))
-)
+	{{else}}
+	_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
+	if _{{$fieldInfo.Name}}Size < 0 {
+		return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
+	}
+	{{if ge $fieldInfo.Max 1}}
+	if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
+		return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
+	}
+	{{end}}
+	o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
+	for i := range o.{{$fieldInfo.Name}} {
+		{{if ne $fieldInfo.Convert ""}}
+		o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
+		{{else if $fieldInfo.IsBasic}}
+		{{if ge $fieldInfo.Submax 1}}
+		o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Submax}})
+		{{else}}
+		o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
+		{{end}}
+		{{else}}
+		(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
+		{{end}}
+	}
+	{{end}}
+	{{end}}
+	return xr.Error()
+}`))
 
 var emptyTypeTpl = template.Must(template.New("encoder").Parse(`
-func (o {{.Name}}) XDRSize() int {
-	return 0
-}
+func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
+	return 0, nil
 }//+n
 
-func (o {{.Name}}) MarshalXDR() ([]byte, error) {
+func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
 	return nil, nil
 }//+n
 
-func (o {{.Name}}) MustMarshalXDR() []byte {
+func (o {{.TypeName}}) MustMarshalXDR() []byte {
 	return nil
 }//+n
 
-func (o {{.Name}}) MarshalXDRInto(m *xdr.Marshaller) error {
+func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
+	return bs, nil
+}//+n
+
+func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
+	return xw.Tot(), xw.Error()
+}//+n
+
+func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
+	return nil
+}//+n
+
-func (o *{{.Name}}) UnmarshalXDR(bs []byte) error {
+func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
 	return nil
 }//+n
 
-func (o *{{.Name}}) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
-	return nil
-}//+n
-`))
+func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
	return xr.Error()
+}`))
 
 var maxRe = regexp.MustCompile(`(?:\Wmax:)(\d+)(?:\s*,\s*(\d+))?`)
 
@@ -342,7 +286,7 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 			f = fieldInfo{
 				Name:      fn,
 				IsBasic:   true,
-				FieldType: "[]" + tn,
+				FieldType: tn,
 				Encoder:   enc.Encoder,
 				Convert:   enc.Type,
 				Max:       max1,
@@ -385,14 +329,17 @@ func handleStruct(t *ast.StructType) []fieldInfo {
 }
 
 func generateCode(output io.Writer, s structInfo) {
+	name := s.Name
+	fs := s.Fields
+
 	var buf bytes.Buffer
 	var err error
-	if len(s.Fields) == 0 {
+	if len(fs) == 0 {
 		// This is an empty type. We can create a quite simple codec for it.
-		err = emptyTypeTpl.Execute(&buf, s)
+		err = emptyTypeTpl.Execute(&buf, map[string]interface{}{"TypeName": name})
 	} else {
 		// Generate with the default template.
-		err = encodeTpl.Execute(&buf, s)
+		err = encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
 	}
 	if err != nil {
 		panic(err)
@@ -400,7 +347,12 @@ func generateCode(output io.Writer, s structInfo) {
 
 	bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
 	bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
-	output.Write(bs)
+
+	bs, err = format.Source(bs)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Fprintln(output, string(bs))
 }
 
 func uncamelize(s string) string {
@@ -432,46 +384,46 @@ func generateDiagram(output io.Writer, s structInfo) {
 		tn := f.FieldType
 		name := uncamelize(f.Name)
 
-		suffix := ""
 		if f.IsSlice {
-			fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
-			fmt.Fprintln(output, line)
-			suffix = " (n items)"
+			fmt.Fprintf(output, "/ %s /\n", center("", 61))
 		}
 		switch tn {
 		case "bool":
 			fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
 			fmt.Fprintln(output, line)
 		case "int16", "uint16":
-			fmt.Fprintf(output, "| %s | %s |\n", center("16 zero bits", 29), center(name, 29))
-		case "int8", "uint8":
-			fmt.Fprintf(output, "| %s | %s |\n", center("24 zero bits", 45), center(name, 13))
+			fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
 			fmt.Fprintln(output, line)
 		case "int32", "uint32":
-			fmt.Fprintf(output, "| %s |\n", center(name+suffix, 61))
+			fmt.Fprintf(output, "| %s |\n", center(name, 61))
 			fmt.Fprintln(output, line)
 		case "int64", "uint64":
 			fmt.Fprintf(output, "| %-61s |\n", "")
 			fmt.Fprintf(output, "+ %s +\n", center(name+" (64 bits)", 61))
 			fmt.Fprintf(output, "| %-61s |\n", "")
-		case "string", "[]byte":
 			fmt.Fprintln(output, line)
+		case "string", "byte": // XXX We assume slice of byte!
 			fmt.Fprintf(output, "| %s |\n", center("Length of "+name, 61))
 			fmt.Fprintln(output, line)
 			fmt.Fprintf(output, "/ %61s /\n", "")
-			fmt.Fprintf(output, "\\ %s \\\n", center(name+" (length + padded data)", 61))
+			fmt.Fprintf(output, "\\ %s \\\n", center(name+" (variable length)", 61))
 			fmt.Fprintf(output, "/ %61s /\n", "")
 			fmt.Fprintln(output, line)
 		default:
 			if f.IsSlice {
 				tn = "Zero or more " + tn + " Structures"
+				fmt.Fprintf(output, "/ %s /\n", center("", 61))
+				fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
+				fmt.Fprintf(output, "/ %s /\n", center("", 61))
 			} else {
 				tn = tn + " Structure"
+				fmt.Fprintf(output, "/ %s /\n", center("", 61))
+				fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
+				fmt.Fprintf(output, "/ %s /\n", center("", 61))
 			}
 			fmt.Fprintln(output, line)
 		}
 
+		if f.IsSlice {
+			fmt.Fprintf(output, "/ %s /\n", center("", 61))
+		}
 		fmt.Fprintln(output, line)
 	}
 	fmt.Fprintln(output)
 	fmt.Fprintln(output)
@@ -496,9 +448,9 @@ func generateXdr(output io.Writer, s structInfo) {
 	}
 
 	switch tn {
-	case "int8", "int16", "int32":
+	case "int16", "int32":
 		fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
-	case "uint8", "uint16", "uint32":
+	case "uint16", "uint32":
 		fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
 	case "int64":
 		fmt.Fprintf(output, "\thyper %s%s;\n", fn, suf)
@@ -506,7 +458,7 @@ func generateXdr(output io.Writer, s structInfo) {
 		fmt.Fprintf(output, "\tunsigned hyper %s%s;\n", fn, suf)
 	case "string":
 		fmt.Fprintf(output, "\tstring %s<%s>;\n", fn, l)
-	case "[]byte":
+	case "byte":
 		fmt.Fprintf(output, "\topaque %s<%s>;\n", fn, l)
 	default:
 		fmt.Fprintf(output, "\t%s %s%s;\n", tn, fn, suf)
@@ -558,22 +510,6 @@ func main() {
 	i := inspector(&structs)
 	ast.Inspect(f, i)
 
-	buf := new(bytes.Buffer)
-	headerTpl.Execute(buf, map[string]string{"Package": f.Name.Name})
-	for _, s := range structs {
-		fmt.Fprintf(buf, "\n/*\n\n")
-		generateDiagram(buf, s)
-		generateXdr(buf, s)
-		fmt.Fprintf(buf, "*/\n")
-		generateCode(buf, s)
-	}
-
-	bs, err := format.Source(buf.Bytes())
-	if err != nil {
-		log.Print(buf.String())
-		log.Fatal(err)
-	}
-
 	var output io.Writer = os.Stdout
 	if *outputFile != "" {
 		fd, err := os.Create(*outputFile)
@@ -582,5 +518,13 @@ func main() {
 		}
 		output = fd
 	}
-	output.Write(bs)
+
+	headerTpl.Execute(output, map[string]string{"Package": f.Name.Name})
+	for _, s := range structs {
+		fmt.Fprintf(output, "\n/*\n\n")
+		generateDiagram(output, s)
+		generateXdr(output, s)
+		fmt.Fprintf(output, "*/\n")
+		generateCode(output, s)
+	}
 }
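The generator above consumes plain Go struct declarations and picks up size limits from max: annotations in field comments (matched by maxRe), then emits the EncodeXDR/DecodeXDR method family from the templates. A hypothetical input — the type and field names here are illustrative, not taken from this repository:

```go
package protocol

// genxdr emits EncodeXDR, MarshalXDR, DecodeXDR etc. for each struct it finds.
// max:N caps an element count or byte length; max:N,M additionally caps each
// element of a slice (the Submax seen in the template above).
type Announcement struct {
	Magic     uint32
	NodeID    string   // max:64
	Addresses []string // max:16,64
}
```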
16  Godeps/_workspace/src/github.com/calmh/xdr/debug.go  (generated, vendored, new file)

@@ -0,0 +1,16 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
	"log"
	"os"
)

var (
	debug = len(os.Getenv("XDRTRACE")) > 0
	dl    = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
)

const maxDebugBytes = 32
@@ -1,5 +1,5 @@
 // Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
 // is governed by an MIT-style license that can be found in the LICENSE file.
 
-// Package xdr implements an XDR (RFC 4506) marshaller/unmarshaller.
+// Package xdr implements an XDR (RFC 4506) encoder/decoder.
 package xdr
10  Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go  (generated, vendored, new file)

@@ -0,0 +1,10 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build ipdr

package xdr

func pad(l int) int {
	return 0
}
14  Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go  (generated, vendored, new file)

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func pad(l int) int {
	d := l % 4
	if d == 0 {
		return 0
	}
	return 4 - d
}
171  Godeps/_workspace/src/github.com/calmh/xdr/reader.go  (generated, vendored, new file)

@@ -0,0 +1,171 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package xdr

import (
	"fmt"
	"io"
	"reflect"
	"unsafe"
)

type Reader struct {
	r   io.Reader
	err error
	b   [8]byte
}

func NewReader(r io.Reader) *Reader {
	return &Reader{
		r: r,
	}
}

func (r *Reader) ReadRaw(bs []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}

	var n int
	n, r.err = io.ReadFull(r.r, bs)
	return n, r.err
}

func (r *Reader) ReadString() string {
	return r.ReadStringMax(0)
}

func (r *Reader) ReadStringMax(max int) string {
	buf := r.ReadBytesMaxInto(max, nil)
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
	sh := reflect.StringHeader{
		Data: bh.Data,
		Len:  bh.Len,
	}
	return *((*string)(unsafe.Pointer(&sh)))
}

func (r *Reader) ReadBytes() []byte {
	return r.ReadBytesInto(nil)
}

func (r *Reader) ReadBytesMax(max int) []byte {
	return r.ReadBytesMaxInto(max, nil)
}

func (r *Reader) ReadBytesInto(dst []byte) []byte {
	return r.ReadBytesMaxInto(0, dst)
}

func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
	if r.err != nil {
		return nil
	}

	l := int(r.ReadUint32())
	if r.err != nil {
		return nil
	}
	if l < 0 || max > 0 && l > max {
		// l may be negative on 32 bit builds
		r.err = ElementSizeExceeded("bytes field", l, max)
		return nil
	}

	if fullLen := l + pad(l); fullLen > len(dst) {
		dst = make([]byte, fullLen)
	} else {
		dst = dst[:fullLen]
	}

	var n int
	n, r.err = io.ReadFull(r.r, dst)
	if r.err != nil {
		if debug {
			dl.Printf("rd bytes (%d): %v", len(dst), r.err)
		}
		return nil
	}

	if debug {
		if n > maxDebugBytes {
			dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
		} else {
			dl.Printf("rd bytes (%d): %x", len(dst), dst)
		}
	}
	return dst[:l]
}

func (r *Reader) ReadBool() bool {
	return r.ReadUint8() != 0
}

func (r *Reader) ReadUint32() uint32 {
	if r.err != nil {
		return 0
	}

	_, r.err = io.ReadFull(r.r, r.b[:4])
	if r.err != nil {
		if debug {
			dl.Printf("rd uint32: %v", r.err)
		}
		return 0
	}

	v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24

	if debug {
		dl.Printf("rd uint32=%d (0x%08x)", v, v)
	}
	return v
}

func (r *Reader) ReadUint64() uint64 {
	if r.err != nil {
		return 0
	}

	_, r.err = io.ReadFull(r.r, r.b[:8])
	if r.err != nil {
		if debug {
			dl.Printf("rd uint64: %v", r.err)
		}
		return 0
	}

	v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
		uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56

	if debug {
		dl.Printf("rd uint64=%d (0x%016x)", v, v)
	}
	return v
}

type XDRError struct {
	op  string
	err error
}

func (e XDRError) Error() string {
	return "xdr " + e.op + ": " + e.err.Error()
}

func (e XDRError) IsEOF() bool {
	return e.err == io.EOF
}

func (r *Reader) Error() error {
	if r.err == nil {
		return nil
	}
	return XDRError{"read", r.err}
}

func ElementSizeExceeded(field string, size, limit int) error {
	return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
}
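A sketch of the wire layout ReadBytes expects — a big-endian uint32 length, the payload, then zero padding to a four-byte boundary (RFC 4506 opaque). Import path assumed as above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	msg := []byte{
		0x00, 0x00, 0x00, 0x05, // length = 5
		'h', 'e', 'l', 'l', 'o', // payload
		0x00, 0x00, 0x00, // pad(5) = 3 zero bytes to reach a multiple of 4
	}
	r := xdr.NewReader(bytes.NewReader(msg))
	fmt.Printf("%s %v\n", r.ReadBytes(), r.Error()) // hello <nil>
}
```

Under `-tags ipdr`, pad() returns 0 and the three trailing padding bytes would be omitted.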
49  Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go  (generated, vendored, new file)

@@ -0,0 +1,49 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build ipdr

package xdr

import "io"

func (r *Reader) ReadUint8() uint8 {
	if r.err != nil {
		return 0
	}

	_, r.err = io.ReadFull(r.r, r.b[:1])
	if r.err != nil {
		if debug {
			dl.Printf("rd uint8: %v", r.err)
		}
		return 0
	}

	if debug {
		dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
	}
	return r.b[0]
}

func (r *Reader) ReadUint16() uint16 {
	if r.err != nil {
		return 0
	}

	_, r.err = io.ReadFull(r.r, r.b[:2])
	if r.err != nil {
		if debug {
			dl.Printf("rd uint16: %v", r.err)
		}
		return 0
	}

	v := uint16(r.b[1]) | uint16(r.b[0])<<8

	if debug {
		dl.Printf("rd uint16=%d (0x%04x)", v, v)
	}
	return v
}
15  Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go  (generated, vendored, new file)

@@ -0,0 +1,15 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (r *Reader) ReadUint8() uint8 {
	return uint8(r.ReadUint32())
}

func (r *Reader) ReadUint16() uint16 {
	return uint16(r.ReadUint32())
}
146  Godeps/_workspace/src/github.com/calmh/xdr/writer.go  (generated, vendored, new file)

@@ -0,0 +1,146 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

package xdr

import (
	"io"
	"reflect"
	"unsafe"
)

var padBytes = []byte{0, 0, 0}

type Writer struct {
	w   io.Writer
	tot int
	err error
	b   [8]byte
}

type AppendWriter []byte

func (w *AppendWriter) Write(bs []byte) (int, error) {
	*w = append(*w, bs...)
	return len(bs), nil
}

func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w: w,
	}
}

func (w *Writer) WriteRaw(bs []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	var n int
	n, w.err = w.w.Write(bs)
	return n, w.err
}

func (w *Writer) WriteString(s string) (int, error) {
	sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
	bh := reflect.SliceHeader{
		Data: sh.Data,
		Len:  sh.Len,
		Cap:  sh.Len,
	}
	return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}

func (w *Writer) WriteBytes(bs []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	w.WriteUint32(uint32(len(bs)))
	if w.err != nil {
		return 0, w.err
	}

	if debug {
		if len(bs) > maxDebugBytes {
			dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
		} else {
			dl.Printf("wr bytes (%d): %x", len(bs), bs)
		}
	}

	var l, n int
	n, w.err = w.w.Write(bs)
	l += n

	if p := pad(len(bs)); w.err == nil && p > 0 {
		n, w.err = w.w.Write(padBytes[:p])
		l += n
	}

	w.tot += l
	return l, w.err
}

func (w *Writer) WriteBool(v bool) (int, error) {
	if v {
		return w.WriteUint8(1)
	} else {
		return w.WriteUint8(0)
	}
}

func (w *Writer) WriteUint32(v uint32) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	if debug {
		dl.Printf("wr uint32=%d", v)
	}

	w.b[0] = byte(v >> 24)
	w.b[1] = byte(v >> 16)
	w.b[2] = byte(v >> 8)
	w.b[3] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:4])
	w.tot += l
	return l, w.err
}

func (w *Writer) WriteUint64(v uint64) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	if debug {
		dl.Printf("wr uint64=%d", v)
	}

	w.b[0] = byte(v >> 56)
	w.b[1] = byte(v >> 48)
	w.b[2] = byte(v >> 40)
	w.b[3] = byte(v >> 32)
	w.b[4] = byte(v >> 24)
	w.b[5] = byte(v >> 16)
	w.b[6] = byte(v >> 8)
	w.b[7] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:8])
	w.tot += l
	return l, w.err
}

func (w *Writer) Tot() int {
	return w.tot
}

func (w *Writer) Error() error {
	if w.err == nil {
		return nil
	}
	return XDRError{"write", w.err}
}
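Tot() counts every byte that actually reached the underlying writer, padding included, which makes size accounting easy to verify. A short sketch under the same import-path assumption:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	var buf bytes.Buffer
	xw := xdr.NewWriter(&buf)
	xw.WriteUint32(1)           // 4 bytes
	xw.WriteUint64(2)           // 8 bytes
	xw.WriteBool(true)          // a full 4-byte XDR word in the !ipdr build
	xw.WriteBytes([]byte{0xff}) // 4 (length) + 1 (payload) + 3 (padding) = 8 bytes
	if err := xw.Error(); err != nil {
		panic(err)
	}
	fmt.Println(xw.Tot()) // 24
}
```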
41  Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go  (generated, vendored, new file)

@@ -0,0 +1,41 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build ipdr

package xdr

func (w *Writer) WriteUint8(v uint8) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	if debug {
		dl.Printf("wr uint8=%d", v)
	}

	w.b[0] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:1])
	w.tot += l
	return l, w.err
}

func (w *Writer) WriteUint16(v uint16) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	if debug {
		dl.Printf("wr uint8=%d", v)
	}

	w.b[0] = byte(v >> 8)
	w.b[1] = byte(v)

	var l int
	l, w.err = w.w.Write(w.b[:2])
	w.tot += l
	return l, w.err
}
14  Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go  (generated, vendored, new file)

@@ -0,0 +1,14 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.

// +build !ipdr

package xdr

func (w *Writer) WriteUint8(v uint8) (int, error) {
	return w.WriteUint32(uint32(v))
}

func (w *Writer) WriteUint16(v uint16) (int, error) {
	return w.WriteUint32(uint32(v))
}
@@ -11,5 +11,4 @@
 Damian Gryski <dgryski@gmail.com>
 Google Inc.
 Jan Mercl <0xjnml@gmail.com>
-Rodolfo Carvalho <rhcarvalho@gmail.com>
 Sebastien Binet <seb.binet@gmail.com>
@@ -32,6 +32,5 @@ Kai Backman <kaib@golang.org>
 Marc-Antoine Ruel <maruel@chromium.org>
 Nigel Tao <nigeltao@golang.org>
 Rob Pike <r@golang.org>
-Rodolfo Carvalho <rhcarvalho@gmail.com>
 Russ Cox <rsc@golang.org>
 Sebastien Binet <seb.binet@gmail.com>
@@ -17,9 +17,6 @@ var (
 	ErrTooLarge = errors.New("snappy: decoded block is too large")
 	// ErrUnsupported reports that the input isn't supported.
 	ErrUnsupported = errors.New("snappy: unsupported input")
-
-	errUnsupportedCopy4Tag      = errors.New("snappy: unsupported COPY_4 tag")
-	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
 )
 
 // DecodedLen returns the length of the decoded block.
@@ -43,36 +40,96 @@ func decodedLen(src []byte) (blockLen, headerLen int, err error) {
 	return int(v), n, nil
 }
 
-const (
-	decodeErrCodeCorrupt                  = 1
-	decodeErrCodeUnsupportedLiteralLength = 2
-	decodeErrCodeUnsupportedCopy4Tag      = 3
-)
-
 // Decode returns the decoded form of src. The returned slice may be a sub-
 // slice of dst if dst was large enough to hold the entire decoded block.
 // Otherwise, a newly allocated slice will be returned.
 //
-// The dst and src must not overlap. It is valid to pass a nil dst.
+// It is valid to pass a nil dst.
 func Decode(dst, src []byte) ([]byte, error) {
 	dLen, s, err := decodedLen(src)
 	if err != nil {
 		return nil, err
 	}
-	if dLen <= len(dst) {
-		dst = dst[:dLen]
-	} else {
+	if len(dst) < dLen {
 		dst = make([]byte, dLen)
 	}
-	switch decode(dst, src[s:]) {
-	case 0:
-		return dst, nil
-	case decodeErrCodeUnsupportedLiteralLength:
-		return nil, errUnsupportedLiteralLength
-	case decodeErrCodeUnsupportedCopy4Tag:
-		return nil, errUnsupportedCopy4Tag
+
+	var d, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-1])
+			case x == 61:
+				s += 3
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-2]) | uint(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+			}
+			length = int(x + 1)
+			if length <= 0 {
+				return nil, errors.New("snappy: unsupported literal length")
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return nil, ErrCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+		case tagCopy2:
+			s += 3
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(src[s-2]) | int(src[s-1])<<8
+
+		case tagCopy4:
+			return nil, errors.New("snappy: unsupported COPY_4 tag")
+		}
+
+		end := d + length
+		if offset > d || end > len(dst) {
+			return nil, ErrCorrupt
+		}
+		for ; d < end; d++ {
+			dst[d] = dst[d-offset]
+		}
 	}
-	return nil, ErrCorrupt
+	if d != dLen {
+		return nil, ErrCorrupt
+	}
+	return dst[:d], nil
 }
 
 // NewReader returns a new Reader that decompresses from r, using the framing
@@ -81,12 +138,12 @@ func Decode(dst, src []byte) ([]byte, error) {
 func NewReader(r io.Reader) *Reader {
 	return &Reader{
 		r:       r,
-		decoded: make([]byte, maxBlockSize),
-		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+		decoded: make([]byte, maxUncompressedChunkLen),
+		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
 	}
 }
 
-// Reader is an io.Reader that can read Snappy-compressed bytes.
+// Reader is an io.Reader than can read Snappy-compressed bytes.
 type Reader struct {
 	r   io.Reader
 	err error
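A worked example of the block format this decode loop parses, hand-assembling the smallest useful input: one uvarint byte giving the decoded length, then a single literal chunk whose tag byte carries length-1 in its upper six bits. The tagLiteral value of 0x00 is assumed from the upstream snappy format (the const block defining the tags is truncated in this listing):

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// 0x05: uvarint decoded length (5).
	// 0x10: literal tag byte, (5-1)<<2 | tagLiteral(0x00).
	block := []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}
	out, err := snappy.Decode(nil, block)
	fmt.Printf("%q %v\n", out, err) // "hello" <nil>
}
```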
254  Godeps/_workspace/src/github.com/golang/snappy/encode.go  (generated, vendored, new file)

@@ -0,0 +1,254 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
	"encoding/binary"
	"io"
)

// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	case n < 1<<16:
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	case n < 1<<24:
		dst[0] = 62<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		i = 4
	case int64(n) < 1<<32:
		dst[0] = 63<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		dst[4] = uint8(n >> 24)
		i = 5
	default:
		panic("snappy: source buffer is too long")
	}
	if copy(dst[i:], lit) != len(lit) {
		panic("snappy: destination buffer is too short")
	}
	return i + len(lit)
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	for length > 0 {
		x := length - 4
		if 0 <= x && x < 1<<3 && offset < 1<<11 {
			dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
			dst[i+1] = uint8(offset)
			i += 2
			break
		}

		x = length
		if x > 1<<6 {
			x = 1 << 6
		}
		dst[i+0] = uint8(x-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= x
	}
	return i
}

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); len(dst) < n {
		dst = make([]byte, n)
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	// Return early if src is short.
	if len(src) <= 4 {
		if len(src) != 0 {
			d += emitLiteral(dst[d:], src)
		}
		return dst[:d]
	}

	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	const maxTableSize = 1 << 14
	shift, tableSize := uint(32-8), 1<<8
	for tableSize < maxTableSize && tableSize < len(src) {
		shift--
		tableSize *= 2
	}
	var table [maxTableSize]int

	// Iterate over the source bytes.
	var (
		s   int // The iterator position.
		t   int // The last position with the same hash as s.
		lit int // The start position of any pending literal bytes.
	)
	for s+3 < len(src) {
		// Update the hash table.
		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
		p := &table[(h*0x1e35a7bd)>>shift]
		// We need to to store values in [-1, inf) in table. To save
		// some initialization time, (re)use the table's zero value
		// and shift the values against this zero: add 1 on writes,
		// subtract 1 on reads.
		t, *p = *p-1, s+1
		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
			s++
			continue
		}
		// Otherwise, we have a match. First, emit any pending literal bytes.
		if lit != s {
			d += emitLiteral(dst[d:], src[lit:s])
		}
		// Extend the match to be as long as possible.
		s0 := s
		s, t = s+4, t+4
		for s < len(src) && src[s] == src[t] {
			s++
			t++
		}
		// Emit the copied bytes.
		d += emitCopy(dst[d:], s-t, s-s0)
		lit = s
	}

	// Emit any final pending literal bytes and return.
	if lit != len(src) {
		d += emitLiteral(dst[d:], src[lit:])
	}
	return dst[:d]
}

// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
	// Compressed data can be defined as:
	//    compressed := item* literal*
	//    item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	return 32 + srcLen + srcLen/6
}

// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w:   w,
		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
	}
}

// Writer is an io.Writer than can write Snappy-compressed bytes.
type Writer struct {
	w           io.Writer
	err         error
	enc         []byte
	buf         [checksumSize + chunkHeaderSize]byte
	wroteHeader bool
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	w.w = writer
	w.err = nil
	w.wroteHeader = false
}

// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	if !w.wroteHeader {
		copy(w.enc, magicChunk)
		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
			w.err = err
			return n, err
		}
		w.wroteHeader = true
	}
	for len(p) > 0 {
		var uncompressed []byte
		if len(p) > maxUncompressedChunkLen {
			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		chunkType := uint8(chunkTypeCompressedData)
		chunkBody := Encode(w.enc, uncompressed)
		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
		}

		chunkLen := 4 + len(chunkBody)
		w.buf[0] = chunkType
		w.buf[1] = uint8(chunkLen >> 0)
		w.buf[2] = uint8(chunkLen >> 8)
		w.buf[3] = uint8(chunkLen >> 16)
		w.buf[4] = uint8(checksum >> 0)
		w.buf[5] = uint8(checksum >> 8)
		w.buf[6] = uint8(checksum >> 16)
		w.buf[7] = uint8(checksum >> 24)
		if _, err := w.w.Write(w.buf[:]); err != nil {
			w.err = err
			return n, err
		}
		if _, err := w.w.Write(chunkBody); err != nil {
			w.err = err
			return n, err
		}
		n += len(uncompressed)
	}
	return n, nil
}
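One way to sanity-check the worst-case bound derived in MaxEncodedLen above: for the 65536-byte framing-format chunk limit, 32 + 65536 + 65536/6 = 32 + 65536 + 10922 (integer division) = 76490, which is exactly the hard-coded maxEncodedLenOfMaxBlockSize constant that disappears in the next hunk. A one-liner to confirm:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	fmt.Println(snappy.MaxEncodedLen(65536)) // 76490
}
```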
@@ -6,7 +6,7 @@
 // It aims for very high speeds and reasonable compression.
 //
 // The C++ snappy implementation is at https://github.com/google/snappy
-package snappy // import "github.com/golang/snappy"
+package snappy
 
 import (
 	"hash/crc32"
@@ -46,25 +46,9 @@ const (
 	chunkHeaderSize = 4
 	magicChunk      = "\xff\x06\x00\x00" + magicBody
 	magicBody       = "sNaPpY"
-
-	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
-	// part of the wire format per se, but some parts of the encoder assume
-	// that an offset fits into a uint16.
-	//
-	// Also, for the framing format (Writer type instead of Encode function),
 	// https://github.com/google/snappy/blob/master/framing_format.txt says
-	// that "the uncompressed data in a chunk must be no longer than 65536
-	// bytes".
-	maxBlockSize = 65536
-
-	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
-	// hard coded to be a const instead of a variable, so that obufLen can also
-	// be a const. Their equivalence is confirmed by
-	// TestMaxEncodedLenOfMaxBlockSize.
-	maxEncodedLenOfMaxBlockSize = 76490
-
-	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
-	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+	maxUncompressedChunkLen = 65536
 )
 
 const (
@@ -8,10 +8,10 @@
 package ratelimit
 
 import (
-	"math"
 	"strconv"
 	"sync"
 	"time"
+	"math"
 )
 
 // Bucket represents a token bucket that fills at a predetermined rate.
@@ -171,30 +171,6 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
 	return count
 }
 
-// Available returns the number of available tokens. It will be negative
-// when there are consumers waiting for tokens. Note that if this
-// returns greater than zero, it does not guarantee that calls that take
-// tokens from the buffer will succeed, as the number of available
-// tokens could have changed in the meantime. This method is intended
-// primarily for metrics reporting and debugging.
-func (tb *Bucket) Available() int64 {
-	return tb.available(time.Now())
-}
-
-// available is the internal version of available - it takes the current time as
-// an argument to enable easy testing.
-func (tb *Bucket) available(now time.Time) int64 {
-	tb.mu.Lock()
-	defer tb.mu.Unlock()
-	tb.adjust(now)
-	return tb.avail
-}
-
 // Capacity returns the capacity that the bucket was created with.
 func (tb *Bucket) Capacity() int64 {
 	return tb.capacity
 }
 
 // Rate returns the fill rate of the bucket, in tokens per second.
 func (tb *Bucket) Rate() float64 {
 	return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
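A sketch of how the token bucket is used. NewBucketWithRate and Wait are part of the package at this revision but are not shown in the hunk above, so treat them as assumptions; Capacity and Rate appear directly above:

```go
package main

import (
	"fmt"

	"github.com/juju/ratelimit"
)

func main() {
	// A bucket holding at most 100 tokens, refilled at roughly 100 tokens/second.
	tb := ratelimit.NewBucketWithRate(100, 100)
	fmt.Println(tb.Capacity(), tb.Rate()) // 100 ≈100

	for i := 0; i < 3; i++ {
		tb.Wait(1) // block until a token is available
		fmt.Println("request", i)
	}
}
```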
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // Extensions to the standard "os" package.
-package osext // import "github.com/kardianos/osext"
+package osext
 
 import "path/filepath"
9  Godeps/_workspace/src/github.com/rcrowley/go-metrics/.gitignore  (generated, vendored, new file)

@@ -0,0 +1,9 @@
*.[68]
*.a
*.out
*.swp
_obj
_testmain.go
cmd/metrics-bench/metrics-bench
cmd/metrics-example/metrics-example
cmd/never-read/never-read
13
Godeps/_workspace/src/github.com/rcrowley/go-metrics/.travis.yml
generated
vendored
Normal file
13
Godeps/_workspace/src/github.com/rcrowley/go-metrics/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
language: go

go:
- 1.2
- 1.3
- 1.4

script:
- ./validate.sh

# this should give us faster builds according to
# http://docs.travis-ci.com/user/migrating-from-legacy/
sudo: false

@@ -39,7 +39,7 @@ t.Update(47)
Periodically log every metric in human-readable form to standard error:

```go
go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
```

Periodically log every metric in slightly-more-parseable form to syslog:
@@ -103,19 +103,6 @@ import "github.com/rcrowley/go-metrics/stathat"
go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
```

Maintain all metrics along with expvars at `/debug/metrics`:

This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
as well as all your go-metrics.

```go
import "github.com/rcrowley/go-metrics/exp"

exp.Exp(metrics.DefaultRegistry)
```

Installation
------------
@@ -23,7 +23,6 @@ func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {

type Reporter struct {
Email, Token string
Namespace string
Source string
Interval time.Duration
Registry metrics.Registry
@@ -33,7 +32,7 @@ type Reporter struct {
}

func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
return &Reporter{e, t, s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
}

func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
@@ -89,9 +88,6 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
snapshot.Counters = make([]Measurement, 0)
histogramGaugeCount := 1 + len(self.Percentiles)
r.Each(func(name string, metric interface{}) {
if self.Namespace != "" {
name = fmt.Sprintf("%s.%s", self.Namespace, name)
}
measurement := Measurement{}
measurement[Period] = self.Interval.Seconds()
switch m := metric.(type) {
@@ -5,17 +5,10 @@ import (
"time"
)

func Log(r Registry, freq time.Duration, l *log.Logger) {
LogScaled(r, freq, time.Nanosecond, l)
}

// Output each metric in the given registry periodically using the given
// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l *log.Logger) {
du := float64(scale)
duSuffix := scale.String()[1:]

for _ = range time.Tick(freq) {
// logger.
func Log(r Registry, d time.Duration, l *log.Logger) {
for _ = range time.Tick(d) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
@@ -58,15 +51,15 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l *log.Logge
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
l.Printf("timer %s\n", name)
l.Printf(" count: %9d\n", t.Count())
l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
l.Printf(" min: %9d\n", t.Min())
l.Printf(" max: %9d\n", t.Max())
l.Printf(" mean: %12.2f\n", t.Mean())
l.Printf(" stddev: %12.2f\n", t.StdDev())
l.Printf(" median: %12.2f\n", ps[0])
l.Printf(" 75%%: %12.2f\n", ps[1])
l.Printf(" 95%%: %12.2f\n", ps[2])
l.Printf(" 99%%: %12.2f\n", ps[3])
l.Printf(" 99.9%%: %12.2f\n", ps[4])
l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
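The duSuffix trick above leans on time.Duration.String(): for a unit duration like time.Millisecond the string is "1ms", so slicing off the leading "1" leaves just the unit. A standalone sketch of how LogScaled formats a nanosecond timing:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	scale := time.Millisecond
	du := float64(scale)           // 1e6 ns per printed unit
	duSuffix := scale.String()[1:] // "1ms"[1:] == "ms"

	elapsedNs := float64(2500000) // a raw timer value in nanoseconds
	fmt.Printf("%12.2f%s\n", elapsedNs/du, duSuffix) // 2.50ms, right-aligned in 12 columns
}
```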
@@ -157,13 +157,6 @@ func NewPrefixedRegistry(prefix string) Registry {
}
}

func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
return &PrefixedRegistry{
underlying: parent,
prefix: prefix,
}
}

// Call the given function for each registered metric.
func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
r.underlying.Each(fn)
@@ -2,7 +2,6 @@ package metrics

import (
"runtime"
"runtime/pprof"
"time"
)

@@ -10,37 +9,35 @@ var (
memStats runtime.MemStats
runtimeMetrics struct {
MemStats struct {
Alloc Gauge
BuckHashSys Gauge
DebugGC Gauge
EnableGC Gauge
Frees Gauge
HeapAlloc Gauge
HeapIdle Gauge
HeapInuse Gauge
HeapObjects Gauge
HeapReleased Gauge
HeapSys Gauge
LastGC Gauge
Lookups Gauge
Mallocs Gauge
MCacheInuse Gauge
MCacheSys Gauge
MSpanInuse Gauge
MSpanSys Gauge
NextGC Gauge
NumGC Gauge
GCCPUFraction GaugeFloat64
PauseNs Histogram
PauseTotalNs Gauge
StackInuse Gauge
StackSys Gauge
Sys Gauge
TotalAlloc Gauge
Alloc Gauge
BuckHashSys Gauge
DebugGC Gauge
EnableGC Gauge
Frees Gauge
HeapAlloc Gauge
HeapIdle Gauge
HeapInuse Gauge
HeapObjects Gauge
HeapReleased Gauge
HeapSys Gauge
LastGC Gauge
Lookups Gauge
Mallocs Gauge
MCacheInuse Gauge
MCacheSys Gauge
MSpanInuse Gauge
MSpanSys Gauge
NextGC Gauge
NumGC Gauge
PauseNs Histogram
PauseTotalNs Gauge
StackInuse Gauge
StackSys Gauge
Sys Gauge
TotalAlloc Gauge
}
NumCgoCall Gauge
NumGoroutine Gauge
NumThread Gauge
ReadMemStats Timer
}
frees uint64
@@ -48,8 +45,6 @@ var (
mallocs uint64
numGC uint32
numCgoCalls int64

threadCreateProfile = pprof.Lookup("threadcreate")
)

// Capture new values for the Go runtime statistics exported in
@@ -102,7 +97,6 @@ func CaptureRuntimeMemStatsOnce(r Registry) {
runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))

// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
i := numGC % uint32(len(memStats.PauseNs))
@@ -138,8 +132,6 @@ func CaptureRuntimeMemStatsOnce(r Registry) {
numCgoCalls = currentNumCgoCalls

runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))

runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
}

// Register runtimeMetrics for the Go runtime statistics exported in runtime and
@@ -166,7 +158,6 @@ func RegisterRuntimeMemStats(r Registry) {
runtimeMetrics.MemStats.MSpanSys = NewGauge()
runtimeMetrics.MemStats.NextGC = NewGauge()
runtimeMetrics.MemStats.NumGC = NewGauge()
runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
runtimeMetrics.MemStats.StackInuse = NewGauge()
@@ -175,7 +166,6 @@ func RegisterRuntimeMemStats(r Registry) {
runtimeMetrics.MemStats.TotalAlloc = NewGauge()
runtimeMetrics.NumCgoCall = NewGauge()
runtimeMetrics.NumGoroutine = NewGauge()
runtimeMetrics.NumThread = NewGauge()
runtimeMetrics.ReadMemStats = NewTimer()

r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
@@ -198,7 +188,6 @@ func RegisterRuntimeMemStats(r Registry) {
r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
@@ -207,6 +196,5 @@ func RegisterRuntimeMemStats(r Registry) {
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
r.Register("runtime.NumThread", runtimeMetrics.NumThread)
r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
}
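For context, the register/capture pair above is used in two steps: register the gauges once, then sample them on an interval. A minimal usage sketch along the lines of the go-metrics README:

```go
package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Register the runtime.* gauges once...
	metrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)
	// ...then sample them in the background on a fixed interval.
	go metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, 5*time.Second)

	select {} // keep the program alive for the sampler
}
```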
@@ -12,10 +12,8 @@ import (

"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)

// ErrBatchCorrupted records reason of batch corruption.
type ErrBatchCorrupted struct {
Reason string
}
@@ -25,7 +23,7 @@ func (e *ErrBatchCorrupted) Error() string {
}

func newErrBatchCorrupted(reason string) error {
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
}

const (
@@ -33,7 +31,6 @@ const (
batchGrowRec = 3000
)

// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
@@ -70,20 +67,20 @@ func (b *Batch) grow(n int) {
}
}

func (b *Batch) appendRec(kt keyType, key, value []byte) {
func (b *Batch) appendRec(kt kType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
if kt == keyTypeVal {
if kt == ktVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
off := len(b.data)
data := b.data[:off+n]
data[off] = byte(kt)
off++
off += 1
off += binary.PutUvarint(data[off:], uint64(len(key)))
copy(data[off:], key)
off += len(key)
if kt == keyTypeVal {
if kt == ktVal {
off += binary.PutUvarint(data[off:], uint64(len(value)))
copy(data[off:], value)
off += len(value)
@@ -97,13 +94,13 @@ func (b *Batch) appendRec(kt keyType, key, value []byte) {
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
b.appendRec(ktVal, key, value)
}

// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
b.appendRec(ktDel, key, nil)
}

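The record layout appendRec writes is compact: one type byte, a uvarint key length, the key, and for puts a uvarint value length plus the value. A self-contained sketch of the same encoding (the ktDel/ktVal values of 0 and 1 are an assumption matching LevelDB's key types):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	ktDel = 0
	ktVal = 1
)

// encodeRec mirrors the layout appendRec produces: type byte,
// uvarint key length, key bytes, then (for ktVal only) uvarint
// value length and value bytes.
func encodeRec(kt byte, key, value []byte) []byte {
	var tmp [binary.MaxVarintLen32]byte
	buf := []byte{kt}
	n := binary.PutUvarint(tmp[:], uint64(len(key)))
	buf = append(buf, tmp[:n]...)
	buf = append(buf, key...)
	if kt == ktVal {
		n = binary.PutUvarint(tmp[:], uint64(len(value)))
		buf = append(buf, tmp[:n]...)
		buf = append(buf, value...)
	}
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeRec(ktVal, []byte("k"), []byte("v"))) // 01 01 6b 01 76
	fmt.Printf("% x\n", encodeRec(ktDel, []byte("k"), nil))         // 00 01 6b
}
```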
// Dump dumps batch contents. The returned slice can be loaded into the
@@ -124,14 +121,13 @@ func (b *Batch) Load(data []byte) error {

// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
return b.decodeRec(func(i int, kt kType, key, value []byte) {
switch kt {
case keyTypeVal:
case ktVal:
r.Put(key, value)
case keyTypeDel:
case ktDel:
r.Delete(key)
}
return nil
})
}

@@ -197,19 +193,18 @@ func (b *Batch) decode(prevSeq uint64, data []byte) error {
return nil
}

func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) error {
func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
off := batchHdrLen
for i := 0; i < b.rLen; i++ {
if off >= len(b.data) {
return newErrBatchCorrupted("invalid records length")
}

kt := keyType(b.data[off])
if kt > keyTypeVal {
panic(kt)
kt := kType(b.data[off])
if kt > ktVal {
return newErrBatchCorrupted("bad record: invalid type")
}
off++
off += 1

x, n := binary.Uvarint(b.data[off:])
off += n
@@ -219,7 +214,7 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
key := b.data[off : off+int(x)]
off += int(x)
var value []byte
if kt == keyTypeVal {
if kt == ktVal {
x, n := binary.Uvarint(b.data[off:])
off += n
if n <= 0 || off+int(x) > len(b.data) {
@@ -229,19 +224,16 @@ func (b *Batch) decodeRec(f func(i int, kt keyType, key, value []byte) error) er
off += int(x)
}

if err := f(i, kt, key, value); err != nil {
return err
}
f(i, kt, key, value)
}

return nil
}

func (b *Batch) memReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch = makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Put(ikScratch, value)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Put(ikey, value)
})
}

@@ -253,9 +245,8 @@ func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) er
}

func (b *Batch) revertMemReplay(to *memdb.DB) error {
var ikScratch []byte
return b.decodeRec(func(i int, kt keyType, key, value []byte) error {
ikScratch := makeInternalKey(ikScratch, key, b.seq+uint64(i), kt)
return to.Delete(ikScratch)
return b.decodeRec(func(i int, kt kType, key, value []byte) {
ikey := newIkey(key, b.seq+uint64(i), kt)
to.Delete(ikey)
})
}
@@ -47,21 +47,17 @@ type Cacher interface {
// so the the Release method will be called once object is released.
type Value interface{}

// NamespaceGetter provides convenient wrapper for namespace.
type NamespaceGetter struct {
type CacheGetter struct {
Cache *Cache
NS uint64
}

// Get simply calls Cache.Get() method.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}

// The hash tables implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
// Kunlong Zhang, and Michael Spear.
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.

const (
mInitialSize = 1 << 4
@@ -614,12 +610,10 @@ func (n *Node) unrefLocked() {
}
}

// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}

// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
@@ -628,8 +622,6 @@ func (h *Handle) Value() Value {
return nil
}

// Release releases this 'cache handle'.
// It is safe to call release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
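Release above is made idempotent by compare-and-swapping the node pointer to nil: only the call that wins the swap runs the cleanup, which is why calling it multiple times is safe. A toy reproduction of the pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type handle struct {
	n unsafe.Pointer // points at the cached value
}

func (h *handle) release() {
	p := atomic.LoadPointer(&h.n)
	// Only one caller can swap p -> nil, so a double release is a no-op.
	if p != nil && atomic.CompareAndSwapPointer(&h.n, p, nil) {
		fmt.Println("released", *(*int)(p))
	}
}

func main() {
	v := 42
	h := &handle{n: unsafe.Pointer(&v)}
	h.release() // released 42
	h.release() // nothing happens
}
```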
@@ -33,9 +33,9 @@ func (icmp *iComparer) Name() string {
}

func (icmp *iComparer) Compare(a, b []byte) int {
x := icmp.ucmp.Compare(internalKey(a).ukey(), internalKey(b).ukey())
x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
if x == 0 {
if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
if m, n := iKey(a).num(), iKey(b).num(); m > n {
x = -1
} else if m < n {
x = 1
@@ -45,13 +45,13 @@ func (icmp *iComparer) Compare(a, b []byte) int {
}

func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
ua, ub := iKey(a).ukey(), iKey(b).ukey()
dst = icmp.ucmp.Separator(dst, ua, ub)
if dst == nil {
return nil
}
if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Did not close possibilities that n maybe longer than len(ub).
dst = append(dst, a[len(a)-8:]...)
@@ -60,13 +60,13 @@ func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
}

func (icmp *iComparer) Successor(dst, b []byte) []byte {
ub := internalKey(b).ukey()
ub := iKey(b).ukey()
dst = icmp.ucmp.Successor(dst, ub)
if dst == nil {
return nil
}
if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
dst = append(dst, keyMaxNumBytes...)
dst = append(dst, kMaxNumBytes...)
} else {
// Did not close possibilities that n maybe longer than len(ub).
dst = append(dst, b[len(b)-8:]...)
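Compare above orders internal keys by user key ascending, then by sequence number descending, so the newest version of a key is encountered first during iteration. A schematic version with explicit fields instead of the packed 8-byte trailer:

```go
package main

import "fmt"

type ikey struct {
	ukey string
	seq  uint64
}

func compare(a, b ikey) int {
	switch {
	case a.ukey < b.ukey:
		return -1
	case a.ukey > b.ukey:
		return 1
	case a.seq > b.seq:
		return -1 // equal user keys: higher seq (newer) sorts first
	case a.seq < b.seq:
		return 1
	}
	return 0
}

func main() {
	fmt.Println(compare(ikey{"k", 7}, ikey{"k", 3})) // -1: seq 7 comes first
	fmt.Println(compare(ikey{"a", 1}, ikey{"b", 9})) // -1: user key decides
}
```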
@@ -36,14 +36,14 @@ type DB struct {
s *session

// MemDB.
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFd storage.FileDesc
frozenJournalFd storage.FileDesc
frozenSeq uint64
memMu sync.RWMutex
memPool chan *memdb.DB
mem, frozenMem *memDB
journal *journal.Writer
journalWriter storage.Writer
journalFile storage.File
frozenJournalFile storage.File
frozenSeq uint64

// Snapshot.
snapsMu sync.Mutex
@@ -61,10 +61,8 @@ type DB struct {
writeDelayN int
journalC chan *Batch
journalAckC chan error
tr *Transaction

// Compaction.
compCommitLk sync.Mutex
tcompCmdC chan cCmd
tcompPauseC chan chan<- struct{}
mcompCmdC chan cCmd
@@ -72,8 +70,7 @@ type DB struct {
compPerErrC chan error
compErrSetC chan error
compWriteLocking bool
compStats cStats
memdbMaxLevel int // For testing.
compStats []cStats

// Close.
closeW sync.WaitGroup
@@ -107,6 +104,7 @@ func openDB(s *session) (*DB, error) {
compErrC: make(chan error),
compPerErrC: make(chan error),
compErrSetC: make(chan error),
compStats: make([]cStats, s.o.GetNumLevel()),
// Close
closeC: make(chan struct{}),
}
@@ -211,7 +209,7 @@ func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func OpenFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, o.GetReadOnly())
stor, err := storage.OpenFile(path)
if err != nil {
return
}
@@ -261,7 +259,7 @@ func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
// The returned DB instance is goroutine-safe.
// The DB must be closed after use, by calling Close method.
func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
stor, err := storage.OpenFile(path, false)
stor, err := storage.OpenFile(path)
if err != nil {
return
}
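For reference, the public entry point whose plumbing changes here is used like this. A minimal sketch against the older single-argument storage.OpenFile era of the API (the read-only flag only exists in the newer version being rolled back; the path is a placeholder):

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example.db", nil) // nil => default options
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v"), nil); err != nil {
		log.Fatal(err)
	}
	v, err := db.Get([]byte("k"), nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("k = %s", v)
}
```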
@@ -280,11 +278,12 @@ func recoverTable(s *session, o *opt.Options) error {
o.Strict &= ^opt.StrictReader

// Get all tables and sort it by file number.
fds, err := s.stor.List(storage.TypeTable)
tableFiles_, err := s.getFiles(storage.TypeTable)
if err != nil {
return err
}
sortFds(fds)
tableFiles := files(tableFiles_)
tableFiles.sort()

var (
maxSeq uint64
@@ -297,17 +296,17 @@ func recoverTable(s *session, o *opt.Options) error {
rec = &sessionRecord{}
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
)
buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
tmpFd = s.newTemp()
writer, err := s.stor.Create(tmpFd)
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
tmp = s.newTemp()
writer, err := tmp.Create()
if err != nil {
return
}
defer func() {
writer.Close()
if err != nil {
s.stor.Remove(tmpFd)
tmpFd = storage.FileDesc{}
tmp.Remove()
tmp = nil
}
}()

@@ -315,7 +314,7 @@ func recoverTable(s *session, o *opt.Options) error {
tw := table.NewWriter(writer, o)
for iter.Next() {
key := iter.Key()
if validInternalKey(key) {
if validIkey(key) {
err = tw.Append(key, iter.Value())
if err != nil {
return
@@ -339,9 +338,9 @@ func recoverTable(s *session, o *opt.Options) error {
size = int64(tw.BytesLen())
return
}
recoverTable := func(fd storage.FileDesc) error {
s.logf("table@recovery recovering @%d", fd.Num)
reader, err := s.stor.Open(fd)
recoverTable := func(file storage.File) error {
s.logf("table@recovery recovering @%d", file.Num())
reader, err := file.Open()
if err != nil {
return err
}
@@ -363,7 +362,7 @@ func recoverTable(s *session, o *opt.Options) error {
tgoodKey, tcorruptedKey, tcorruptedBlock int
imin, imax []byte
)
tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
if err != nil {
return err
}
@@ -371,7 +370,7 @@ func recoverTable(s *session, o *opt.Options) error {
if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
itererr.SetErrorCallback(func(err error) {
if errors.IsCorrupted(err) {
s.logf("table@recovery block corruption @%d %q", fd.Num, err)
s.logf("table@recovery block corruption @%d %q", file.Num(), err)
tcorruptedBlock++
}
})
@@ -380,7 +379,7 @@ func recoverTable(s *session, o *opt.Options) error {
// Scan the table.
for iter.Next() {
key := iter.Key()
_, seq, _, kerr := parseInternalKey(key)
_, seq, _, kerr := parseIkey(key)
if kerr != nil {
tcorruptedKey++
continue
@@ -406,23 +405,23 @@ func recoverTable(s *session, o *opt.Options) error {

if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
droppedTable++
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
return nil
}

if tgoodKey > 0 {
if tcorruptedKey > 0 || tcorruptedBlock > 0 {
// Rebuild the table.
s.logf("table@recovery rebuilding @%d", fd.Num)
s.logf("table@recovery rebuilding @%d", file.Num())
iter := tr.NewIterator(nil, nil)
tmpFd, newSize, err := buildTable(iter)
tmp, newSize, err := buildTable(iter)
iter.Release()
if err != nil {
return err
}
closed = true
reader.Close()
if err := s.stor.Rename(tmpFd, fd); err != nil {
if err := file.Replace(tmp); err != nil {
return err
}
size = newSize
@@ -432,30 +431,30 @@ func recoverTable(s *session, o *opt.Options) error {
}
recoveredKey += tgoodKey
// Add table to level 0.
rec.addTable(0, fd.Num, size, imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
rec.addTable(0, file.Num(), uint64(size), imin, imax)
s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
} else {
droppedTable++
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
}

return nil
}

// Recover all tables.
if len(fds) > 0 {
s.logf("table@recovery F·%d", len(fds))
if len(tableFiles) > 0 {
s.logf("table@recovery F·%d", len(tableFiles))

// Mark file number as used.
s.markFileNum(fds[len(fds)-1].Num)
s.markFileNum(tableFiles[len(tableFiles)-1].Num())

for _, fd := range fds {
if err := recoverTable(fd); err != nil {
for _, file := range tableFiles {
if err := recoverTable(file); err != nil {
return err
}
}

s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
}

// Set sequence number.
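buildTable follows the usual crash-safe rebuild recipe: write the new table to a temporary file, and only replace the damaged one once the write fully succeeds. A generic sketch of that pattern using plain os.Rename (an assumption for illustration; the storage layer here hides the same idea behind file.Replace and stor.Rename):

```go
package main

import (
	"log"
	"os"
)

// rewriteAtomically writes data to path without ever leaving a
// half-written file behind: the temp file is either renamed over
// the original on success or removed on failure.
func rewriteAtomically(path string, data []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, 0644); err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, path) // atomic on POSIX filesystems
}

func main() {
	if err := rewriteAtomically("/tmp/table.ldb", []byte("rebuilt")); err != nil {
		log.Fatal(err)
	}
}
```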
@@ -472,31 +471,31 @@ func recoverTable(s *session, o *opt.Options) error {

func (db *DB) recoverJournal() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
files(allJournalFiles).sort()

// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
}
}

var (
ofd storage.FileDesc // Obsolete file.
of storage.File // Obsolete file.
rec = &sessionRecord{}
)

// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery F·%d", len(fds))
if len(recJournalFiles) > 0 {
db.logf("journal@recovery F·%d", len(recJournalFiles))

// Mark file number as used.
db.s.markFileNum(fds[len(fds)-1].Num)
db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())

var (
// Options.
@@ -510,31 +509,31 @@ func (db *DB) recoverJournal() error {
batch = &Batch{}
)

for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())

fr, err := db.s.stor.Open(fd)
fr, err := jf.Open()
if err != nil {
return err
}

// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}

// Flush memdb and remove obsolete journal file.
if !ofd.Nil() {
if of != nil {
if mdb.Len() > 0 {
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
fr.Close()
return err
}
}

rec.setJournalNum(fd.Num)
rec.setJournalNum(jf.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
fr.Close()
@@ -542,8 +541,8 @@ func (db *DB) recoverJournal() error {
}
rec.resetAddedTables()

db.s.stor.Remove(ofd)
ofd = storage.FileDesc{}
of.Remove()
of = nil
}

// Replay journal to memdb.
@@ -556,7 +555,7 @@ func (db *DB) recoverJournal() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}

buf.Reset()
@@ -567,7 +566,7 @@ func (db *DB) recoverJournal() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
@@ -577,7 +576,7 @@ func (db *DB) recoverJournal() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}

// Save sequence number.
@@ -595,7 +594,7 @@ func (db *DB) recoverJournal() error {
}

fr.Close()
ofd = fd
of = jf
}

// Flush the last memdb.
@@ -612,7 +611,7 @@ func (db *DB) recoverJournal() error {
}

// Commit.
rec.setJournalNum(db.journalFd.Num)
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.seq)
if err := db.s.commit(rec); err != nil {
// Close journal on error.
@@ -624,8 +623,8 @@ func (db *DB) recoverJournal() error {
}

// Remove the last obsolete journal file.
if !ofd.Nil() {
db.s.stor.Remove(ofd)
if of != nil {
of.Remove()
}

return nil
@@ -633,17 +632,17 @@ func (db *DB) recoverJournal() error {

func (db *DB) recoverJournalRO() error {
// Get all journals and sort it by file number.
rawFds, err := db.s.stor.List(storage.TypeJournal)
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
if err != nil {
return err
}
sortFds(rawFds)
files(allJournalFiles).sort()

// Journals that will be recovered.
var fds []storage.FileDesc
for _, fd := range rawFds {
if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
fds = append(fds, fd)
var recJournalFiles []storage.File
for _, jf := range allJournalFiles {
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
recJournalFiles = append(recJournalFiles, jf)
}
}

@@ -657,8 +656,8 @@ func (db *DB) recoverJournalRO() error {
)

// Recover journals.
if len(fds) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(fds))
if len(recJournalFiles) > 0 {
db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))

var (
jr *journal.Reader
@@ -666,19 +665,19 @@ func (db *DB) recoverJournalRO() error {
batch = &Batch{}
)

for _, fd := range fds {
db.logf("journal@recovery recovering @%d", fd.Num)
for _, jf := range recJournalFiles {
db.logf("journal@recovery recovering @%d", jf.Num())

fr, err := db.s.stor.Open(fd)
fr, err := jf.Open()
if err != nil {
return err
}

// Create or reset journal reader instance.
if jr == nil {
jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
} else {
jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
}

// Replay journal to memdb.
@@ -690,7 +689,7 @@ func (db *DB) recoverJournalRO() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}

buf.Reset()
@@ -701,7 +700,7 @@ func (db *DB) recoverJournalRO() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
if !strict && errors.IsCorrupted(err) {
@@ -711,7 +710,7 @@ func (db *DB) recoverJournalRO() error {
}

fr.Close()
return errors.SetFd(err, fd)
return errors.SetFile(err, jf)
}

// Save sequence number.
@@ -728,35 +727,46 @@ func (db *DB) recoverJournalRO() error {
return nil
}

func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
mk, mv, err := mdb.Find(ikey)
if err == nil {
ukey, _, kt, kerr := parseInternalKey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := newIkey(key, seq, ktSeek)

em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
if m == nil {
continue
}
if icmp.uCompare(ukey, ikey.ukey()) == 0 {
if kt == keyTypeDel {
return true, nil, ErrNotFound
defer m.decref()

mk, mv, me := m.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
}
return true, mv, nil

if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return nil, ErrNotFound
}
return append([]byte{}, mv...), nil
}
} else if me != ErrNotFound {
return nil, me
}
} else if err != ErrNotFound {
return true, nil, err
}

v := db.s.version()
value, cSched, err := v.get(ikey, ro, false)
v.release()
if cSched {
// Trigger table compaction.
db.compSendTrigger(db.tcompCmdC)
}
return
}

func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)

if auxm != nil {
if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
}
}
func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := newIkey(key, seq, ktSeek)

em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
@@ -765,55 +775,30 @@ func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R
}
defer m.decref()

if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
return append([]byte{}, mv...), me
mk, _, me := m.Find(ikey)
if me == nil {
ukey, _, kt, kerr := parseIkey(mk)
if kerr != nil {
// Shouldn't have had happen.
panic(kerr)
}
if db.s.icmp.uCompare(ukey, key) == 0 {
if kt == ktDel {
return false, nil
}
return true, nil
}
} else if me != ErrNotFound {
return false, me
}
}

v := db.s.version()
value, cSched, err := v.get(auxt, ikey, ro, false)
_, cSched, err := v.get(ikey, ro, true)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
}
return
}

func nilIfNotFound(err error) error {
if err == ErrNotFound {
return nil
}
return err
}

func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
ikey := makeInternalKey(nil, key, seq, keyTypeSeek)

if auxm != nil {
if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
}
}

em, fm := db.getMems()
for _, m := range [...]*memDB{em, fm} {
if m == nil {
continue
}
defer m.decref()

if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
return me == nil, nilIfNotFound(me)
}
}

v := db.s.version()
_, cSched, err := v.get(auxt, ikey, ro, true)
v.release()
if cSched {
// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}
if err == nil {
ret = true
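Both get and has walk the same layers in newest-first order: the active memtable, the frozen memtable being flushed, then the on-disk tables via the current version, with a delete record at any layer masking everything older. A toy model of that shadowing rule:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("leveldb: not found")

type entry struct {
	val string
	del bool // a tombstone, like ktDel
}

type layer map[string]entry

// get consults layers newest-first; the first layer that knows the
// key decides the answer, so tombstones mask older values.
func get(layers []layer, key string) (string, error) {
	for _, l := range layers {
		if e, ok := l[key]; ok {
			if e.del {
				return "", errNotFound
			}
			return e.val, nil
		}
	}
	return "", errNotFound
}

func main() {
	mem := layer{"a": {val: "new"}}
	frozen := layer{"a": {val: "old"}, "b": {del: true}, "c": {val: "kept"}}
	fmt.Println(get([]layer{mem, frozen}, "a")) // new <nil>
	fmt.Println(get([]layer{mem, frozen}, "b")) // masked by the tombstone
	fmt.Println(get([]layer{mem, frozen}, "c")) // kept <nil>
}
```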
@@ -837,7 +822,7 @@ func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {

se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.get(nil, nil, key, se.seq, ro)
return db.get(key, se.seq, ro)
}

// Has returns true if the DB does contains the given key.
@@ -851,11 +836,11 @@ func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {

se := db.acquireSnapshot()
defer db.releaseSnapshot(se)
return db.has(nil, nil, key, se.seq, ro)
return db.has(key, se.seq, ro)
}

// NewIterator returns an iterator for the latest snapshot of the
// underlying DB.
// uderlying DB.
// The returned iterator is not goroutine-safe, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
@@ -879,7 +864,7 @@ func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Itera
defer db.releaseSnapshot(se)
// Iterator holds 'version' lock, 'version' is immutable so snapshot
// can be released after iterator created.
return db.newIterator(nil, nil, se.seq, slice, ro)
return db.newIterator(se.seq, slice, ro)
}

// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
@@ -935,7 +920,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
var level uint
var rest string
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
if n != 1 {
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
err = ErrNotFound
} else {
value = fmt.Sprint(v.tLen(int(level)))
@@ -944,8 +929,8 @@ func (db *DB) GetProperty(name string) (value string, err error) {
value = "Compactions\n" +
" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
for level, tables := range v.levels {
duration, read, write := db.compStats.getStat(level)
for level, tables := range v.tables {
duration, read, write := db.compStats[level].get()
if len(tables) == 0 && duration == 0 {
continue
}
@@ -954,10 +939,10 @@ func (db *DB) GetProperty(name string) (value string, err error) {
float64(read)/1048576.0, float64(write)/1048576.0)
}
case p == "sstables":
for level, tables := range v.levels {
for level, tables := range v.tables {
value += fmt.Sprintf("--- level %d ---\n", level)
for _, t := range tables {
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
}
}
case p == "blockpool":
@@ -997,8 +982,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {

sizes := make(Sizes, 0, len(ranges))
for _, r := range ranges {
imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
imin := newIkey(r.Start, kMaxSeq, ktSeek)
imax := newIkey(r.Limit, kMaxSeq, ktSeek)
start, err := v.offsetOf(imin)
if err != nil {
return nil, err
@@ -1007,7 +992,7 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
if err != nil {
return nil, err
}
var size int64
var size uint64
if limit >= start {
size = limit - start
}
@@ -1017,8 +1002,8 @@ func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
return sizes, nil
}

// Close closes the DB. This will also releases any outstanding snapshot,
// abort any in-flight compaction and discard open transaction.
// Close closes the DB. This will also releases any outstanding snapshot and
// abort any in-flight compaction.
//
// It is not safe to close a DB until all outstanding iterators are released.
// It is valid to call Close multiple times. Other methods should not be
@@ -1047,18 +1032,11 @@ func (db *DB) Close() error {
// Signal all goroutines.
close(db.closeC)

// Discard open transaction.
if db.tr != nil {
db.tr.Discard()
}

// Acquire writer lock.
db.writeLockC <- struct{}{}

// Wait for all gorotines to exit.
db.closeW.Wait()

// Closes journal.
// Lock writer and closes journal.
db.writeLockC <- struct{}{}
if db.journal != nil {
db.journal.Close()
db.journalWriter.Close()
@@ -1085,6 +1063,8 @@ func (db *DB) Close() error {
db.frozenMem = nil
db.journal = nil
db.journalWriter = nil
db.journalFile = nil
db.frozenJournalFile = nil
db.closer = nil

return err
@@ -12,76 +12,55 @@ import (

"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
)

var (
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)

type cStat struct {
type cStats struct {
sync.Mutex
duration time.Duration
read int64
write int64
read uint64
write uint64
}

func (p *cStat) add(n *cStatStaging) {
func (p *cStats) add(n *cStatsStaging) {
p.Lock()
p.duration += n.duration
p.read += n.read
p.write += n.write
p.Unlock()
}

func (p *cStat) get() (duration time.Duration, read, write int64) {
func (p *cStats) get() (duration time.Duration, read, write uint64) {
p.Lock()
defer p.Unlock()
return p.duration, p.read, p.write
}

type cStatStaging struct {
type cStatsStaging struct {
start time.Time
duration time.Duration
on bool
read int64
write int64
read uint64
write uint64
}

func (p *cStatStaging) startTimer() {
func (p *cStatsStaging) startTimer() {
if !p.on {
p.start = time.Now()
p.on = true
}
}

func (p *cStatStaging) stopTimer() {
func (p *cStatsStaging) stopTimer() {
if p.on {
p.duration += time.Since(p.start)
p.on = false
}
}

type cStats struct {
lk sync.Mutex
stats []cStat
}

func (p *cStats) addStat(level int, n *cStatStaging) {
p.lk.Lock()
if level >= len(p.stats) {
newStats := make([]cStat, level+1)
copy(newStats, p.stats)
p.stats = newStats
}
p.stats[level].add(n)
p.lk.Unlock()
}

func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
p.lk.Lock()
defer p.lk.Unlock()
if level < len(p.stats) {
return p.stats[level].get()
}
return
}

func (db *DB) compactionError() {
var err error
noerr:
@@ -256,14 +235,6 @@ func (db *DB) compactionExitTransact() {
panic(errCompactionTransactExiting)
}

func (db *DB) compactionCommit(name string, rec *sessionRecord) {
db.compCommitLk.Lock()
defer db.compCommitLk.Unlock() // Defer is necessary.
db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
return db.s.commit(rec)
}, nil)
}

func (db *DB) memCompaction() {
mdb := db.getFrozenMem()
if mdb == nil {
@@ -294,40 +265,41 @@ func (db *DB) memCompaction() {

var (
rec = &sessionRecord{}
stats = &cStatStaging{}
stats = &cStatsStaging{}
flushLevel int
)

// Generate tables.
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
stats.stopTimer()
return
}, func() error {
for _, r := range rec.addedTables {
db.logf("memdb@flush revert @%d", r.num)
if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
f := db.s.getTableFile(r.num)
if err := f.Remove(); err != nil {
return err
}
}
return nil
})

rec.setJournalNum(db.journalFd.Num)
rec.setSeqNum(db.frozenSeq)

// Commit.
stats.startTimer()
db.compactionCommit("memdb", rec)
stats.stopTimer()
db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
stats.startTimer()
rec.setJournalNum(db.journalFile.Num())
rec.setSeqNum(db.frozenSeq)
err = db.s.commit(rec)
stats.stopTimer()
return
}, nil)

db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

for _, r := range rec.addedTables {
stats.write += r.size
}
db.compStats.addStat(flushLevel, stats)
db.compStats[flushLevel].add(stats)

// Drop frozen memdb.
db.dropFrozenMem()
@@ -343,7 +315,7 @@ func (db *DB) memCompaction() {
}

// Trigger table compaction.
db.compTrigger(db.tcompCmdC)
db.compSendTrigger(db.tcompCmdC)
}

type tableCompactionBuilder struct {
@@ -351,7 +323,7 @@ type tableCompactionBuilder struct {
s *session
c *compaction
rec *sessionRecord
stat0, stat1 *cStatStaging
stat0, stat1 *cStatsStaging

snapHasLastUkey bool
snapLastUkey []byte
@@ -405,9 +377,9 @@ func (b *tableCompactionBuilder) flush() error {
if err != nil {
return err
}
b.rec.addTableFile(b.c.sourceLevel+1, t)
b.rec.addTableFile(b.c.level+1, t)
b.stat1.write += t.size
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
b.tw = nil
return nil
}
@@ -452,7 +424,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
}

ikey := iter.Key()
ukey, seq, kt, kerr := parseInternalKey(ikey)
ukey, seq, kt, kerr := parseIkey(ikey)

if kerr == nil {
shouldStop := !resumed && b.c.shouldStopBefore(ikey)
@@ -478,14 +450,14 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {

hasLastUkey = true
lastUkey = append(lastUkey[:0], ukey...)
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
}

switch {
case lastSeq <= b.minSeq:
// Dropped because newer entry for same user key exist
fallthrough // (A)
case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
// For this user key:
// (1) there is no data in higher levels
// (2) data in lower levels will have larger seq numbers
@@ -507,7 +479,7 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
// Don't drop corrupted keys.
hasLastUkey = false
lastUkey = lastUkey[:0]
lastSeq = keyMaxSeq
lastSeq = kMaxSeq
b.kerrCnt++
}

@@ -530,7 +502,8 @@ func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
func (b *tableCompactionBuilder) revert() error {
for _, at := range b.rec.addedTables {
b.s.logf("table@build revert @%d", at.num)
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
f := b.s.getTableFile(at.num)
if err := f.Remove(); err != nil {
return err
}
}
@@ -541,28 +514,30 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
defer c.release()

rec := &sessionRecord{}
rec.addCompPtr(c.sourceLevel, c.imax)
rec.addCompPtr(c.level, c.imax)

if !noTrivial && c.trivial() {
t := c.levels[0][0]
db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
rec.delTable(c.sourceLevel, t.fd.Num)
rec.addTableFile(c.sourceLevel+1, t)
db.compactionCommit("table-move", rec)
t := c.tables[0][0]
db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
rec.delTable(c.level, t.file.Num())
rec.addTableFile(c.level+1, t)
db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
return db.s.commit(rec)
}, nil)
return
}

var stats [2]cStatStaging
for i, tables := range c.levels {
var stats [2]cStatsStaging
for i, tables := range c.tables {
for _, t := range tables {
stats[i].read += t.size
// Insert deleted tables into record
rec.delTable(c.sourceLevel+i, t.fd.Num)
rec.delTable(c.level+i, t.file.Num())
}
}
sourceSize := int(stats[0].read + stats[1].read)
minSeq := db.minSeq()
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)

b := &tableCompactionBuilder{
db: db,
@@ -572,60 +547,49 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
stat1: &stats[1],
minSeq: minSeq,
strict: db.s.o.GetStrict(opt.StrictCompaction),
tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
}
db.compactionTransact("table@build", b)

// Commit.
stats[1].startTimer()
db.compactionCommit("table", rec)
stats[1].stopTimer()
// Commit changes
db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
stats[1].startTimer()
defer stats[1].stopTimer()
return db.s.commit(rec)
}, nil)

resultSize := int(stats[1].write)
db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

// Save compaction stats
for i := range stats {
db.compStats.addStat(c.sourceLevel+1, &stats[i])
db.compStats[c.level+1].add(&stats[i])
}
}

func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
db.logf("table@compaction range L%d %q:%q", level, umin, umax)

if level >= 0 {
if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
} else {
// Retry until nothing to compact.
for {
compacted := false

// Scan for maximum level with overlapped tables.
v := db.s.version()
m := 1
for i := m; i < len(v.levels); i++ {
tables := v.levels[i]
if tables.overlaps(db.s.icmp, umin, umax, false) {
m = i
}
v := db.s.version()
m := 1
for i, t := range v.tables[1:] {
if t.overlaps(db.s.icmp, umin, umax, false) {
m = i + 1
}
v.release()
}
v.release()

for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
db.tableCompaction(c, true)
compacted = true
}
}

if !compacted {
break
for level := 0; level < m; level++ {
if c := db.s.getCompactionRange(level, umin, umax); c != nil {
db.tableCompaction(c, true)
}
}
}

return nil
}

func (db *DB) tableAutoCompaction() {
@@ -652,11 +616,11 @@ type cCmd interface {
ack(err error)
}

type cAuto struct {
type cIdle struct {
ackC chan<- error
}

func (r cAuto) ack(err error) {
func (r cIdle) ack(err error) {
if r.ackC != nil {
defer func() {
recover()
@@ -680,21 +644,13 @@ func (r cRange) ack(err error) {
}
}

// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
select {
case compC <- cAuto{}:
default:
}
}

// This will trigger auto compation and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
ch := make(chan error)
defer close(ch)
// Send cmd.
select {
case compC <- cAuto{ch}:
case compC <- cIdle{ch}:
case err = <-db.compErrC:
return
case _, _ = <-db.closeC:
@@ -710,8 +666,16 @@ func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
return err
}

// This will trigger auto compaction but will not wait for it.
func (db *DB) compSendTrigger(compC chan<- cCmd) {
select {
case compC <- cIdle{}:
default:
}
}

// Send range compaction request.
|
||||
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
|
||||
func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
|
||||
ch := make(chan error)
|
||||
defer close(ch)
|
||||
// Send cmd.
|
||||
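Despite the renames across these two versions (compTrigger↔compSendTrigger, compTriggerWait↔compSendIdle), both sides use the same two channel idioms: a non-blocking send for fire-and-forget triggering, and an ack channel for the blocking variant. A self-contained sketch of the pattern — the names and the worker loop below are invented for illustration, and the real code also selects on compErrC and closeC:

package main

import "fmt"

type cmd struct{ ackC chan<- error }

// trigger sends a command without blocking; if the worker's queue is
// full the request is simply dropped, as in compTrigger/compSendTrigger.
func trigger(compC chan<- cmd) {
	select {
	case compC <- cmd{}:
	default:
	}
}

// triggerWait sends a command and blocks until the worker acks it,
// as in compTriggerWait/compSendIdle (error plumbing omitted).
func triggerWait(compC chan<- cmd) error {
	ch := make(chan error)
	compC <- cmd{ackC: ch}
	return <-ch
}

func main() {
	compC := make(chan cmd, 1)
	go func() {
		for c := range compC {
			// ... compaction work would happen here ...
			if c.ackC != nil {
				c.ackC <- nil
			}
		}
	}()
	trigger(compC)                  // fire and forget
	fmt.Println(triggerWait(compC)) // blocks for the ack; prints <nil>
}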
@@ -751,7 +715,7 @@ func (db *DB) mCompaction() {
 		select {
 		case x = <-db.mcompCmdC:
 			switch x.(type) {
-			case cAuto:
+			case cIdle:
 				db.memCompaction()
 				x.ack(nil)
 				x = nil
@@ -812,10 +776,11 @@ func (db *DB) tCompaction() {
 		}
 		if x != nil {
 			switch cmd := x.(type) {
-			case cAuto:
+			case cIdle:
 				ackQ = append(ackQ, x)
 			case cRange:
-				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
+				db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
+				x.ack(nil)
 			default:
 				panic("leveldb: unknown command")
 			}
@@ -19,7 +19,7 @@ import (
 )
 
 var (
-	errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
+	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
 )
 
 type memdbReleaser struct {
@@ -33,50 +33,40 @@ func (mr *memdbReleaser) Release() {
 	})
 }
 
-func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
 	em, fm := db.getMems()
 	v := db.s.version()
 
-	tableIts := v.getIterators(slice, ro)
-	n := len(tableIts) + len(auxt) + 3
-	its := make([]iterator.Iterator, 0, n)
-
-	if auxm != nil {
-		ami := auxm.NewIterator(slice)
-		ami.SetReleaser(&memdbReleaser{m: auxm})
-		its = append(its, ami)
-	}
-	for _, t := range auxt {
-		its = append(its, v.s.tops.newIterator(t, slice, ro))
-	}
-
+	ti := v.getIterators(slice, ro)
+	n := len(ti) + 2
+	i := make([]iterator.Iterator, 0, n)
 	emi := em.NewIterator(slice)
 	emi.SetReleaser(&memdbReleaser{m: em})
-	its = append(its, emi)
+	i = append(i, emi)
 	if fm != nil {
 		fmi := fm.NewIterator(slice)
 		fmi.SetReleaser(&memdbReleaser{m: fm})
-		its = append(its, fmi)
+		i = append(i, fmi)
 	}
-	its = append(its, tableIts...)
-	mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
+	i = append(i, ti...)
+	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+	mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
 	mi.SetReleaser(&versionReleaser{v: v})
 	return mi
 }
 
-func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
 	var islice *util.Range
 	if slice != nil {
 		islice = &util.Range{}
 		if slice.Start != nil {
-			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
+			islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
 		}
 		if slice.Limit != nil {
-			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
+			islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
 		}
 	}
-	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
+	rawIter := db.newRawIterator(islice, ro)
 	iter := &dbIter{
 		db:   db,
 		icmp: db.s.icmp,
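In both versions of newRawIterator above, the append order encodes recency: the live memtable iterator goes in first, then the frozen memtable, then the on-disk table iterators, so newer layers shadow older ones for the same user key. A toy illustration of that precedence rule — maps instead of iterators, and not the goleveldb mechanism itself, which resolves shadowing via internal-key sequence numbers:

package main

import "fmt"

// lookup consults layers in order and returns the first hit, so the
// layer added first (the newest data) wins for a duplicated key.
func lookup(layers []map[string]string, key string) (string, bool) {
	for _, layer := range layers {
		if v, ok := layer[key]; ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	mem := map[string]string{"k": "new"}    // current memtable
	frozen := map[string]string{"k": "old"} // memtable being flushed
	v, _ := lookup([]map[string]string{mem, frozen}, "k")
	fmt.Println(v) // new
}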
@@ -187,7 +177,7 @@ func (i *dbIter) Seek(key []byte) bool {
 		return false
 	}
 
-	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
+	ikey := newIkey(key, i.seq, ktSeek)
 	if i.iter.Seek(ikey) {
 		i.dir = dirSOI
 		return i.next()
@@ -199,15 +189,15 @@ func (i *dbIter) Seek(key []byte) bool {
 
 func (i *dbIter) next() bool {
 	for {
-		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
 			i.sampleSeek()
 			if seq <= i.seq {
 				switch kt {
-				case keyTypeDel:
+				case ktDel:
 					// Skip deleted key.
 					i.key = append(i.key[:0], ukey...)
 					i.dir = dirForward
-				case keyTypeVal:
+				case ktVal:
 					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
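The parseIkey/parseInternalKey calls above decode the standard LevelDB internal key: the user key followed by an 8-byte little-endian trailer packing (sequence << 8) | keyType, which is what lets next() compare seq against the iterator's snapshot and recognize deletions. A sketch of that layout — the function names here are illustrative, not the vendored package's exact signatures:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	ktDel uint64 = 0 // deletion marker
	ktVal uint64 = 1 // ordinary value
)

// makeIkey appends the 8-byte trailer (seq<<8 | kt) to the user key.
func makeIkey(ukey []byte, seq, kt uint64) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|kt)
	return ik
}

// parseIkey splits an internal key back into user key, seq and type.
func parseIkey(ik []byte) (ukey []byte, seq, kt uint64, err error) {
	if len(ik) < 8 {
		return nil, 0, 0, fmt.Errorf("invalid internal key")
	}
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, num & 0xff, nil
}

func main() {
	ik := makeIkey([]byte("foo"), 42, ktVal)
	ukey, seq, kt, _ := parseIkey(ik)
	fmt.Printf("%s seq=%d type=%d\n", ukey, seq, kt) // foo seq=42 type=1
}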
@@ -250,13 +240,13 @@ func (i *dbIter) prev() bool {
 	del := true
 	if i.iter.Valid() {
 		for {
-			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if seq <= i.seq {
 					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
-					del = (kt == keyTypeDel)
+					del = (kt == ktDel)
 					if !del {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -292,7 +282,7 @@ func (i *dbIter) Prev() bool {
 		return i.Last()
 	case dirForward:
 		for i.iter.Prev() {
-			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
 				i.sampleSeek()
 				if i.icmp.uCompare(ukey, i.key) < 0 {
 					goto cont
Some files were not shown because too many files have changed in this diff.