Compare commits


34 Commits

Author SHA1 Message Date
Tim Howes
fe9c2b9857 lib/ignore: Match directory contents for patterns ending in / (fixes #3639)
Appends "**" to patterns with a terminal slash, so that directory
contents are ignored, but not the directory itself.
2016-10-04 08:12:55 +09:00
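A minimal sketch of the rewrite described above (not the library code; the real parser in the lib/ignore diff below handles more cases): a pattern with a terminal slash is expanded so it matches the directory's contents but not the directory entry itself.

```go
// Sketch only: patterns ending in "/" gain "**" so that "foo/" ignores
// foo/bar but not the directory foo itself.
package main

import (
	"fmt"
	"strings"
)

func expand(line string) string {
	switch {
	case strings.HasSuffix(line, "/**"):
		return line // already explicit, keep as-is
	case strings.HasSuffix(line, "/"):
		return line + "**" // "foo/" -> "foo/**"
	default:
		return line
	}
}

func main() {
	fmt.Println(expand("foo/")) // foo/**
}
```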
Jakob Borg
2a2177e7fa authors: Add timhowes 2016-10-04 08:11:57 +09:00
Jakob Borg
d1d565e58b cmd/syncthing: Localhost header comparison should be case insensitive 2016-10-03 17:34:13 +09:00
Peter Hoeg
891ff383ec etc/linux-systemd: Remove bogus dependency on networking for user unit
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3627
2016-10-03 03:49:00 +00:00
Nathan Morrison
d322ebd0b9 Add API service for local disk changes
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3626
LGTM: calmh, AudriusButkevicius
2016-09-28 15:54:13 +00:00
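A hedged sketch of polling the new endpoint (the /rest/events/disk path is confirmed by the diffs below; host, port and API key here are placeholders for a local instance):

```go
// Sketch: poll /rest/events/disk for LocalChangeDetected events.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://127.0.0.1:8384/rest/events/disk?since=0&limit=10", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", "your-api-key") // placeholder
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// JSON array of events; per the model diff below, each event's data
	// carries folderID, label, action, type and path.
	fmt.Println(string(body))
}
```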
Peter Hoeg
50190236bb Ignore pkill error on resume
The `syncthing-resume.service` unit will show up as a failed service when there are no syncthing processes running after resume, but this can be safely ignored because it makes no difference.

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3630
2016-09-28 10:21:15 +00:00
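The actual fix, visible in the syncthing-resume.service diff further down, is a one-character change: prefixing the ExecStart command with "-", which tells systemd to treat a non-zero exit status as success instead of marking the unit failed. Unit-file excerpt (same content as the diff below):

```ini
[Service]
Type=oneshot
# The leading "-" makes systemd ignore a non-zero exit from pkill.
ExecStart=-/usr/bin/pkill -HUP -x syncthing
```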
Jakob Borg
d5a0f91cb4 cmd/syncthing: Restore useful levels of traceback on panic 2016-09-26 21:14:17 +02:00
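For context (the change is in the cmd/syncthing/main.go diff below): since Go 1.6 a panic prints only the crashing goroutine by default, and debug.SetTraceback("all") restores the all-goroutines trace. A minimal sketch:

```go
// Sketch: request traces for all goroutines on panic, not just the one
// that crashed (the Go 1.6+ default).
package main

import "runtime/debug"

func main() {
	debug.SetTraceback("all")
	// ... rest of startup ...
}
```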
Jakob Borg
467c1b26fb cmd/syncthing, lib/config: Log errors replacing or saving config (ref #3567) 2016-09-24 09:59:09 +02:00
Jakob Borg
3cabecda04 lib/upnp: Correct the result deduplication mechanism (fixes #3578)
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3618
2016-09-24 07:33:56 +00:00
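A toy sketch (not the library code) of the seen-set pattern the fix switches to in the lib/upnp diff below: each result ID is recorded once and later repeats are skipped, instead of re-scanning the accumulated results slice.

```go
// Toy illustration of deduplication via a seen map.
package main

import "fmt"

func dedup(ids []string) []string {
	seen := make(map[string]bool)
	var out []string
	for _, id := range ids {
		if seen[id] {
			continue // duplicate, skip
		}
		seen[id] = true
		out = append(out, id)
	}
	return out
}

func main() {
	fmt.Println(dedup([]string{"a", "b", "a", "c"})) // [a b c]
}
```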
Jakob Borg
6d3160b0ab gui: Folder is out of sync when it needs deletes, too (fixes #3588) 2016-09-24 09:11:38 +02:00
Jakob Borg
d328e0fb75 cmd/syncthing: Add selectable sha256 package (fixes #3613, fixes #3614)
This adds autodetection of the fastest hashing library at startup, which handles the performance regression. It also adds an environment variable to control the selection: STHASHING=standard (the Go standard library implementation; avoids the SIGILL crash when the minio library has bugs on odd CPUs), STHASHING=minio (to force the minio implementation), or unset for the default autodetection.

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3617
2016-09-23 19:33:54 +00:00
Jakob Borg
5f01afb7ea build: No need for outdated go2xunit 2016-09-18 21:02:42 +02:00
fti7
6fe2fa5ff0 gui: Slightly lighten the dark theme
Skip-check: authors

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3605
2016-09-18 13:47:36 +00:00
Jakob Borg
b371b1fe34 lib/versioner: Test both spaces and parens in ext versioner paths
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3610
2016-09-18 12:24:55 +00:00
Jakob Borg
90c0a39df8 lib/versioner: Test for external versioner
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3609
2016-09-17 20:34:50 +00:00
Lars K.W. Gohlke
70c5a5dff1 lib/versioner: Rename versioner_test to simple_test
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3603
2016-09-16 11:01:43 +00:00
Jakob Borg
da0b7cc7f2 lib/model: Correct lock taking order in ConnectionStats (fixes #3596)
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3597
2016-09-14 19:38:55 +00:00
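The deadlock class being fixed is two goroutines taking the same pair of locks in opposite orders; the commit makes ConnectionStats acquire fmut before pmut, matching the rest of the model code. A toy illustration (lock names borrowed from the diff; this is not Syncthing code):

```go
// As long as every code path acquires fmut before pmut, the two mutexes
// cannot deadlock against each other.
package main

import "sync"

var (
	fmut sync.RWMutex // folder state; always acquired first
	pmut sync.RWMutex // peer/connection state; always acquired second
)

func connectionStats() {
	fmut.RLock()
	pmut.RLock()
	// ... read folder and connection state ...
	pmut.RUnlock()
	fmut.RUnlock()
}

func main() { connectionStats() }
```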
Jakob Borg
139e9b144e lib/config: Fix tests for changes in previous commit 2016-09-13 22:20:22 +02:00
Jakob Borg
77c0a19451 vendor: Update github.com/d4l3k/messagediff 2016-09-13 22:20:22 +02:00
Jakob Borg
58cbd19742 vendor: Update golang.org/cznic/... 2016-09-13 22:20:22 +02:00
Jakob Borg
9bf6917ae8 vendor: Update golang.org/x/crypto/... 2016-09-13 22:20:22 +02:00
Jakob Borg
897cca0a82 vendor: Update golang.org/x/net/... 2016-09-13 22:20:22 +02:00
Jakob Borg
6af09c61be vendor: Update github.com/thejerf/suture 2016-09-13 22:20:22 +02:00
Jakob Borg
c3c7798446 vendor: Update github.com/gobwas/glob 2016-09-13 22:20:22 +02:00
Jakob Borg
06dc91fadf vendor: Update github.com/syndtr/goleveldb 2016-09-13 22:20:22 +02:00
Jakob Borg
526cab538a jenkins: Don't fetch --prune unnecessarily, print build version on Windows 2016-09-13 22:18:55 +02:00
Jakob Borg
81d19a00aa vendor: Add github.com/cznic/lldb and friends (new recursive dependency) 2016-09-13 21:57:19 +02:00
Jakob Borg
ca755ec9e0 vendor: Add golang.org/x/net/bpf 2016-09-13 21:56:33 +02:00
Jakob Borg
4f6206cb2d build: Simpler creation of Debian packages
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3591
2016-09-12 12:21:07 +00:00
Jakob Borg
7fb53ec954 lib/config: Correct name of discovery-v6-4 server 2016-09-12 11:30:06 +02:00
Jakob Borg
d8b5070ca8 lib/config: Update default set of discovery servers 2016-09-12 09:55:45 +02:00
Jakob Borg
5e99d38412 all: Use github.com/minio/sha256-simd
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3581
2016-09-09 09:57:51 +00:00
Laurent Etiemble
3990014073 cmd/syncthing: Conditionally enable CORS
GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3541
LGTM: AudriusButkevicius
2016-09-06 22:16:50 +00:00
Jakob Borg
3e51206a6b build, jenkins: Jenkins version tag should be same as when building manually 2016-09-06 13:02:17 +02:00
224 changed files with 11440 additions and 3688 deletions

.gitignore (vendored, 1 change)

@@ -5,6 +5,7 @@ stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
.jshintrc
coverage.out
files/pidx

AUTHORS

@@ -87,6 +87,7 @@ Sergey Mishin (ralder) <ralder@yandex.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>

NICKS (1 change)

@@ -101,6 +101,7 @@ snnd <dw@risu.io>
Stefan-Code <stefan.github@gmail.com>
Stefan-Code <Stefan.github@gmail.com>
timabell <tim@timwise.co.uk>
timhowes <timhowes@berkeley.edu>
tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
tpng <benny.tpng@gmail.com>

build.go

@@ -27,7 +27,6 @@ import (
"strconv"
"strings"
"syscall"
"text/template"
"time"
)
@@ -43,12 +42,12 @@ var (
)
type target struct {
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
debianFiles []archiveFile
tags []string
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
installationFiles []archiveFile
tags []string
}
type archiveFile struct {
@@ -76,7 +75,7 @@ var targets = map[string]target{
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
// All files from etc/ and extra/ added automatically in init().
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
@@ -106,7 +105,7 @@ var targets = map[string]target{
{src: "cmd/stdiscosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/stdiscosrv/README.txt", perm: 0644},
{src: "cmd/stdiscosrv/LICENSE", dst: "deb/usr/share/doc/stdiscosrv/LICENSE.txt", perm: 0644},
@@ -125,7 +124,7 @@ var targets = map[string]target{
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/strelaysrv/README.txt", perm: 0644},
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/strelaysrv/LICENSE.txt", perm: 0644},
@@ -143,7 +142,7 @@ var targets = map[string]target{
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/relaysrv/README.txt", perm: 0644},
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/relaysrv/LICENSE.txt", perm: 0644},
@@ -163,7 +162,7 @@ func init() {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.debianFiles = append(syncthingPkg.debianFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
targets["syncthing"] = syncthingPkg
}
@@ -298,6 +297,9 @@ func runCommand(cmd string, target target) {
}
}
case "version":
fmt.Println(getVersion())
default:
log.Fatalf("Unknown command %q", cmd)
}
@@ -354,7 +356,6 @@ func setup() {
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
runPrint("go", "get", "-v", "github.com/alecthomas/gometalinter")
runPrint("go", "get", "-v", "github.com/mitchellh/go-wordwrap")
}
@@ -491,46 +492,31 @@ func buildDeb(target target) {
build(target, []string{"noupgrade"})
for i := range target.debianFiles {
target.debianFiles[i].src = strings.Replace(target.debianFiles[i].src, "{{binary}}", target.binaryName, 1)
target.debianFiles[i].dst = strings.Replace(target.debianFiles[i].dst, "{{binary}}", target.binaryName, 1)
for i := range target.installationFiles {
target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.binaryName, 1)
target.installationFiles[i].dst = strings.Replace(target.installationFiles[i].dst, "{{binary}}", target.binaryName, 1)
}
for _, af := range target.debianFiles {
for _, af := range target.installationFiles {
if err := copyFile(af.src, af.dst, af.perm); err != nil {
log.Fatal(err)
}
}
os.MkdirAll("deb/DEBIAN", 0755)
data := map[string]string{
"name": target.name,
"arch": debarch,
"version": version[1:],
"date": time.Now().Format(time.RFC1123),
}
debTemplateFiles := append(listFiles("debtpl/common"), listFiles("debtpl/"+target.name)...)
for _, file := range debTemplateFiles {
tpl, err := template.New(filepath.Base(file)).ParseFiles(file)
if err != nil {
log.Fatal(err)
}
outFile := filepath.Join("deb/DEBIAN", filepath.Base(file))
out, err := os.Create(outFile)
if err != nil {
log.Fatal(err)
}
if err := tpl.Execute(out, data); err != nil {
log.Fatal(err)
}
if err := out.Close(); err != nil {
log.Fatal(err)
}
info, _ := os.Lstat(file)
os.Chmod(outFile, info.Mode())
maintainer := "Syncthing Release Management <release@syncthing.net>"
debver := version
if strings.HasPrefix(debver, "v") {
debver = debver[1:]
}
runPrint("fpm", "-t", "deb", "-s", "dir", "-C", "deb",
"-n", "syncthing", "-v", debver, "-a", debarch,
"--vendor", maintainer, "-m", maintainer,
"-d", "libc6",
"-d", "procps", // because postinst script
"--url", "https://syncthing.net/",
"--description", "Open Source Continuous File Synchronization",
"--after-upgrade", "script/post-upgrade",
"--license", "MPL-2")
}
func copyFile(src, dst string, perm os.FileMode) error {

cmd/stdiscosrv/stdiscosrv (binary executable; contents not shown)

cmd/syncthing/gui.go

@@ -53,6 +53,7 @@ type apiService struct {
statics *staticsServer
model modelIntf
eventSub events.BufferedSubscription
diskEventSub events.BufferedSubscription
discoverer discover.CachingMux
connectionsService connectionsIntf
fss *folderSummaryService
@@ -113,7 +114,7 @@ type connectionsIntf interface {
Status() map[string]interface{}
}
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) *apiService {
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, diskEventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) *apiService {
service := &apiService{
id: id,
cfg: cfg,
@@ -122,6 +123,7 @@ func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKey
statics: newStaticsServer(cfg.GUI().Theme, assetDir),
model: m,
eventSub: eventSub,
diskEventSub: diskEventSub,
discoverer: discoverer,
connectionsService: connectionsService,
systemConfigMut: sync.NewMutex(),
@@ -229,7 +231,8 @@ func (s *apiService) Serve() {
getRestMux.HandleFunc("/rest/db/need", s.getDBNeed) // folder [perpage] [page]
getRestMux.HandleFunc("/rest/db/status", s.getDBStatus) // folder
getRestMux.HandleFunc("/rest/db/browse", s.getDBBrowse) // folder [prefix] [dirsonly] [levels]
getRestMux.HandleFunc("/rest/events", s.getEvents) // since [limit]
getRestMux.HandleFunc("/rest/events", s.getIndexEvents) // since [limit]
getRestMux.HandleFunc("/rest/events/disk", s.getDiskEvents) // since [limit]
getRestMux.HandleFunc("/rest/stats/device", s.getDeviceStats) // -
getRestMux.HandleFunc("/rest/stats/folder", s.getFolderStats) // -
getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID) // id
@@ -442,10 +445,12 @@ func corsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Process OPTIONS requests
if r.Method == "OPTIONS" {
// Add a generous access-control-allow-origin header for CORS requests
w.Header().Add("Access-Control-Allow-Origin", "*")
// Only GET/POST Methods are supported
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
// Only this custom header can be set
w.Header().Set("Access-Control-Allow-Headers", "X-API-Key")
// Only these headers can be set
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
// The request is meant to be cached 10 minutes
w.Header().Set("Access-Control-Max-Age", "600")
@@ -763,11 +768,13 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
// Activate and save
if err := s.cfg.Replace(to); err != nil {
l.Warnln("Replacing config:", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := s.cfg.Save(); err != nil {
l.Warnln("Saving config:", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -989,15 +996,22 @@ func (s *apiService) postDBIgnores(w http.ResponseWriter, r *http.Request) {
s.getDBIgnores(w, r)
}
func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request) {
func (s *apiService) getIndexEvents(w http.ResponseWriter, r *http.Request) {
s.fss.gotEventRequest()
s.getEvents(w, r, s.eventSub)
}
func (s *apiService) getDiskEvents(w http.ResponseWriter, r *http.Request) {
s.getEvents(w, r, s.diskEventSub)
}
func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request, eventSub events.BufferedSubscription) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
s.fss.gotEventRequest()
// Flush before blocking, to indicate that we've received the request and
// that it should not be retried. Must set Content-Type header before
// flushing.
@@ -1005,7 +1019,7 @@ func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request) {
f := w.(http.Flusher)
f.Flush()
evs := s.eventSub.Since(since, nil)
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
@@ -1314,7 +1328,7 @@ func addressIsLocalhost(addr string) bool {
// There was no port, so we assume the address was just a hostname
host = addr
}
switch host {
switch strings.ToLower(host) {
case "127.0.0.1", "::1", "localhost":
return true
default:

cmd/syncthing/gui_csrf.go

@@ -37,6 +37,9 @@ func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, n
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Allow requests carrying a valid API key
if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) {
// Set the access-control-allow-origin header for CORS requests
// since a valid API key has been provided
w.Header().Add("Access-Control-Allow-Origin", "*")
next.ServeHTTP(w, r)
return
}

cmd/syncthing/gui_test.go

@@ -70,7 +70,7 @@ func TestStopAfterBrokenConfig(t *testing.T) {
}
w := config.Wrap("/dev/null", cfg)
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil, nil)
srv.started = make(chan string)
sup := suture.NewSimple("test")
@@ -469,6 +469,7 @@ func startHTTP(cfg *mockedConfig) (string, error) {
httpsKeyFile := "../../test/h1/https-key.pem"
assetDir := "../../gui"
eventSub := new(mockedEventSub)
diskEventSub := new(mockedEventSub)
discoverer := new(mockedCachingMux)
connections := new(mockedConnections)
errorLog := new(mockedLoggerRecorder)
@@ -477,7 +478,7 @@ func startHTTP(cfg *mockedConfig) (string, error) {
// Instantiate the API service
svc := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model,
eventSub, discoverer, connections, errorLog, systemLog)
eventSub, diskEventSub, discoverer, connections, errorLog, systemLog)
svc.started = addrChan
// Actually start the API service
@@ -832,9 +833,11 @@ func TestAddressIsLocalhost(t *testing.T) {
}{
// These are all valid localhost addresses
{"localhost", true},
{"LOCALHOST", true},
{"::1", true},
{"127.0.0.1", true},
{"localhost:8080", true},
{"LOCALHOST:8000", true},
{"[::1]:8080", true},
{"127.0.0.1:8080", true},
@@ -857,3 +860,64 @@ func TestAddressIsLocalhost(t *testing.T) {
}
}
}
func TestAccessControlAllowOriginHeader(t *testing.T) {
const testAPIKey = "foobarbaz"
cfg := new(mockedConfig)
cfg.gui.APIKey = testAPIKey
baseURL, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
cli := &http.Client{
Timeout: time.Second,
}
req, _ := http.NewRequest("GET", baseURL+"/rest/system/status", nil)
req.Header.Set("X-API-Key", testAPIKey)
resp, err := cli.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatal("GET on /rest/system/status should succeed, not", resp.Status)
}
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatal("GET on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
}
}
func TestOptionsRequest(t *testing.T) {
const testAPIKey = "foobarbaz"
cfg := new(mockedConfig)
cfg.gui.APIKey = testAPIKey
baseURL, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
cli := &http.Client{
Timeout: time.Second,
}
req, _ := http.NewRequest("OPTIONS", baseURL+"/rest/system/status", nil)
resp, err := cli.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
t.Fatal("OPTIONS on /rest/system/status should succeed, not", resp.Status)
}
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
}
if resp.Header.Get("Access-Control-Allow-Methods") != "GET, POST" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Methods: GET, POST' header")
}
if resp.Header.Get("Access-Control-Allow-Headers") != "Content-Type, X-API-Key" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Headers: Content-Type, X-API-KEY' header")
}
}

cmd/syncthing/main.go

@@ -23,6 +23,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"runtime/pprof"
"sort"
"strconv"
@@ -41,6 +42,7 @@ import (
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/upgrade"
@@ -166,6 +168,11 @@ are mostly useful for developers. Use with care.
STNOUPGRADE Disable automatic upgrades.
STHASHING Select the SHA256 hashing package to use. Possible values
are "standard" for the Go standard library implementation,
"minio" for the github.com/minio/sha256-simd implementation,
and blank (the default) for auto detection.
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores.
@@ -274,6 +281,9 @@ func parseCommandLineOptions() RuntimeOptions {
}
func main() {
// We want all (our) goroutines in panic traces.
debug.SetTraceback("all")
options := parseCommandLineOptions()
l.SetFlags(options.logFlags)
@@ -542,6 +552,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected), 1000)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
@@ -567,7 +578,9 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
l.Infoln(LongVersion)
l.Infoln("My ID:", myID)
printHashRate()
sha256.SelectAlgo()
sha256.Report()
// Emit the Starting event, now that we know who we are.
@@ -740,7 +753,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// GUI
setupGUI(mainService, cfg, m, apiSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)
setupGUI(mainService, cfg, m, apiSub, diskSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)
if runtimeOptions.cpuProfile {
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
@@ -840,22 +853,6 @@ func setupSignalHandling() {
}()
}
// printHashRate prints the hashing performance in MB/s, formatting it with
// appropriate precision for the value, i.e. 182 MB/s, 18 MB/s, 1.8 MB/s, 0.18
// MB/s.
func printHashRate() {
hashRate := cpuBench(3, 100*time.Millisecond)
decimals := 0
if hashRate < 1 {
decimals = 2
} else if hashRate < 10 {
decimals = 1
}
l.Infof("Single thread hash performance is ~%.*f MB/s", decimals, hashRate)
}
func loadConfig() (*config.Wrapper, error) {
cfgFile := locations[locConfigFile]
cfg, err := config.Load(cfgFile, myID)
@@ -932,7 +929,7 @@ func startAuditing(mainService *suture.Supervisor) {
l.Infoln("Audit log in", auditFile)
}
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
guiCfg := cfg.GUI()
if !guiCfg.Enabled {
@@ -943,7 +940,7 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
l.Warnln("Insecure admin access is enabled.")
}
api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, discoverer, connectionsService, errors, systemLog)
api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, diskSub, discoverer, connectionsService, errors, systemLog)
cfg.Subscribe(api)
mainService.Add(api)

cmd/syncthing/monitor.go

@@ -178,6 +178,22 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
if panicFd == nil {
dst.Write([]byte(line))
if strings.Contains(line, "SIGILL") {
l.Warnln(`
*******************************************************************************
* Crash due to illegal instruction detected. This is most likely due to a CPU *
* incompatibility with the high performance hashing package. Switching to the *
* standard hashing package instead. Please report this issue at: *
* *
* https://github.com/syncthing/syncthing/issues *
* *
* Include the details of your CPU. *
*******************************************************************************
`)
os.Setenv("STHASHING", "standard")
return
}
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
panicFd, err = os.Create(timestampedLoc(locPanicLog))
if err != nil {

cmd/syncthing/usage_report.go

@@ -9,7 +9,6 @@ package main
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"encoding/json"
"fmt"
@@ -23,6 +22,7 @@ import (
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/thejerf/suture"
)

debtpl/common/changelog (deleted)

@@ -1,5 +0,0 @@
{{.name}} ({{.version}}); urgency=medium
* Packaging of {{.version}}.
-- Syncthing Release Management <release@syncthing.net> {{.date}}

debtpl/common/compat (deleted)

@@ -1 +0,0 @@
9

debtpl/common/control (deleted)

@@ -1,16 +0,0 @@
Package: syncthing
Version: {{.version}}
Priority: optional
Section: net
Architecture: {{.arch}}
Depends: libc6, procps
Homepage: https://syncthing.net/
Maintainer: Syncthing Release Management <release@syncthing.net>
Description: Open Source Continuous File Synchronization
Syncthing is an application that lets you synchronize your files across
multiple devices. This means the creation, modification or deletion of files
on one machine will automatically be replicated to your other devices. We
believe your data is your data alone and you deserve to choose where it is
stored. Therefore Syncthing does not upload your data to the cloud but
exchanges your data across your machines as soon as they are online at the
same time.

debtpl/common/postinst (deleted)

@@ -1,6 +0,0 @@
#!/bin/bash
set -euo pipefail
if [[ ${1:-} == configure ]]; then
pkill -HUP -x syncthing || true
fi

syncthing-resume.service (systemd unit)

@@ -5,7 +5,7 @@ After=suspend.target
[Service]
Type=oneshot
ExecStart=/usr/bin/pkill -HUP -x syncthing
ExecStart=-/usr/bin/pkill -HUP -x syncthing
[Install]
WantedBy=suspend.target

syncthing.service (systemd user unit)

@@ -1,7 +1,6 @@
[Unit]
Description=Syncthing - Open Source Continuous File Synchronization
Documentation=man:syncthing(1)
After=network.target
Wants=syncthing-inotify.service
[Service]

GUI dark theme stylesheet

@@ -5,11 +5,13 @@ Dark theme
Author: alessandro.g89
Source: https://userstyles.org/styles/122502/syncthing-dark
Modified by: fti7
**/
body {
color: #aaa !important;
background-color: black !important;
background-color: #272727 !important;
}
a:hover,a:focus,a.focus{
@@ -20,8 +22,10 @@ a:hover,a:focus,a.focus{
/* navbar */
.navbar {
background-color: #333 !important;
border-color: #333 !important;
border-width: 2px !important;
border-color: #424242 !important;
border-top-width: 1px !important;
border-bottom-width: 1px !important;
}
.navbar-text, .dropdown>a, .dropdown-menu>li>a, .hidden-xs>a, .navbar-link {
@@ -29,7 +33,7 @@ a:hover,a:focus,a.focus{
}
.dropdown-menu {
border-color: #333 !important;
border-color: #424242 !important;
border-width: 2px !important;
background-color: #222 !important;
}
@@ -40,7 +44,7 @@ a:hover,a:focus,a.focus{
}
.open>.dropdown-toggle, .dropdown-toggle:hover {
border-color: #333 !important;
border-color: #424242 !important;
background-color: #222 !important;
}
@@ -51,7 +55,7 @@ a:hover,a:focus,a.focus{
li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
outline: none !important;
border-color: #333 !important;
border-color: #424242 !important;
background-color: #222 !important;
}
@@ -63,18 +67,18 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
/* main panel */
.panel {
background-color: #111 !important;
background-color: #323232 !important;
border-width: 2px !important;
}
.panel-default {
border-color: #222 !important;
border-color: #424242 !important;
}
.panel-default > .panel-heading {
color: #aaa !important;
border-color: #222 !important;
background-color: #222 !important;
background-color: #3B3B3B !important;
}
.panel-warning > .panel-heading {
color: #222 !important;
@@ -85,16 +89,16 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
}
.panel-footer {
background-color: #111 !important;
background-color: #2D2D2D !important;
border-width: 0 !important;
}
.table-striped>tbody>tr:nth-of-type(odd) {
background-color: #181818 !important;
background-color: #2E2E2E !important;
}
.panel-group .panel-heading+.panel-collapse>.panel-body, .panel-group .panel-heading+.panel-collapse>.list-group {
border-top: 1px solid #222 !important;
border-top: 1px solid #424242 !important;
}
.identicon rect {
@@ -156,11 +160,11 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
/* modal dialogs */
.modal-header {
border-bottom-color: #222 !important;
border-bottom-color: #424242 !important;
}
.modal-header:not(.alert) {
background-color: #222;
background-color: #3B3B3B;
}
.alert-info {
@@ -179,12 +183,12 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
.modal-content {
border-color: #666 !important;
border-width: 2px !important;
background-color: #111 !important;
background-color: #272727 !important;
}
.modal-footer {
border-color: #111 !important;
background-color: #111 !important;
border-color: #303030 !important;
background-color: #2D2D2D !important;
}
.help-block {
@@ -193,8 +197,8 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
.form-control {
color: #aaa !important;
border-color: #444 !important;
background-color: black !important;
border-color: #424242 !important;
background-color: #3B3B3B !important;
}
code.ng-binding{
@@ -204,8 +208,8 @@ code.ng-binding{
.well, .form-control[readonly="readonly"], .popover { /* read-only fields*/
color: #666 !important;
border-color: #444 !important;
background-color: #111 !important;
border-color: #424242 !important;
background-color: #3B3B3B !important;
}
/* buttons for pagination */

GUI about dialog (contributor list)

@@ -12,7 +12,7 @@
<p translate>Copyright &copy; 2014-2016 the following Contributors:</p>
<div class="row">
<div class="col-md-12" id="contributor-list">
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Laurent Etiemble, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Scott Klupfel, Stefan Kuntz, Tim Abell, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Yannic A.
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Laurent Etiemble, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Scott Klupfel, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Yannic A.
</div>
</div>
<hr/>

syncthingController.js (GUI)

@@ -654,7 +654,7 @@ angular.module('syncthing.core')
if (state === 'error') {
return 'stopped'; // legacy, the state is called "stopped" in the GUI
}
if (state === 'idle' && $scope.model[folderCfg.id].needFiles > 0) {
if (state === 'idle' && $scope.model[folderCfg.id].needFiles + $scope.model[folderCfg.id].needDeletes > 0) {
return 'outofsync';
}
if (state === 'scanning') {

Jenkins build script (Linux)

@@ -44,17 +44,8 @@ for plat in "${platforms[@]}"; do
done
go run build.go -goarch amd64 deb
fakeroot sh -c 'chown -R root:root deb ; dpkg-deb -b deb .'
mv *.deb "$WORKSPACE"
go run build.go -goarch i386 deb
fakeroot sh -c 'chown -R root:root deb ; dpkg-deb -b deb .'
mv *.deb "$WORKSPACE"
go run build.go -goarch armel deb
fakeroot sh -c 'chown -R root:root deb ; dpkg-deb -b deb .'
mv *.deb "$WORKSPACE"
go run build.go -goarch armhf deb
fakeroot sh -c 'chown -R root:root deb ; dpkg-deb -b deb .'
mv *.deb "$WORKSPACE"

Jenkins build script (Windows)

@@ -24,6 +24,7 @@ cd src\github.com\syncthing\syncthing
echo Initializing ^& cleaning
go version
git clean -fxd || goto error
go run build.go version
echo.
echo Fetching extras

Jenkins build script (shared functions)

@@ -23,7 +23,7 @@ function init {
rm -f *.tar.gz *.zip *.deb
cd src/github.com/syncthing/syncthing
version=$(git describe)
version=$(go run build.go version)
echo "Building $version"
echo
}
@@ -32,7 +32,6 @@ function clean {
echo Cleaning
rm -rf "$GOPATH/pkg"
git clean -fxd
git fetch --prune
echo
}

lib/config/config.go

@@ -42,16 +42,16 @@ var (
// DefaultDiscoveryServersV4 should be substituted when the configuration
// contains <globalAnnounceServer>default-v4</globalAnnounceServer>.
DefaultDiscoveryServersV4 = []string{
"https://discovery-v4-1.syncthing.net/v2/?id=SR7AARM-TCBUZ5O-VFAXY4D-CECGSDE-3Q6IZ4G-XG7AH75-OBIXJQV-QJ6NLQA", // 194.126.249.5, Sweden
"https://discovery-v4-2.syncthing.net/v2/?id=DVU36WY-H3LVZHW-E6LLFRE-YAFN5EL-HILWRYP-OC2M47J-Z4PE62Y-ADIBDQC", // 45.55.230.38, USA
"https://discovery-v4-3.syncthing.net/v2/?id=VK6HNJ3-VVMM66S-HRVWSCR-IXEHL2H-U4AQ4MW-UCPQBWX-J2L2UBK-NVZRDQZ", // 128.199.95.124, Singapore
"https://discovery-v4-4.syncthing.net/v2/?id=LYXKCHX-VI3NYZR-ALCJBHF-WMZYSPK-QG6QJA3-MPFYMSO-U56GTUK-NA2MIAW", // 95.85.19.244, NL
}
// DefaultDiscoveryServersV6 should be substituted when the configuration
// contains <globalAnnounceServer>default-v6</globalAnnounceServer>.
DefaultDiscoveryServersV6 = []string{
"https://discovery-v6-1.syncthing.net/v2/?id=SR7AARM-TCBUZ5O-VFAXY4D-CECGSDE-3Q6IZ4G-XG7AH75-OBIXJQV-QJ6NLQA", // 2001:470:28:4d6::5, Sweden
"https://discovery-v6-2.syncthing.net/v2/?id=DVU36WY-H3LVZHW-E6LLFRE-YAFN5EL-HILWRYP-OC2M47J-Z4PE62Y-ADIBDQC", // 2604:a880:800:10::182:a001, USA
"https://discovery-v6-3.syncthing.net/v2/?id=VK6HNJ3-VVMM66S-HRVWSCR-IXEHL2H-U4AQ4MW-UCPQBWX-J2L2UBK-NVZRDQZ", // 2400:6180:0:d0::d9:d001, Singapore
"https://discovery-v6-4.syncthing.net/v2/?id=LYXKCHX-VI3NYZR-ALCJBHF-WMZYSPK-QG6QJA3-MPFYMSO-U56GTUK-NA2MIAW", // 2a03:b0c0:0:1010::4ed:3001, NL
}
// DefaultDiscoveryServers should be substituted when the configuration
// contains <globalAnnounceServer>default</globalAnnounceServer>.

lib/config/config_test.go

@@ -64,6 +64,7 @@ func TestDefaultValues(t *testing.T) {
AlwaysLocalNets: []string{},
OverwriteRemoteDevNames: false,
TempIndexMinBlocks: 10,
UnackedNotificationIDs: []string{},
}
cfg := New(device1)
@@ -103,6 +104,9 @@ func TestDeviceConfig(t *testing.T) {
AutoNormalize: true,
MinDiskFreePct: 1,
MaxConflicts: -1,
Versioning: VersioningConfiguration{
Params: map[string]string{},
},
},
}
@@ -194,6 +198,7 @@ func TestOverriddenValues(t *testing.T) {
AlwaysLocalNets: []string{},
OverwriteRemoteDevNames: true,
TempIndexMinBlocks: 100,
UnackedNotificationIDs: []string{},
}
cfg, err := Load("testdata/overridenvalues.xml", device1)

lib/config/wrapper.go

@@ -304,15 +304,18 @@ func (w *Wrapper) Device(id protocol.DeviceID) (DeviceConfiguration, bool) {
func (w *Wrapper) Save() error {
fd, err := osutil.CreateAtomic(w.path, 0600)
if err != nil {
l.Debugln("CreateAtomic:", err)
return err
}
if err := w.cfg.WriteXML(fd); err != nil {
l.Debugln("WriteXML:", err)
fd.Close()
return err
}
if err := fd.Close(); err != nil {
l.Debugln("Close:", err)
return err
}

lib/ignore/ignore.go

@@ -345,7 +345,7 @@ func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) ([]
case strings.HasSuffix(line, "/**"):
err = addPattern(line)
case strings.HasSuffix(line, "/"):
err = addPattern(line)
err = addPattern(line + "**")
default:
err = addPattern(line)
if err == nil {

lib/ignore/ignore_test.go

@@ -718,3 +718,22 @@ func TestIssue3174(t *testing.T) {
t.Error("Should match")
}
}
func TestIssue3639(t *testing.T) {
stignore := `
foo/
`
pats := New(true)
err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
if err != nil {
t.Fatal(err)
}
if !pats.Match("foo/bar").IsIgnored() {
t.Error("Should match 'foo/bar'")
}
if pats.Match("foo").IsIgnored() {
t.Error("Should not match 'foo'")
}
}

lib/model/model.go

@@ -396,8 +396,8 @@ func (info ConnectionInfo) MarshalJSON() ([]byte, error) {
// ConnectionStats returns a map with connection statistics for each device.
func (m *Model) ConnectionStats() map[string]interface{} {
m.pmut.RLock()
m.fmut.RLock()
m.pmut.RLock()
res := make(map[string]interface{})
devs := m.cfg.Devices()
@@ -426,8 +426,8 @@ func (m *Model) ConnectionStats() map[string]interface{} {
res["connections"] = conns
m.fmut.RUnlock()
m.pmut.RUnlock()
m.fmut.RUnlock()
in, out := protocol.TotalInOut()
res["total"] = ConnectionInfo{
@@ -1465,12 +1465,12 @@ func sendIndexTo(minSequence int64, conn protocol.Connection, folder string, fs
func (m *Model) updateLocalsFromScanning(folder string, fs []protocol.FileInfo) {
m.updateLocals(folder, fs)
// Fire the LocalChangeDetected event to notify listeners about local
// updates.
m.fmut.RLock()
path := m.folderCfgs[folder].Path()
folderCfg := m.folderCfgs[folder]
m.fmut.RUnlock()
m.localChangeDetected(folder, path, fs)
// Fire the LocalChangeDetected event to notify listeners about local updates.
m.localChangeDetected(folderCfg, fs)
}
func (m *Model) updateLocalsFromPulling(folder string, fs []protocol.FileInfo) {
@@ -1500,9 +1500,8 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
})
}
func (m *Model) localChangeDetected(folder, path string, files []protocol.FileInfo) {
// For windows paths, strip unwanted chars from the front
path = strings.Replace(path, `\\?\`, "", 1)
func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo) {
path := strings.Replace(folderCfg.Path(), `\\?\`, "", 1)
for _, file := range files {
objType := "file"
@@ -1526,14 +1525,16 @@ func (m *Model) localChangeDetected(folder, path string, files []protocol.FileIn
action = "deleted"
}
// The full file path, adjusted to the local path separator character.
// The full file path, adjusted to the local path separator character. Also
// for windows paths, strip unwanted chars from the front.
path := filepath.Join(path, filepath.FromSlash(file.Name))
events.Default.Log(events.LocalChangeDetected, map[string]string{
"folder": folder,
"action": action,
"type": objType,
"path": path,
"folderID": folderCfg.ID,
"label": folderCfg.Label,
"action": action,
"type": objType,
"path": path,
})
}
}

lib/protocol (import swap to lib/sha256)

@@ -7,13 +7,13 @@ package protocol
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"time"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
)
const (

lib/protocol/deviceid.go

@@ -4,7 +4,6 @@ package protocol
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"encoding/binary"
"errors"
@@ -12,6 +11,8 @@ import (
"regexp"
"strings"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/calmh/luhn"
)

lib/scanner/blocks.go

@@ -8,11 +8,11 @@ package scanner
import (
"bytes"
"crypto/sha256"
"fmt"
"io"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sha256"
)
var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}

lib/sha256/sha256.go (new file, 136 lines)

@@ -0,0 +1,136 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package sha256
import (
"crypto/rand"
cryptoSha256 "crypto/sha256"
"fmt"
"hash"
"os"
"time"
minioSha256 "github.com/minio/sha256-simd"
"github.com/syncthing/syncthing/lib/logger"
)
var l = logger.DefaultLogger.NewFacility("sha256", "SHA256 hashing package")
const (
benchmarkingIterations = 3
benchmarkingDuration = 150 * time.Millisecond
defaultImpl = "crypto/sha256"
minioImpl = "minio/sha256-simd"
)
const (
BlockSize = cryptoSha256.BlockSize
Size = cryptoSha256.Size
)
// May be switched out for another implementation
var (
New = cryptoSha256.New
Sum256 = cryptoSha256.Sum256
)
var (
selectedImpl = defaultImpl
cryptoPerf float64
minioPerf float64
)
func SelectAlgo() {
switch os.Getenv("STHASHING") {
case "":
// When unset, probe for the fastest implementation.
benchmark()
if minioPerf > cryptoPerf {
selectMinio()
}
case "minio":
// When set to "minio", use that. Benchmark anyway to be able to
// present the difference.
benchmark()
selectMinio()
default:
// When set to anything else, such as "standard", use the default Go
// implementation. Benchmark that anyway, so we can report something
// useful in Report(). Make sure not to touch the minio
// implementation as it may be disabled for incompatibility reasons.
cryptoPerf = cpuBenchOnce(benchmarkingIterations*benchmarkingDuration, cryptoSha256.New)
}
}
// Report prints a line with the measured hash performance rates for the
// selected and alternate implementation.
func Report() {
var otherImpl string
var selectedRate, otherRate float64
switch selectedImpl {
case defaultImpl:
selectedRate = cryptoPerf
otherRate = minioPerf
otherImpl = minioImpl
case minioImpl:
selectedRate = minioPerf
otherRate = cryptoPerf
otherImpl = defaultImpl
}
l.Infof("Single thread hash performance is %s using %s (%s using %s).", formatRate(selectedRate), selectedImpl, formatRate(otherRate), otherImpl)
}
func selectMinio() {
New = minioSha256.New
Sum256 = minioSha256.Sum256
selectedImpl = minioImpl
}
func benchmark() {
// Interleave the tests to achieve some sort of fairness if the CPU is
// just in the process of spinning up to full speed.
for i := 0; i < benchmarkingIterations; i++ {
if perf := cpuBenchOnce(benchmarkingDuration, cryptoSha256.New); perf > cryptoPerf {
cryptoPerf = perf
}
if perf := cpuBenchOnce(benchmarkingDuration, minioSha256.New); perf > minioPerf {
minioPerf = perf
}
}
}
func cpuBenchOnce(duration time.Duration, newFn func() hash.Hash) float64 {
chunkSize := 100 * 1 << 10
h := newFn()
bs := make([]byte, chunkSize)
rand.Reader.Read(bs)
t0 := time.Now()
b := 0
for time.Since(t0) < duration {
h.Write(bs)
b += chunkSize
}
h.Sum(nil)
d := time.Since(t0)
return float64(int(float64(b)/d.Seconds()/(1<<20)*100)) / 100
}
func formatRate(rate float64) string {
decimals := 0
if rate < 1 {
decimals = 2
} else if rate < 10 {
decimals = 1
}
return fmt.Sprintf("%.*f MB/s", decimals, rate)
}
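Callers elsewhere in this diff just swap crypto/sha256 for this package; a minimal sketch of the intended startup sequence and use (not a verbatim excerpt from the repository):

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
	sha256.SelectAlgo() // honour STHASHING, or benchmark both implementations
	sha256.Report()     // log the selected implementation and measured rates

	h := sha256.New() // drop-in for crypto/sha256 from here on
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))

	fmt.Printf("%x\n", sha256.Sum256([]byte("hello"))) // one-shot variant
}
```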

lib/tlsutil/tlsutil.go

@@ -12,7 +12,6 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/pem"
@@ -20,6 +19,8 @@ import (
"fmt"
"io"
"math/big"
"github.com/syncthing/syncthing/lib/sha256"
)
// GenerateKeys returns a new key pair, with the private and public key

lib/upnp/upnp.go

@@ -108,19 +108,21 @@ func Discover(renewal, timeout time.Duration) []nat.Device {
close(resultChan)
}()
seenResults := make(map[string]bool)
nextResult:
for result := range resultChan {
for _, existingResult := range results {
if existingResult.ID() == result.ID() {
l.Debugf("Skipping duplicate result %s with services:", result.uuid)
for _, service := range result.services {
l.Debugf("* [%s] %s", service.ID, service.URL)
}
continue nextResult
if seenResults[result.ID()] {
l.Debugf("Skipping duplicate result %s with services:", result.uuid)
for _, service := range result.services {
l.Debugf("* [%s] %s", service.ID, service.URL)
}
continue nextResult
}
result := result // Reallocate as we need to keep a pointer
results = append(results, &result)
seenResults[result.ID()] = true
l.Debugf("UPnP discovery result %s with services:", result.uuid)
for _, service := range result.services {
l.Debugf("* [%s] %s", service.ID, service.URL)

lib/versioner/_external_test/external.bat (new file)

@@ -0,0 +1,5 @@
set "FOLDER_PATH=%~1"
set "FILE_PATH=%~2"
echo "1--%FOLDER_PATH%--"
echo "2--%FILE_PATH%--"
del "%FOLDER_PATH%\%FILE_PATH%"

lib/versioner/_external_test/external.sh (new file)

@@ -0,0 +1,5 @@
#!/bin/sh
echo "1--$1--"
echo "2--$2--"
rm -f "$1/$2"

lib/versioner/external_test.go (new file)

@@ -0,0 +1,89 @@
// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package versioner
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
)
func TestExternalNoCommand(t *testing.T) {
file := "testdata/folder path/long filename.txt"
prepForRemoval(t, file)
defer os.RemoveAll("testdata")
// The file should exist before the versioner run.
if _, err := os.Lstat(file); err != nil {
t.Fatal("File should exist")
}
// The versioner should fail due to missing command.
e := External{
command: "nonexistant command",
folderPath: "testdata/folder path",
}
if err := e.Archive(file); err == nil {
t.Error("Command should have failed")
}
// The file should not have been removed.
if _, err := os.Lstat(file); err != nil {
t.Fatal("File should still exist")
}
}
func TestExternal(t *testing.T) {
cmd := "./_external_test/external.sh"
if runtime.GOOS == "windows" {
cmd = `.\_external_test\external.bat`
}
file := "testdata/folder path/dir (parens)/long filename (parens).txt"
prepForRemoval(t, file)
defer os.RemoveAll("testdata")
// The file should exist before the versioner run.
if _, err := os.Lstat(file); err != nil {
t.Fatal("File should exist")
}
// The versioner should run successfully.
e := External{
command: cmd,
folderPath: "testdata/folder path",
}
if err := e.Archive(file); err != nil {
t.Fatal(err)
}
// The file should no longer exist.
if _, err := os.Lstat(file); !os.IsNotExist(err) {
t.Error("File should no longer exist")
}
}
func prepForRemoval(t *testing.T, file string) {
if err := os.RemoveAll("testdata"); err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(file, []byte("hello\n"), 0644); err != nil {
t.Fatal(err)
}
}

script/post-upgrade (new file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh
pkill -HUP -x syncthing || true

REST endpoint smoke test (jsonEndpoints)

@@ -27,6 +27,7 @@ var jsonEndpoints = []string{
"/rest/db/status?folder=default",
"/rest/db/browse?folder=default",
"/rest/events?since=-1&limit=5",
"/rest/events/disk?since=-1&limit=5",
"/rest/stats/device",
"/rest/stats/folder",
"/rest/svc/deviceid?id=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU",

vendor/github.com/cznic/b/doc.go (generated, vendored, 44 changes)

@@ -6,10 +6,30 @@
//
// Changelog
//
// 2016-07-16: Update benchmark results to newer Go version. Add a note on
// concurrency.
//
// 2014-06-26: Lower GC pressure by recycling things.
//
// 2014-04-18: Added new method Put.
//
// Concurrency considerations
//
// Tree.{Clear,Delete,Put,Set} mutate the tree. One can use eg. a
// sync.Mutex.Lock/Unlock (or sync.RWMutex.Lock/Unlock) to wrap those calls if
// they are to be invoked concurrently.
//
// Tree.{First,Get,Last,Len,Seek,SeekFirst,SeekLast} read but do not mutate the
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
// they are to be invoked concurrently with any of the tree mutating methods.
//
// Enumerator.{Next,Prev} mutate the enumerator and read but not mutate the
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
// they are to be invoked concurrently with any of the tree mutating methods. A
// separate mutex for the enumerator, or the whole tree in a simplified
// variant, is necessary if the enumerator's Next/Prev methods per se are to
// be invoked concurrently.
//
// Generic types
//
// Keys and their associated values are interface{} typed, similar to all of
@@ -34,20 +54,20 @@
// No other changes to int.go are necessary, it compiles just fine.
//
// Running the benchmarks for 1000 keys on a machine with Intel i5-4670 CPU @
// 3.4GHz, Go release 1.4.2.
// 3.4GHz, Go 1.7rc1.
//
// $ go test -bench 1e3 example/all_test.go example/int.go
// BenchmarkSetSeq1e3-4 20000 78265 ns/op
// BenchmarkGetSeq1e3-4 20000 67980 ns/op
// BenchmarkSetRnd1e3-4 10000 172720 ns/op
// BenchmarkGetRnd1e3-4 20000 89539 ns/op
// BenchmarkDelSeq1e3-4 20000 87863 ns/op
// BenchmarkDelRnd1e3-4 10000 130891 ns/op
// BenchmarkSeekSeq1e3-4 10000 100118 ns/op
// BenchmarkSeekRnd1e3-4 10000 121684 ns/op
// BenchmarkNext1e3-4 200000 6330 ns/op
// BenchmarkPrev1e3-4 200000 9066 ns/op
// PASS
// BenchmarkSetSeq1e3 10000 151620 ns/op
// BenchmarkGetSeq1e3 10000 115354 ns/op
// BenchmarkSetRnd1e3 5000 255865 ns/op
// BenchmarkGetRnd1e3 10000 140466 ns/op
// BenchmarkDelSeq1e3 10000 143860 ns/op
// BenchmarkDelRnd1e3 10000 188228 ns/op
// BenchmarkSeekSeq1e3 10000 156448 ns/op
// BenchmarkSeekRnd1e3 10000 190587 ns/op
// BenchmarkNext1e3 200000 9407 ns/op
// BenchmarkPrev1e3 200000 9306 ns/op
// ok command-line-arguments 26.369s
// ok command-line-arguments 42.531s
// $
package b
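A compact sketch of the locking discipline described above, assuming the package's TreeNew constructor with a caller-supplied comparator:

```go
// Sketch: wrap mutators in Lock and readers in RLock, per the package docs.
package main

import (
	"fmt"
	"sync"

	"github.com/cznic/b"
)

func main() {
	var mu sync.RWMutex
	tr := b.TreeNew(func(a, c interface{}) int { return a.(int) - c.(int) })

	mu.Lock()
	tr.Set(1, "one") // mutating call
	mu.Unlock()

	mu.RLock()
	v, ok := tr.Get(1) // read-only call
	mu.RUnlock()
	fmt.Println(v, ok)
}
```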

vendor/github.com/cznic/internal/buffer/LICENSE (generated, vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
Copyright (c) 2016 The Internal Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/cznic/internal/buffer/buffer.go (generated, vendored, new file, 55 lines)

@@ -0,0 +1,55 @@
// Copyright 2016 The Internal Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buffer implements a pool of pointers to byte slices.
//
// Example usage pattern
//
// p := buffer.Get(size)
// b := *p // Now you can use b in any way you need.
// ...
// // When b will not be used anymore
// buffer.Put(p)
// ...
// // If b or p are not going out of scope soon, optionally
// b = nil
// p = nil
//
// Otherwise the pool cannot release the buffer on garbage collection.
//
// Do not do
//
// p := buffer.Get(size)
// b := *p
// ...
// buffer.Put(&b)
//
// or
//
// b := *buffer.Get(size)
// ...
// buffer.Put(&b)
package buffer
import (
"github.com/cznic/internal/slice"
)
// CGet returns a pointer to a byte slice of len size. The pointed to byte
// slice is zeroed up to its cap. CGet panics for size < 0.
//
// CGet is safe for concurrent use by multiple goroutines.
func CGet(size int) *[]byte { return slice.Bytes.CGet(size).(*[]byte) }
// Get returns a pointer to a byte slice of len size. The pointed to byte slice
// is not zeroed. Get panics for size < 0.
//
// Get is safe for concurrent use by multiple goroutines.
func Get(size int) *[]byte { return slice.Bytes.Get(size).(*[]byte) }
// Put puts a pointer to a byte slice into a pool for possible later reuse by
// CGet or Get.
//
// Put is safe for concurrent use by multiple goroutines.
func Put(p *[]byte) { slice.Bytes.Put(p) }
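A runnable version of the usage pattern documented above, under the same Get/Put contract:

```go
package main

import (
	"fmt"

	"github.com/cznic/internal/buffer"
)

func main() {
	p := buffer.Get(64) // *[]byte of len 64; contents not zeroed
	b := *p
	copy(b, "hello")
	fmt.Println(string(b[:5])) // use the buffer
	buffer.Put(p)              // return it to the pool; drop references to b
}
```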

vendor/github.com/cznic/internal/file/LICENSE (generated, vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
Copyright (c) 2016 The Internal Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/cznic/internal/file/file.go (generated, vendored, new file, 434 lines)

@@ -0,0 +1,434 @@
// Copyright 2016 The Internal Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package file provides an os.File-like interface of a memory mapped file.
package file
import (
"fmt"
"io"
"os"
"time"
"github.com/cznic/fileutil"
"github.com/cznic/internal/buffer"
"github.com/cznic/mathutil"
"github.com/edsrzf/mmap-go"
)
const copyBufSize = 1 << 20 // 1 MB.
var (
_ Interface = (*mem)(nil)
_ Interface = (*file)(nil)
_ os.FileInfo = stat{}
sysPage = os.Getpagesize()
)
// Interface is a os.File-like entity.
type Interface interface {
io.ReaderAt
io.ReaderFrom
io.WriterAt
io.WriterTo
Close() error
Stat() (os.FileInfo, error)
Sync() error
Truncate(int64) error
}
// Open returns a new Interface backed by f, or an error, if any.
func Open(f *os.File) (Interface, error) { return newFile(f, 1<<30, 20) }
// OpenMem returns a new Interface, or an error, if any. The Interface content
// is volatile, it's backed only by process' memory.
func OpenMem(name string) (Interface, error) { return newMem(name, 18), nil }
type memMap map[int64]*[]byte
type mem struct {
m memMap
modTime time.Time
name string
pgBits uint
pgMask int
pgSize int
size int64
}
func newMem(name string, pgBits uint) *mem {
pgSize := 1 << pgBits
return &mem{
m: memMap{},
modTime: time.Now(),
name: name,
pgBits: pgBits,
pgMask: pgSize - 1,
pgSize: pgSize,
}
}
func (f *mem) IsDir() bool { return false }
func (f *mem) Mode() os.FileMode { return os.ModeTemporary + 0600 }
func (f *mem) ModTime() time.Time { return f.modTime }
func (f *mem) Name() string { return f.name }
func (f *mem) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(f, r) }
func (f *mem) Size() (n int64) { return f.size }
func (f *mem) Stat() (os.FileInfo, error) { return f, nil }
func (f *mem) Sync() error { return nil }
func (f *mem) Sys() interface{} { return nil }
func (f *mem) WriteTo(w io.Writer) (n int64, err error) { return writeTo(f, w) }
func (f *mem) Close() error {
f.Truncate(0)
f.m = nil
return nil
}
func (f *mem) ReadAt(b []byte, off int64) (n int, err error) {
avail := f.size - off
pi := off >> f.pgBits
po := int(off) & f.pgMask
rem := len(b)
if int64(rem) >= avail {
rem = int(avail)
err = io.EOF
}
var zeroPage *[]byte
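// Pages never written stay absent from f.m; reads over such holes are served
// from a single shared zero page obtained from the buffer pool.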
for rem != 0 && avail > 0 {
pg := f.m[pi]
if pg == nil {
if zeroPage == nil {
zeroPage = buffer.CGet(f.pgSize)
defer buffer.Put(zeroPage)
}
pg = zeroPage
}
nc := copy(b[:mathutil.Min(rem, f.pgSize)], (*pg)[po:])
pi++
po = 0
rem -= nc
n += nc
b = b[nc:]
}
return n, err
}
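The page decomposition above is plain bit arithmetic. With the pgBits = 18 default that OpenMem uses (pgSize = 1<<18 = 262144), an offset such as 300000 splits into page index 300000>>18 = 1 and in-page offset 300000&262143 = 37856; the numbers here are purely illustrative.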
func (f *mem) Truncate(size int64) (err error) {
if size < 0 {
return fmt.Errorf("invalid truncate size: %d", size)
}
first := size >> f.pgBits
if size&int64(f.pgMask) != 0 {
first++
}
last := f.size >> f.pgBits
if f.size&int64(f.pgMask) != 0 {
last++
}
for ; first <= last; first++ {
if p := f.m[first]; p != nil {
buffer.Put(p)
}
delete(f.m, first)
}
f.size = size
return nil
}
func (f *mem) WriteAt(b []byte, off int64) (n int, err error) {
pi := off >> f.pgBits
po := int(off) & f.pgMask
n = len(b)
rem := n
var nc int
for rem != 0 {
pg := f.m[pi]
if pg == nil {
pg = buffer.CGet(f.pgSize)
f.m[pi] = pg
}
nc = copy((*pg)[po:], b)
pi++
po = 0
rem -= nc
b = b[nc:]
}
f.size = mathutil.MaxInt64(f.size, off+int64(n))
return n, nil
}
type stat struct {
os.FileInfo
size int64
}
func (s stat) Size() int64 { return s.size }
type fileMap map[int64]mmap.MMap
type file struct {
f *os.File
m fileMap
maxPages int
pgBits uint
pgMask int
pgSize int
size int64
fsize int64
}
func newFile(f *os.File, maxSize int64, pgBits uint) (*file, error) {
if maxSize < 0 {
panic("internal error")
}
pgSize := 1 << pgBits
switch {
case sysPage > pgSize:
pgBits = uint(mathutil.Log2Uint64(uint64(sysPage)))
default:
pgBits = uint(mathutil.Log2Uint64(uint64(pgSize / sysPage * sysPage)))
}
pgSize = 1 << pgBits
fi := &file{
f: f,
m: fileMap{},
maxPages: int(mathutil.MinInt64(
1024,
mathutil.MaxInt64(maxSize/int64(pgSize), 1)),
),
pgBits: pgBits,
pgMask: pgSize - 1,
pgSize: pgSize,
}
info, err := f.Stat()
if err != nil {
return nil, err
}
if err = fi.Truncate(info.Size()); err != nil {
return nil, err
}
return fi, nil
}
func (f *file) ReadFrom(r io.Reader) (n int64, err error) { return readFrom(f, r) }
func (f *file) Sync() (err error) { return f.f.Sync() }
func (f *file) WriteTo(w io.Writer) (n int64, err error) { return writeTo(f, w) }
func (f *file) Close() (err error) {
for _, p := range f.m {
if err = p.Unmap(); err != nil {
return err
}
}
if err = f.f.Truncate(f.size); err != nil {
return err
}
if err = f.f.Sync(); err != nil {
return err
}
if err = f.f.Close(); err != nil {
return err
}
f.m = nil
f.f = nil
return nil
}
func (f *file) page(index int64) (mmap.MMap, error) {
if len(f.m) == f.maxPages {
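// At capacity: unmap one arbitrary page first (Go map iteration order is
// effectively random, so this amounts to a cheap random-replacement policy).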
for i, p := range f.m {
if err := p.Unmap(); err != nil {
return nil, err
}
delete(f.m, i)
break
}
}
off := index << f.pgBits
fsize := off + int64(f.pgSize)
if fsize > f.fsize {
if err := f.f.Truncate(fsize); err != nil {
return nil, err
}
f.fsize = fsize
}
p, err := mmap.MapRegion(f.f, f.pgSize, mmap.RDWR, 0, off)
if err != nil {
return nil, err
}
f.m[index] = p
return p, nil
}
func (f *file) ReadAt(b []byte, off int64) (n int, err error) {
avail := f.size - off
pi := off >> f.pgBits
po := int(off) & f.pgMask
rem := len(b)
if int64(rem) >= avail {
rem = int(avail)
err = io.EOF
}
for rem != 0 && avail > 0 {
pg := f.m[pi]
if pg == nil {
if pg, err = f.page(pi); err != nil {
return n, err
}
}
nc := copy(b[:mathutil.Min(rem, f.pgSize)], pg[po:])
pi++
po = 0
rem -= nc
n += nc
b = b[nc:]
}
return n, err
}
func (f *file) Stat() (os.FileInfo, error) {
fi, err := f.f.Stat()
if err != nil {
return nil, err
}
return stat{fi, f.size}, nil
}
func (f *file) Truncate(size int64) (err error) {
if size < 0 {
return fmt.Errorf("invalid truncate size: %d", size)
}
first := size >> f.pgBits
if size&int64(f.pgMask) != 0 {
first++
}
last := f.size >> f.pgBits
if f.size&int64(f.pgMask) != 0 {
last++
}
for ; first <= last; first++ {
if p := f.m[first]; p != nil {
if err := p.Unmap(); err != nil {
return err
}
}
delete(f.m, first)
}
f.size = size
fsize := (size + int64(f.pgSize) - 1) &^ int64(f.pgMask)
if fsize != f.fsize {
if err := f.f.Truncate(fsize); err != nil {
return err
}
}
f.fsize = fsize
return nil
}
func (f *file) WriteAt(b []byte, off int64) (n int, err error) {
pi := off >> f.pgBits
po := int(off) & f.pgMask
n = len(b)
rem := n
var nc int
for rem != 0 {
pg := f.m[pi]
if pg == nil {
pg, err = f.page(pi)
if err != nil {
return n, err
}
}
nc = copy(pg[po:], b)
pi++
po = 0
rem -= nc
b = b[nc:]
}
f.size = mathutil.MaxInt64(f.size, off+int64(n))
return n, nil
}
// ----------------------------------------------------------------------------
func readFrom(f Interface, r io.Reader) (n int64, err error) {
f.Truncate(0)
p := buffer.Get(copyBufSize)
b := *p
defer buffer.Put(p)
var off int64
var werr error
for {
rn, rerr := r.Read(b)
if rn != 0 {
_, werr = f.WriteAt(b[:rn], off)
n += int64(rn)
off += int64(rn)
}
if rerr != nil {
if !fileutil.IsEOF(rerr) {
err = rerr
}
break
}
if werr != nil {
err = werr
break
}
}
return n, err
}
func writeTo(f Interface, w io.Writer) (n int64, err error) {
p := buffer.Get(copyBufSize)
b := *p
defer buffer.Put(p)
var off int64
var werr error
for {
rn, rerr := f.ReadAt(b, off)
if rn != 0 {
_, werr = w.Write(b[:rn])
n += int64(rn)
off += int64(rn)
}
if rerr != nil {
if !fileutil.IsEOF(rerr) {
err = rerr
}
break
}
if werr != nil {
err = werr
break
}
}
return n, err
}
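Taken together, a small sketch of the Interface in use (error handling elided; the values are illustrative):

package main

import (
	"fmt"

	"github.com/cznic/internal/file"
)

func main() {
	fi, _ := file.OpenMem("scratch") // volatile, memory-backed
	fi.WriteAt([]byte("abc"), 10)    // sparse write; offsets 0..9 read as zeros
	b := make([]byte, 3)
	fi.ReadAt(b, 10)
	fmt.Printf("%q\n", b) // "abc"
	fi.Truncate(0)
	fi.Close()
}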

27
vendor/github.com/cznic/internal/slice/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2016 The Internal Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

173
vendor/github.com/cznic/internal/slice/pool.go generated vendored Normal file

@@ -0,0 +1,173 @@
// Copyright 2016 The Internal Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package slice implements pools of pointers to slices.
package slice
import (
"sync"
"github.com/cznic/mathutil"
)
var (
// Bytes is a ready to use *[]byte Pool.
Bytes *Pool
// Ints is a ready to use *[]int Pool.
Ints *Pool
)
func init() {
Bytes = newBytes()
Ints = NewPool(
func(size int) interface{} { // create
b := make([]int, size)
return &b
},
func(s interface{}) { // clear
b := *s.(*[]int)
b = b[:cap(b)]
for i := range b {
b[i] = 0
}
},
func(s interface{}, size int) { // setSize
p := s.(*[]int)
*p = (*p)[:size]
},
func(s interface{}) int { return cap(*s.(*[]int)) }, // cap
)
}
func newBytes() *Pool {
return NewPool(
func(size int) interface{} { // create
b := make([]byte, size)
return &b
},
func(s interface{}) { // clear
b := *s.(*[]byte)
b = b[:cap(b)]
for i := range b {
b[i] = 0
}
},
func(s interface{}, size int) { // setSize
p := s.(*[]byte)
*p = (*p)[:size]
},
func(s interface{}) int { return cap(*s.(*[]byte)) }, // cap
)
}
// Pool implements a pool of pointers to slices.
//
// Example usage pattern (assuming pool is, for example, a *[]byte Pool)
//
// p := pool.Get(size).(*[]byte)
// b := *p // Now you can use b in any way you need.
// ...
// // When b will not be used anymore
// pool.Put(p)
// ...
// // If b or p are not going out of scope soon, optionally
// b = nil
// p = nil
//
// Otherwise the pool cannot release the slice on garbage collection.
//
// Do not do
//
// p := pool.Get(size).(*[]byte)
// b := *p
// ...
// pool.Put(&b)
//
// or
//
// b := *pool.Get(size).(*[]byte)
// ...
// pool.Put(&b)
type Pool struct {
cap func(interface{}) int
clear func(interface{})
m [63]sync.Pool
null interface{}
setSize func(interface{}, int)
}
// NewPool returns a newly created Pool. Assuming the desired slice type is
// []T:
//
// The create function returns a *[]T of len == cap == size.
//
// The argument of clear is *[]T and the function sets all the slice elements
// to the respective zero value.
//
// The setSize function gets a *[]T and sets its len to size.
//
// The cap function gets a *[]T and returns its capacity.
func NewPool(
create func(size int) interface{},
clear func(interface{}),
setSize func(p interface{}, size int),
cap func(p interface{}) int,
) *Pool {
p := &Pool{clear: clear, setSize: setSize, cap: cap, null: create(0)}
for i := range p.m {
size := 1 << uint(i)
p.m[i] = sync.Pool{New: func() interface{} {
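// Size classes covered by pool index i, shown in binary (e.g. class 3
// serves sizes 5-8):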
// 0: 1 - 1
// 1: 10 - 10
// 2: 11 - 100
// 3: 101 - 1000
// 4: 1001 - 10000
// 5: 10001 - 100000
return create(size)
}}
}
return p
}
// CGet returns a *[]T of len size. The pointed-to slice is zeroed up to its
// cap. CGet panics for size < 0.
//
// CGet is safe for concurrent use by multiple goroutines.
func (p *Pool) CGet(size int) interface{} {
s := p.Get(size)
p.clear(s)
return s
}
// Get returns a *[]T of len size. The pointed-to slice is not zeroed. Get
// panics for size < 0.
//
// Get is safe for concurrent use by multiple goroutines.
func (p *Pool) Get(size int) interface{} {
var index int
switch {
case size < 0:
panic("Pool.Get: negative size")
case size == 0:
return p.null
case size > 1:
index = mathutil.Log2Uint64(uint64(size-1)) + 1
}
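// index is now the smallest power-of-two size class with cap >= size:
// size 1 maps to class 0, and sizes in (2^(k-1), 2^k] map to class k.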
s := p.m[index].Get()
p.setSize(s, size)
return s
}
// Put puts a *[]T into a pool for possible later reuse by CGet or Get. Put
// panics if its argument is not of type *[]T.
//
// Put is safe for concurrent use by multiple goroutines.
func (p *Pool) Put(b interface{}) {
size := p.cap(b)
if size == 0 {
return
}
p.m[mathutil.Log2Uint64(uint64(size))].Put(b)
}
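The NewPool contract above generalizes beyond bytes and ints; a sketch of a *[]float64 pool built the same way as newBytes (the name Float64s is hypothetical, not part of the package):

var Float64s = NewPool(
	func(size int) interface{} { // create
		b := make([]float64, size)
		return &b
	},
	func(s interface{}) { // clear
		b := *s.(*[]float64)
		b = b[:cap(b)]
		for i := range b {
			b[i] = 0
		}
	},
	func(s interface{}, size int) { // setSize
		p := s.(*[]float64)
		*p = (*p)[:size]
	},
	func(s interface{}) int { return cap(*s.(*[]float64)) }, // cap
)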

View File

@@ -28,9 +28,9 @@ type acidWriter0 ACIDFiler0
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
f := (*ACIDFiler0)(a)
if f.bwal == nil { // new epoch
if f.newEpoch {
f.newEpoch = false
f.data = f.data[:0]
f.bwal = bufio.NewWriter(f.wal)
if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
return
}
@@ -96,12 +96,13 @@ const (
// [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
*RollbackFiler
wal *os.File
bwal *bufio.Writer
data []acidWrite
testHook bool // keeps WAL untruncated (once)
peakWal int64 // tracks WAL maximum used size
newEpoch bool
peakBitFilerPages int // track maximum transaction memory
peakWal int64 // tracks WAL maximum used size
testHook bool // keeps WAL untruncated (once)
wal *os.File
}
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
@@ -128,6 +129,8 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
}
}
r.bwal = bufio.NewWriter(r.wal)
r.newEpoch = true
acidWriter := (*acidWriter0)(r)
if r.RollbackFiler, err = NewRollbackFiler(
@@ -142,17 +145,12 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
return
}
r.bwal = nil
if err = r.wal.Sync(); err != nil {
return
}
wfi, err := r.wal.Stat()
switch err != nil {
case true:
// unexpected, but ignored
case false:
if err == nil {
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
}
@@ -185,6 +183,8 @@ func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
}
r.testHook = false
r.bwal.Reset(r.wal)
r.newEpoch = true
return r.wal.Sync()
},
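The hunks above replace the implicit f.bwal == nil epoch test with an explicit newEpoch flag, so the bufio.Writer is created once and recycled with Reset between write-ahead-log epochs instead of being reallocated. A minimal sketch of that pattern, with assumed names and none of lldb's actual packet framing:

package main

import (
	"bufio"
	"os"
)

type waler struct {
	wal      *os.File
	bwal     *bufio.Writer
	newEpoch bool
}

// write lazily begins a fresh WAL epoch on the first write after a commit.
func (w *waler) write(b []byte) error {
	if w.newEpoch {
		w.newEpoch = false
		// a real implementation would emit the WAL header packet here
	}
	_, err := w.bwal.Write(b)
	return err
}

// commit flushes the current epoch and arms the flag for the next one,
// reusing the buffered writer rather than allocating a new one.
func (w *waler) commit() error {
	if err := w.bwal.Flush(); err != nil {
		return err
	}
	w.bwal.Reset(w.wal)
	w.newEpoch = true
	return w.wal.Sync()
}

func main() {
	f, _ := os.CreateTemp("", "wal")
	defer os.Remove(f.Name())
	w := &waler{wal: f, bwal: bufio.NewWriter(f), newEpoch: true}
	w.write([]byte("record"))
	w.commit()
}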

27
vendor/github.com/cznic/lldb/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2014 The lldb Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -12,8 +12,8 @@ import (
"sort"
"strings"
"github.com/cznic/bufs"
"github.com/cznic/fileutil"
"github.com/cznic/internal/buffer"
"github.com/cznic/sortutil"
)
@@ -245,8 +245,9 @@ func (t *BTree) Get(buf, key []byte) (value []byte, err error) {
return
}
buffer := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(buffer)
pbuffer := buffer.Get(maxBuf)
defer buffer.Put(pbuffer)
buffer := *pbuffer
if buffer, err = t.root.get(t.store, buffer, t.collate, key); buffer == nil || err != nil {
return
}
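This first hunk shows the substitution applied throughout the rest of this file and in falloc.go below: bufs.GCache.Get/Put on raw slices becomes buffer.Get/Put on *[]byte plus a single dereference. A hypothetical helper capturing the new idiom (not present in the codebase):

func withBuf(n int, fn func(b []byte)) {
	pb := buffer.Get(n)
	defer buffer.Put(pb)
	fn(*pb)
}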
@@ -479,9 +480,10 @@ func (t *BTree) Set(key, value []byte) (err error) {
}
t.serial++
dst := bufs.GCache.Get(maxBuf)
pdst := buffer.Get(maxBuf)
dst := *pdst
_, err = t.root.put(dst, t.store, t.collate, key, value, true)
bufs.GCache.Put(dst)
buffer.Put(pdst)
return
}
@@ -733,8 +735,9 @@ func CreateBTree(store *Allocator, collate func(a, b []byte) int) (bt *BTree, ha
// (handled by some upper layer "dispatcher").
func OpenBTree(store *Allocator, collate func(a, b []byte) int, handle int64) (bt *BTree, err error) {
r := &BTree{store: store, root: btree(handle), collate: collate}
b := bufs.GCache.Get(7)
defer bufs.GCache.Put(b)
pb := buffer.Get(7)
defer buffer.Put(pb)
b := *pb
if b, err = store.Get(b, handle); err != nil {
return
}
@@ -883,7 +886,7 @@ DataPage[X] == 8+14*X
type btreeIndexPage []byte
func newBTreeIndexPage(leftmostChild int64) (p btreeIndexPage) {
p = bufs.GCache.Get(1 + (kIndex+1)*2*7)[:8]
p = (*buffer.Get(1 + (kIndex+1)*2*7))[:8]
p[0] = tagBTreeIndexPage
h2b(p[1:], leftmostChild)
return
@@ -942,8 +945,9 @@ func (p btreeIndexPage) insert3(index int, dataPage, child int64) btreeIndexPage
}
func (p btreeIndexPage) cmp(a btreeStore, c func(a, b []byte) int, keyA []byte, keyBIndex int) (int, error) {
b := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(b)
pb := buffer.Get(maxBuf)
defer buffer.Put(pb)
b := *pb
dp, err := a.Get(b, p.dataPage(keyBIndex))
if err != nil {
return 0, err
@@ -963,12 +967,6 @@ func (q btreeIndexPage) setLen(n int) btreeIndexPage {
func (p btreeIndexPage) split(a btreeStore, root btree, ph *int64, parent int64, parentIndex int, index *int) (btreeIndexPage, error) {
right := newBTreeIndexPage(0)
canRecycle := true
defer func() {
if canRecycle {
bufs.GCache.Put(right)
}
}()
right = right.setLen(kIndex)
copy(right[1:1+(2*kIndex+1)*7], p[1+14*(kIndex+1):])
p = p.setLen(kIndex)
@@ -982,8 +980,9 @@ func (p btreeIndexPage) split(a btreeStore, root btree, ph *int64, parent int64,
}
if parentIndex >= 0 {
var pp btreeIndexPage = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := btreeIndexPage(*ppp)
if pp, err = a.Get(pp, parent); err != nil {
return nil, err
}
@@ -994,7 +993,6 @@ func (p btreeIndexPage) split(a btreeStore, root btree, ph *int64, parent int64,
} else {
nr := newBTreeIndexPage(*ph)
defer bufs.GCache.Put(nr)
nr = nr.insert3(0, p.dataPage(kIndex), rh)
nrh, err := a.Alloc(nr)
if err != nil {
@@ -1007,7 +1005,6 @@ func (p btreeIndexPage) split(a btreeStore, root btree, ph *int64, parent int64,
}
if *index > kIndex {
p = right
canRecycle = false
*ph = rh
*index -= kIndex + 1
}
@@ -1031,8 +1028,9 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
return nil, err
}
var left btreeIndexPage = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(left)
pleft := buffer.Get(maxBuf)
defer buffer.Put(pleft)
left := btreeIndexPage(*pleft)
if lh != 0 {
if left, err = a.Get(left, lh); err != nil {
@@ -1040,8 +1038,9 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
}
if lc := btreeIndexPage(left).len(); lc > kIndex {
var pp = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
if pp, err = a.Get(pp, parent); err != nil {
return nil, err
}
@@ -1068,15 +1067,17 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
}
if rh != 0 {
right := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(right)
pright := buffer.Get(maxBuf)
defer buffer.Put(pright)
right := *pright
if right, err = a.Get(right, rh); err != nil {
return nil, err
}
if rc := btreeIndexPage(right).len(); rc > kIndex {
pp := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
if pp, err = a.Get(pp, parent); err != nil {
return nil, err
}
@@ -1117,15 +1118,17 @@ func (p btreeIndexPage) underflow(a btreeStore, root, iroot, parent int64, ph *i
// must persist all changes made
func (p btreeIndexPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, parentIndex int) (btreeIndexPage, error) {
pp := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
pp, err := a.Get(pp, parent)
if err != nil {
return nil, err
}
right := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(right)
pright := buffer.Get(maxBuf)
defer buffer.Put(pright)
right := *pright
if right, err = a.Get(right, rh); err != nil {
return nil, err
}
@@ -1157,8 +1160,9 @@ func (p btreeIndexPage) concat(a btreeStore, root, iroot, parent, ph, rh int64,
return nil, err
}
b7 := bufs.GCache.Get(7)
defer bufs.GCache.Put(b7)
pb7 := buffer.Get(7)
defer buffer.Put(pb7)
b7 := *pb7
return p, a.Realloc(root, h2b(b7[:7], ph))
}
@@ -1224,7 +1228,7 @@ Value[X] == 15+kKV+2*kKV*X
type btreeDataPage []byte
func newBTreeDataPage() (p btreeDataPage) {
p = bufs.GCache.Cget(1 + 2*7 + (kData+1)*2*kKV)[:1+2*7]
p = (*buffer.CGet(1 + 2*7 + (kData+1)*2*kKV))[:1+2*7]
p[0] = tagBTreeDataPage
return
}
@@ -1433,7 +1437,6 @@ func (p btreeDataPage) insertItem(a btreeStore, index int, key, value []byte) (b
func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex, index int, key, value []byte) (btreeDataPage, error) {
right, rh, err := newBTreeDataPageAlloc(a)
// fails defer bufs.GCache.Put(right)
if err != nil {
return nil, err
}
@@ -1441,8 +1444,9 @@ func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex,
if next := p.next(); next != 0 {
right.setNext(p.next())
nxh := right.next()
nx := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(nx)
pnx := buffer.Get(maxBuf)
defer buffer.Put(pnx)
nx := *pnx
if nx, err = a.Get(nx, nxh); err != nil {
return nil, err
}
@@ -1460,8 +1464,9 @@ func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex,
p = p.setLen(kData)
if parentIndex >= 0 {
var pp btreeIndexPage = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := btreeIndexPage(*ppp)
if pp, err = a.Get(pp, parent); err != nil {
return nil, err
}
@@ -1473,7 +1478,6 @@ func (p btreeDataPage) split(a btreeStore, root, ph, parent int64, parentIndex,
} else {
nr := newBTreeIndexPage(ph)
defer bufs.GCache.Put(nr)
nr = nr.insert3(0, rh, rh)
nrh, err := a.Alloc(nr)
if err != nil {
@@ -1508,8 +1512,9 @@ func (p btreeDataPage) overflow(a btreeStore, root, ph, parent int64, parentInde
}
if leftH != 0 {
left := btreeDataPage(bufs.GCache.Get(maxBuf))
defer bufs.GCache.Put(left)
pleft := buffer.Get(maxBuf)
defer buffer.Put(pleft)
left := btreeDataPage(*pleft)
if left, err = a.Get(left, leftH); err != nil {
return nil, err
}
@@ -1530,8 +1535,9 @@ func (p btreeDataPage) overflow(a btreeStore, root, ph, parent int64, parentInde
}
if rightH != 0 {
right := btreeDataPage(bufs.GCache.Get(maxBuf))
defer bufs.GCache.Put(right)
pright := buffer.Get(maxBuf)
defer buffer.Put(pright)
right := btreeDataPage(*pright)
if right, err = a.Get(right, rightH); err != nil {
return nil, err
}
@@ -1647,8 +1653,9 @@ func (p btreeDataPage) extract(a btreeStore, index int) (btreeDataPage, []byte,
func checkSiblings(a btreeStore, parent int64, parentIndex int) (left, right int64, err error) {
if parentIndex >= 0 {
var p btreeIndexPage = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(p)
pp := buffer.Get(maxBuf)
defer buffer.Put(pp)
p := btreeIndexPage(*pp)
if p, err = a.Get(p, parent); err != nil {
return
}
@@ -1671,8 +1678,9 @@ func (p btreeDataPage) underflow(a btreeStore, root, iroot, parent, ph int64, pa
}
if lh != 0 {
left := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(left)
pleft := buffer.Get(maxBuf)
defer buffer.Put(pleft)
left := *pleft
if left, err = a.Get(left, lh); err != nil {
return err
}
@@ -1688,8 +1696,9 @@ func (p btreeDataPage) underflow(a btreeStore, root, iroot, parent, ph int64, pa
}
if rh != 0 {
right := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(right)
pright := buffer.Get(maxBuf)
defer buffer.Put(pright)
right := *pright
if right, err = a.Get(right, rh); err != nil {
return err
}
@@ -1705,8 +1714,9 @@ func (p btreeDataPage) underflow(a btreeStore, root, iroot, parent, ph int64, pa
}
if lh != 0 {
left := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(left)
pleft := buffer.Get(maxBuf)
defer buffer.Put(pleft)
left := *pleft
if left, err = a.Get(left, lh); err != nil {
return err
}
@@ -1723,8 +1733,9 @@ func (p btreeDataPage) underflow(a btreeStore, root, iroot, parent, ph int64, pa
// concat must persist all changes made.
func (p btreeDataPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, parentIndex int) (err error) {
right := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(right)
pright := buffer.Get(maxBuf)
defer buffer.Put(pright)
right := *pright
if right, err = a.Get(right, rh); err != nil {
return err
}
@@ -1732,8 +1743,9 @@ func (p btreeDataPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, p
right, p = btreeDataPage(right).moveLeft(p, btreeDataPage(right).len())
nxh := btreeDataPage(right).next()
if nxh != 0 {
nx := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(nx)
pnx := buffer.Get(maxBuf)
defer buffer.Put(pnx)
nx := *pnx
if nx, err = a.Get(nx, nxh); err != nil {
return err
}
@@ -1748,8 +1760,9 @@ func (p btreeDataPage) concat(a btreeStore, root, iroot, parent, ph, rh int64, p
return err
}
pp := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(pp)
ppp := buffer.Get(maxBuf)
defer buffer.Put(ppp)
pp := *ppp
if pp, err = a.Get(pp, parent); err != nil {
return err
}
@@ -1785,8 +1798,9 @@ func newBTree(a btreeStore) (btree, error) {
}
func (root btree) String(a btreeStore) string {
r := bufs.GCache.Get(16)
defer bufs.GCache.Put(r)
pr := buffer.Get(16)
defer buffer.Put(pr)
r := *pr
r, err := a.Get(r, int64(root))
if err != nil {
panic(err)
@@ -1807,8 +1821,9 @@ func (root btree) String(a btreeStore) string {
}
m[h] = true
var b btreePage = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(b)
pb := buffer.Get(maxBuf)
defer buffer.Put(pb)
b := btreePage(*pb)
var err error
if b, err = a.Get(b, h); err != nil {
panic(err)
@@ -1873,7 +1888,6 @@ func (root btree) put2(dst []byte, a btreeStore, c func(a, b []byte) int, key []
var h int64
if iroot == 0 {
p := newBTreeDataPage()
defer bufs.GCache.Put(p)
if value, written, err = upd(key, nil); err != nil || !written {
return
}
@@ -1895,8 +1909,9 @@ func (root btree) put2(dst []byte, a btreeStore, c func(a, b []byte) int, key []
var parent int64
ph := iroot
p := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(p)
pp := buffer.Get(maxBuf)
defer buffer.Put(pp)
p := *pp
for {
if p, err = a.Get(p[:cap(p)], ph); err != nil {
@@ -2041,8 +2056,9 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
parentIndex := -1
var parent int64
p := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(p)
pp := buffer.Get(maxBuf)
defer buffer.Put(pp)
p := *pp
for {
if p, err = a.Get(p[:cap(p)], ph); err != nil {
@@ -2122,8 +2138,9 @@ func (root btree) extract(a btreeStore, dst []byte, c func(a, b []byte) int, key
}
func (root btree) deleteAny(a btreeStore) (bool, error) {
r := bufs.GCache.Get(7)
defer bufs.GCache.Put(r)
pr := buffer.Get(7)
defer buffer.Put(pr)
r := *pr
var err error
if r, err = a.Get(r, int64(root)); err != nil {
return false, err
@@ -2137,8 +2154,9 @@ func (root btree) deleteAny(a btreeStore) (bool, error) {
ph := iroot
parentIndex := -1
var parent int64
p := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(p)
pp := buffer.Get(maxBuf)
defer buffer.Put(pp)
p := *pp
for {
if p, err = a.Get(p, ph); err != nil {
@@ -2148,8 +2166,9 @@ func (root btree) deleteAny(a btreeStore) (bool, error) {
index := btreePage(p).len() / 2
if btreePage(p).isIndex() {
dph := btreeIndexPage(p).dataPage(index)
dp := bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(dp)
pdp := buffer.Get(maxBuf)
defer buffer.Put(pdp)
dp := *pdp
if dp, err = a.Get(dp, dph); err != nil {
return false, err
}
@@ -2197,8 +2216,9 @@ func (root btree) deleteAny(a btreeStore) (bool, error) {
}
func (root btree) first(a btreeStore) (ph int64, p btreeDataPage, err error) {
r := bufs.GCache.Get(7)
defer bufs.GCache.Put(r)
pr := buffer.Get(7)
defer buffer.Put(pr)
r := *pr
if r, err = a.Get(r, int64(root)); err != nil {
return
}
@@ -2217,8 +2237,9 @@ func (root btree) first(a btreeStore) (ph int64, p btreeDataPage, err error) {
}
func (root btree) last(a btreeStore) (ph int64, p btreeDataPage, err error) {
r := bufs.GCache.Get(7)
defer bufs.GCache.Put(r)
pr := buffer.Get(7)
defer buffer.Put(pr)
r := *pr
if r, err = a.Get(r, int64(root)); err != nil {
return
}
@@ -2238,8 +2259,9 @@ func (root btree) last(a btreeStore) (ph int64, p btreeDataPage, err error) {
// key >= p[index].key
func (root btree) seek(a btreeStore, c func(a, b []byte) int, key []byte) (p btreeDataPage, index int, equal bool, err error) {
r := bufs.GCache.Get(7)
defer bufs.GCache.Put(r)
pr := buffer.Get(7)
defer buffer.Put(pr)
r := *pr
if r, err = a.Get(r, int64(root)); err != nil {
return
}
@@ -2271,8 +2293,9 @@ func (root btree) seek(a btreeStore, c func(a, b []byte) int, key []byte) (p btr
}
func (root btree) clear(a btreeStore) (err error) {
r := bufs.GCache.Get(7)
defer bufs.GCache.Put(r)
pr := buffer.Get(7)
defer buffer.Put(pr)
r := *pr
if r, err = a.Get(r, int64(root)); err != nil {
return
}
@@ -2291,8 +2314,9 @@ func (root btree) clear(a btreeStore) (err error) {
}
func (root btree) clear2(a btreeStore, ph int64) (err error) {
var p = bufs.GCache.Get(maxBuf)
defer bufs.GCache.Put(p)
pp := buffer.Get(maxBuf)
defer buffer.Put(pp)
p := *pp
if p, err = a.Get(p, ph); err != nil {
return
}

View File

@@ -15,13 +15,13 @@ import (
"strings"
"sync"
"github.com/cznic/bufs"
"github.com/cznic/internal/buffer"
"github.com/cznic/mathutil"
"github.com/cznic/zappy"
)
const (
maxBuf = maxRq + 20 // bufs,Buffers.Alloc
maxBuf = maxRq + 20
)
// Options are passed to the NewAllocator to amend some configuration. The
@@ -413,8 +413,9 @@ func (a *Allocator) cfree(h int64) {
// Passing handles not obtained initially from Alloc or not anymore valid to
// any other Allocator methods can result in an irreparably corrupted database.
func (a *Allocator) Alloc(b []byte) (handle int64, err error) {
buf := bufs.GCache.Get(zappy.MaxEncodedLen(len(b)))
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(zappy.MaxEncodedLen(len(b)))
defer buffer.Put(pbuf)
buf := *pbuf
buf, _, cc, err := a.makeUsedBlock(buf, b)
if err != nil {
return
@@ -632,8 +633,7 @@ func (a *Allocator) unlink(h, atoms, p, n int64) (err error) {
// Return len(slice) == n, reuse src if possible.
func need(n int, src []byte) []byte {
if cap(src) < n {
bufs.GCache.Put(src)
return bufs.GCache.Get(n)
return *buffer.Get(n)
}
return src[:n]
@@ -683,8 +683,9 @@ func (a *Allocator) Get(buf []byte, handle int64) (b []byte, err error) {
}
}(handle)
first := bufs.GCache.Get(16)
defer bufs.GCache.Put(first)
pfirst := buffer.Get(16)
defer buffer.Put(pfirst)
first := *pfirst
relocated := false
relocSrc := handle
reloc:
@@ -714,8 +715,9 @@ reloc:
return zappy.Decode(buf, first[1:dlen+1])
}
default:
cc := bufs.GCache.Get(1)
defer bufs.GCache.Put(cc)
pcc := buffer.Get(1)
defer buffer.Put(pcc)
cc := *pcc
dlen := int(tag)
atoms := n2atoms(dlen)
tailOff := off + 16*int64(atoms) - 1
@@ -734,8 +736,9 @@ reloc:
}
return
case tagCompressed:
zbuf := bufs.GCache.Get(dlen)
defer bufs.GCache.Put(zbuf)
pzbuf := buffer.Get(dlen)
defer buffer.Put(pzbuf)
zbuf := *pzbuf
off += 1
if err = a.read(zbuf, off); err != nil {
return buf[:0], err
@@ -747,8 +750,9 @@ reloc:
case 0:
return buf[:0], nil
case tagUsedLong:
cc := bufs.GCache.Get(1)
defer bufs.GCache.Put(cc)
pcc := buffer.Get(1)
defer buffer.Put(pcc)
cc := *pcc
dlen := m2n(int(first[1])<<8 | int(first[2]))
atoms := n2atoms(dlen)
tailOff := off + 16*int64(atoms) - 1
@@ -767,8 +771,9 @@ reloc:
}
return
case tagCompressed:
zbuf := bufs.GCache.Get(dlen)
defer bufs.GCache.Put(zbuf)
pzbuf := buffer.Get(dlen)
defer buffer.Put(pzbuf)
zbuf := *pzbuf
off += 3
if err = a.read(zbuf, off); err != nil {
return buf[:0], err
@@ -819,10 +824,12 @@ func (a *Allocator) Realloc(handle int64, b []byte) (err error) {
func (a *Allocator) realloc(handle int64, b []byte) (err error) {
var dlen, needAtoms0 int
b8 := bufs.GCache.Get(8)
defer bufs.GCache.Put(b8)
dst := bufs.GCache.Get(zappy.MaxEncodedLen(len(b)))
defer bufs.GCache.Put(dst)
pb8 := buffer.Get(8)
defer buffer.Put(pb8)
b8 := *pb8
pdst := buffer.Get(zappy.MaxEncodedLen(len(b)))
defer buffer.Put(pdst)
dst := *pdst
b, needAtoms0, cc, err := a.makeUsedBlock(dst, b)
if err != nil {
return
@@ -922,8 +929,9 @@ retry:
return err
}
rb := bufs.GCache.Cget(16)
defer bufs.GCache.Put(rb)
prb := buffer.CGet(16)
defer buffer.Put(prb)
rb := *prb
rb[0] = tagUsedRelocated
h2b(rb[1:], newH)
if err = a.writeAt(rb[:], h2off(handle)); err != nil {
@@ -950,8 +958,9 @@ func (a *Allocator) write(off int64, b ...[]byte) (err error) {
for _, part := range b {
rq += len(part)
}
buf := bufs.GCache.Get(rq)
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(rq)
defer buffer.Put(pbuf)
buf := *pbuf
buf = buf[:0]
for _, part := range b {
buf = append(buf, part...)
@@ -987,8 +996,9 @@ func (a *Allocator) nfo(h int64) (tag byte, s, p, n int64, err error) {
}
}
buf := bufs.GCache.Get(22)
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(22)
defer buffer.Put(pbuf)
buf := *pbuf
if err = a.read(buf[:rq], off); err != nil {
return
}
@@ -1020,8 +1030,9 @@ func (a *Allocator) leftNfo(h int64) (tag byte, s, p, n int64, err error) {
return
}
buf := bufs.GCache.Get(8)
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(8)
defer buffer.Put(pbuf)
buf := *pbuf
off := h2off(h)
if err = a.read(buf[:], off-8); err != nil {
return
@@ -1038,8 +1049,9 @@ func (a *Allocator) leftNfo(h int64) (tag byte, s, p, n int64, err error) {
// Set h.prev = p
func (a *Allocator) prev(h, p int64) (err error) {
b := bufs.GCache.Get(7)
defer bufs.GCache.Put(b)
pb := buffer.Get(7)
defer buffer.Put(pb)
b := *pb
off := h2off(h)
if err = a.read(b[:1], off); err != nil {
return
@@ -1058,8 +1070,9 @@ func (a *Allocator) prev(h, p int64) (err error) {
// Set h.next = n
func (a *Allocator) next(h, n int64) (err error) {
b := bufs.GCache.Get(7)
defer bufs.GCache.Put(b)
pb := buffer.Get(7)
defer buffer.Put(pb)
b := *pb
off := h2off(h)
if err = a.read(b[:1], off); err != nil {
return
@@ -1078,8 +1091,9 @@ func (a *Allocator) next(h, n int64) (err error) {
// Make the filer image @h a free block.
func (a *Allocator) makeFree(h, atoms, prev, next int64) (err error) {
buf := bufs.GCache.Get(22)
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(22)
defer buffer.Put(pbuf)
buf := *pbuf
switch {
case atoms == 1:
buf[0], buf[15] = tagFreeShort, tagFreeShort
@@ -1142,8 +1156,9 @@ func (a *Allocator) makeUsedBlock(dst []byte, b []byte) (w []byte, rqAtoms int,
func (a *Allocator) writeUsedBlock(h int64, cc byte, b []byte) (err error) {
n := len(b)
rq := n2atoms(n) << 4
buf := bufs.GCache.Get(rq)
defer bufs.GCache.Put(buf)
pbuf := buffer.Get(rq)
defer buffer.Put(pbuf)
buf := *pbuf
switch n <= maxShort {
case true:
buf[0] = byte(n)
@@ -1706,8 +1721,9 @@ func (f *flt) init() {
}
func (f *flt) load(fi Filer, off int64) (err error) {
b := bufs.GCache.Get(fltSz)
defer bufs.GCache.Put(b)
pb := buffer.Get(fltSz)
defer buffer.Put(pb)
b := *pb
if _, err = fi.ReadAt(b[:], off); err != nil {
return
}
@@ -1764,8 +1780,9 @@ func (f *flt) setHead(h, atoms int64, fi Filer) (err error) {
case atoms < 1:
panic(atoms)
case atoms >= maxFLTRq:
b := bufs.GCache.Get(7)
defer bufs.GCache.Put(b)
pb := buffer.Get(7)
defer buffer.Put(pb)
b := *pb
if _, err = fi.WriteAt(h2b(b[:], h), 8*13+1); err != nil {
return
}
@@ -1777,8 +1794,9 @@ func (f *flt) setHead(h, atoms int64, fi Filer) (err error) {
g := f[lg:]
for i := range f {
if atoms < g[i+1].minSize {
b := bufs.GCache.Get(7)
defer bufs.GCache.Put(b)
pb := buffer.Get(7)
defer buffer.Put(pb)
b := *pb
if _, err = fi.WriteAt(h2b(b[:], h), 8*int64(i+lg)+1); err != nil {
return
}

View File

@@ -2,18 +2,23 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lldb (WIP) implements a low level database engine. The database
// model used could be considered a specific implementation of some small(est)
// Package lldb implements a low level database engine. The database model used
// could be considered a specific implementation of some small(est)
// intersection of models listed in [1]. As a settled term is lacking, it'll be
// called here a 'Virtual memory model' (VMM).
//
// Experimental release notes
// Changelog
//
// This is an experimental release. Don't open a DB from two applications or
// two instances of an application - it will get corrupted (no file locking is
// implemented and this task is delegated to lldb's clients).
// 2016-07-24: v1.0.4 brings some performance improvements.
//
// WARNING: THE LLDB API IS SUBJECT TO CHANGE.
// 2016-07-22: v1.0.3 brings some small performance improvements.
//
// 2016-07-12: v1.0.2 now uses packages from cznic/internal.
//
// 2016-07-12: v1.0.1 adds a license for testdata/fortunes.txt.
//
// 2016-07-11: First standalone release v1.0.0 of the package previously
// published as experimental (github.com/cznic/exp/lldb).
//
// Filers
//

107
vendor/github.com/cznic/lldb/memfiler.go generated vendored Normal file

@@ -0,0 +1,107 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A memory-only implementation of Filer.
package lldb
import (
"fmt"
"io"
"github.com/cznic/internal/file"
)
const (
pgBits = 16
pgSize = 1 << pgBits
pgMask = pgSize - 1
)
var _ Filer = &MemFiler{}
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
// ReadFrom and WriteTo methods.
type MemFiler struct {
fi file.Interface
nest int
}
// NewMemFiler returns a new MemFiler.
func NewMemFiler() *MemFiler {
fi, err := file.OpenMem("")
if err != nil {
return nil
}
return &MemFiler{fi: fi}
}
// BeginUpdate implements Filer.
func (f *MemFiler) BeginUpdate() error {
f.nest++
return nil
}
// Close implements Filer.
func (f *MemFiler) Close() (err error) {
if f.nest != 0 {
return &ErrPERM{(f.Name() + ":Close")}
}
return f.fi.Close()
}
// EndUpdate implements Filer.
func (f *MemFiler) EndUpdate() (err error) {
if f.nest == 0 {
return &ErrPERM{(f.Name() + ": EndUpdate")}
}
f.nest--
return
}
// Name implements Filer.
func (f *MemFiler) Name() string { return fmt.Sprintf("%p.memfiler", f) }
// PunchHole implements Filer.
func (f *MemFiler) PunchHole(off, size int64) (err error) { return nil }
// ReadAt implements Filer.
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) { return f.fi.ReadAt(b, off) }
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
// number of bytes read from 'r'.
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) { return f.fi.ReadFrom(r) }
// Rollback implements Filer.
func (f *MemFiler) Rollback() (err error) { return nil }
// Size implements Filer.
func (f *MemFiler) Size() (int64, error) {
info, err := f.fi.Stat()
if err != nil {
return 0, err
}
return info.Size(), nil
}
// Sync implements Filer.
func (f *MemFiler) Sync() error { return nil }
// Truncate implements Filer.
func (f *MemFiler) Truncate(size int64) (err error) { return f.fi.Truncate(size) }
// WriteAt implements Filer.
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) { return f.fi.WriteAt(b, off) }
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
// an io.WriterAt, then WriteTo may attempt to skip writing long runs of
// zeros, i.e. it will attempt to punch holes in w where possible, if w
// happens to be a freshly created or zero-length-truncated OS file. 'n'
// reports the number of bytes written to 'w'.
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) { return f.fi.WriteTo(w) }
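A short sketch of MemFiler in use, persisting the in-memory content at the end (file name and offsets are illustrative):

package main

import (
	"os"

	"github.com/cznic/lldb"
)

func main() {
	f := lldb.NewMemFiler()
	f.WriteAt([]byte("payload"), 1<<20) // sparse write; earlier pages read as zeros
	out, _ := os.Create("snapshot.bin")
	defer out.Close()
	f.WriteTo(out) // may punch holes in out where the content is zero
	f.Close()
}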

View File

@@ -9,11 +9,10 @@ package lldb
import (
"os"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
"github.com/cznic/internal/file"
)
var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer.
var _ Filer = &SimpleFileFiler{}
// SimpleFileFiler is an os.File backed Filer intended for use where structural
// consistency can be reached by other means (SimpleFileFiler is for example
@@ -27,14 +26,20 @@ var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer.
// when, for example, a power outage occurs or the updating process terminates
// abruptly.
type SimpleFileFiler struct {
file *os.File
fi file.Interface
name string
nest int
size int64 // not set if < 0
}
// NewSimpleFileFiler returns a new SimpleFileFiler.
func NewSimpleFileFiler(f *os.File) *SimpleFileFiler {
return &SimpleFileFiler{file: f, size: -1}
fi, err := file.Open(f)
if err != nil {
return nil
}
sf := &SimpleFileFiler{fi: fi, name: f.Name()}
return sf
}
// BeginUpdate implements Filer.
@@ -49,7 +54,7 @@ func (f *SimpleFileFiler) Close() (err error) {
return &ErrPERM{(f.Name() + ":Close")}
}
return f.file.Close()
return f.fi.Close()
}
// EndUpdate implements Filer.
@@ -63,61 +68,32 @@ func (f *SimpleFileFiler) EndUpdate() (err error) {
}
// Name implements Filer.
func (f *SimpleFileFiler) Name() string {
return f.file.Name()
}
func (f *SimpleFileFiler) Name() string { return f.name }
// PunchHole implements Filer.
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) {
return fileutil.PunchHole(f.file, off, size)
}
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) { return nil }
// ReadAt implements Filer.
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) {
return f.file.ReadAt(b, off)
}
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) { return f.fi.ReadAt(b, off) }
// Rollback implements Filer.
func (f *SimpleFileFiler) Rollback() (err error) { return }
func (f *SimpleFileFiler) Rollback() (err error) { return nil }
// Size implements Filer.
func (f *SimpleFileFiler) Size() (int64, error) {
if f.size < 0 { // boot
fi, err := os.Stat(f.file.Name())
if err != nil {
return 0, err
}
f.size = fi.Size()
info, err := f.fi.Stat()
if err != nil {
return 0, err
}
return f.size, nil
return info.Size(), nil
}
// Sync implements Filer.
func (f *SimpleFileFiler) Sync() error {
return f.file.Sync()
}
func (f *SimpleFileFiler) Sync() error { return f.fi.Sync() }
// Truncate implements Filer.
func (f *SimpleFileFiler) Truncate(size int64) (err error) {
if size < 0 {
return &ErrINVAL{"Truncate size", size}
}
f.size = size
return f.file.Truncate(size)
}
func (f *SimpleFileFiler) Truncate(size int64) (err error) { return f.fi.Truncate(size) }
// WriteAt implements Filer.
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) {
if f.size < 0 { // boot
fi, err := os.Stat(f.file.Name())
if err != nil {
return 0, err
}
f.size = fi.Size()
}
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
return f.file.WriteAt(b, off)
}
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) { return f.fi.WriteAt(b, off) }

View File

@@ -56,6 +56,7 @@ import (
"sync"
"github.com/cznic/fileutil"
"github.com/cznic/internal/buffer"
"github.com/cznic/mathutil"
)
@@ -65,28 +66,16 @@ var (
)
const (
bfBits = 9
bfBits = 12
bfSize = 1 << bfBits
bfMask = bfSize - 1
)
var (
bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
bitZeroPage bitPage
allDirtyFlags [bfSize >> 3]byte
)
func init() {
for i := range allDirtyFlags {
allDirtyFlags[i] = 0xff
}
}
type (
bitPage struct {
prev, next *bitPage
data [bfSize]byte
flags [bfSize >> 3]byte
pdata *[]byte
data []byte
dirty bool
}
@@ -96,7 +85,6 @@ type (
parent Filer
m bitFilerMap
size int64
sync.Mutex
}
)
@@ -118,6 +106,12 @@ func (f *bitFiler) Close() (err error) { return }
func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) }
func (f *bitFiler) Size() (int64, error) { return f.size, nil }
func (f *bitFiler) free() {
for _, pg := range f.m {
buffer.Put(pg.pdata)
}
}
func (f *bitFiler) PunchHole(off, size int64) (err error) {
first := off >> bfBits
if off&bfMask != 0 {
@@ -131,13 +125,13 @@ func (f *bitFiler) PunchHole(off, size int64) (err error) {
if limit := f.size >> bfBits; last > limit {
last = limit
}
f.Lock()
for pgI := first; pgI <= last; pgI++ {
pg := &bitPage{}
pg.flags = allDirtyFlags
pg.pdata = buffer.CGet(bfSize)
pg.data = *pg.pdata
pg.dirty = true
f.m[pgI] = pg
}
f.Unlock()
return
}
@@ -151,14 +145,14 @@ func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
err = io.EOF
}
for rem != 0 && avail > 0 {
f.Lock()
pg := f.m[pgI]
if pg == nil {
pg = &bitPage{}
pg.pdata = buffer.CGet(bfSize)
pg.data = *pg.pdata
if f.parent != nil {
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
_, err = f.parent.ReadAt(pg.data, off&^bfMask)
if err != nil && !fileutil.IsEOF(err) {
f.Unlock()
return
}
@@ -166,7 +160,6 @@ func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
}
f.m[pgI] = pg
}
f.Unlock()
nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
pgI++
pgO = 0
@@ -179,8 +172,6 @@ func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
}
func (f *bitFiler) Truncate(size int64) (err error) {
f.Lock()
defer f.Unlock()
switch {
case size < 0:
return &ErrINVAL{"Truncate size", size}
@@ -199,6 +190,9 @@ func (f *bitFiler) Truncate(size int64) (err error) {
last++
}
for ; first < last; first++ {
if bp, ok := f.m[first]; ok {
buffer.Put(bp.pdata)
}
delete(f.m, first)
}
@@ -214,14 +208,14 @@ func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
rem := n
var nc int
for rem != 0 {
f.Lock()
pg := f.m[pgI]
if pg == nil {
pg = &bitPage{}
pg.pdata = buffer.CGet(bfSize)
pg.data = *pg.pdata
if f.parent != nil {
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
_, err = f.parent.ReadAt(pg.data, off&^bfMask)
if err != nil && !fileutil.IsEOF(err) {
f.Unlock()
return
}
@@ -229,13 +223,9 @@ func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
}
f.m[pgI] = pg
}
f.Unlock()
nc = copy(pg.data[pgO:], b)
pgI++
pg.dirty = true
for i := pgO; i < pgO+nc; i++ {
pg.flags[i>>3] |= bitmask[i&7]
}
pgO = 0
rem -= nc
b = b[nc:]
@@ -257,8 +247,6 @@ func (f *bitFiler) link() {
}
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
f.Lock()
defer f.Unlock()
f.link()
for pgI, pg := range f.m {
if !pg.dirty {
@@ -271,36 +259,11 @@ func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
}
for pg != nil && pg.dirty {
last := false
var off int64
first := -1
for i := 0; i < bfSize; i++ {
flag := pg.flags[i>>3]&bitmask[i&7] != 0
switch {
case flag && !last: // Leading edge detected
off = pgI<<bfBits + int64(i)
first = i
case !flag && last: // Trailing edge detected
n, err := w.WriteAt(pg.data[first:i], off)
if n != i-first {
return 0, err
}
first = -1
nwr++
}
last = flag
}
if first >= 0 {
i := bfSize
n, err := w.WriteAt(pg.data[first:i], off)
if n != i-first {
return 0, err
}
nwr++
if _, err := w.WriteAt(pg.data, pgI<<bfBits); err != nil {
return 0, err
}
nwr++
pg.dirty = false
pg = pg.next
pgI++
@@ -479,6 +442,11 @@ func (r *RollbackFiler) Close() (err error) {
err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"}
}
if r.bitFiler != nil {
r.bitFiler.free()
r.bitFiler = nil
}
return
}
@@ -510,13 +478,18 @@ func (r *RollbackFiler) EndUpdate() (err error) {
switch {
case r.tlevel == 0:
r.bitFiler = nil
defer func() {
r.bitFiler.free()
r.bitFiler = nil
}()
if nwr == 0 {
return
}
return r.checkpoint(sz)
default:
r.bitFiler.free()
r.bitFiler = parent.(*bitFiler)
sz, _ := bf.Size() // bitFiler.Size() never returns err != nil
return parent.Truncate(sz)
@@ -576,6 +549,7 @@ func (r *RollbackFiler) Rollback() (err error) {
}
if r.tlevel > 1 {
r.bitFiler.free()
r.bitFiler = r.bitFiler.parent.(*bitFiler)
}
r.tlevel--

View File

@@ -500,7 +500,7 @@ http://en.wikipedia.org/wiki/Miller-Rabin_primality_test#Algorithm_and_running_t
return composite
return probably prime
... this function behaves like passing 1 for 'k' and additionaly a
... this function behaves like passing 1 for 'k' and additionally a
fixed/non-random 'a'. Otherwise it's the same algorithm.
See also: http://mathworld.wolfram.com/Rabin-MillerStrongPseudoprimeTest.html

View File

@@ -221,7 +221,7 @@ record handle} and the B+Tree value is not used.
+------+-----------------+ +--------------+
If the indexed values are not all NULL then the keys of the B+Tree are the indexed
values and the B+Tree value is the record handle.
values and the B+Tree value is the record handle.
B+Tree key B+Tree value
+----------------+ +---------------+
@@ -262,7 +262,7 @@ out are stripped off and "resupplied" on decoding transparently. See also
blob.go. If the length of the resulting slice is <= shortBlob, the first and
only chunk is the scalar encoding of
[]interface{}{typeTag, slice}. // initial (and last) chunk
The length of slice can be zero (for blob("")). If the resulting slice is long
@@ -285,9 +285,9 @@ Links
Referenced from above:
[0]: http://godoc.org/github.com/cznic/exp/lldb#hdr-Block_handles
[1]: http://godoc.org/github.com/cznic/exp/lldb#EncodeScalars
[2]: http://godoc.org/github.com/cznic/exp/lldb#BTree
[0]: http://godoc.org/github.com/cznic/lldb#hdr-Block_handles
[1]: http://godoc.org/github.com/cznic/lldb#EncodeScalars
[2]: http://godoc.org/github.com/cznic/lldb#BTree
Rationale

20
vendor/github.com/cznic/ql/doc.go generated vendored

@@ -14,6 +14,20 @@
//
// Change list
//
// 2016-07-29: Release v1.0.6 enables alternatively using = instead of == for
// equality operation.
//
// https://github.com/cznic/ql/issues/131
//
// 2016-07-11: Release v1.0.5 undoes vendoring of lldb. QL now uses stable lldb
// (github.com/cznic/lldb).
//
// https://github.com/cznic/ql/issues/128
//
// 2016-07-06: Release v1.0.4 fixes a panic when closing the WAL file.
//
// https://github.com/cznic/ql/pull/127
//
// 2016-04-03: Release v1.0.3 fixes a data race.
//
// https://github.com/cznic/ql/issues/126
@@ -299,7 +313,7 @@
// andnot = "&^" .
// lsh = "<<" .
// le = "<=" .
// eq = "==" .
// eq = "==" | "=" .
// ge = ">=" .
// neq = "!=" .
// oror = "||" .
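Illustrating the relaxed grammar introduced in v1.0.6: these two statements are now equivalent (table and column names are made up):

SELECT * FROM t WHERE c == 1;
SELECT * FROM t WHERE c = 1;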
@@ -800,7 +814,7 @@
//
// expr1 LIKE expr2
//
// yeild a boolean value true if expr2, a regular expression, matches expr1
// yield a boolean value true if expr2, a regular expression, matches expr1
// (see also [6]). Both expression must be of type string. If any one of the
// expressions is NULL the result is NULL.
//
@@ -887,7 +901,7 @@
//
// expr IS NOT NULL // case B
//
// yeild a boolean value true if expr does not have a specific type (case A) or
// yield a boolean value true if expr does not have a specific type (case A) or
// if expr has a specific type (case B). In other cases the result is a boolean
// value false.
//

33
vendor/github.com/cznic/ql/etc.go generated vendored

@@ -10,7 +10,6 @@ import (
"io"
"math"
"math/big"
"strings"
"time"
)
@@ -2764,38 +2763,6 @@ var isSystemName = map[string]bool{
"__Table": true,
}
func qualifier(s string) string {
if pos := strings.IndexByte(s, '.'); pos >= 0 {
s = s[:pos]
}
return s
}
func mustQualifier(s string) string {
q := qualifier(s)
if q == s {
panic("internal error 068")
}
return q
}
func selector(s string) string {
if pos := strings.IndexByte(s, '.'); pos >= 0 {
s = s[pos+1:]
}
return s
}
func mustSelector(s string) string {
q := selector(s)
if q == s {
panic("internal error 053")
}
return q
}
func qnames(l []string) []string {
r := make([]string, len(l))
for i, v := range l {

25
vendor/github.com/cznic/ql/expr.go generated vendored

@@ -135,12 +135,6 @@ func mentionedColumns(e expression) map[string]struct{} {
return m
}
func mentionedQColumns(e expression) map[string]struct{} {
m := map[string]struct{}{}
mentionedColumns0(e, true, false, m)
return m
}
func staticExpr(e expression) (expression, error) {
if e.isStatic() {
v, err := e.eval(nil, nil)
@@ -166,11 +160,6 @@ type (
idealUint uint64
)
type exprTab struct {
expr expression
table string
}
type pexpr struct {
expr expression
}
@@ -3397,20 +3386,6 @@ func (u *unaryOperation) String() string {
}
}
// !ident
func (u *unaryOperation) isNotQIdent() (bool, string, expression) {
if u.op != '!' {
return false, "", nil
}
id, ok := u.v.(*ident)
if ok && id.isQualified() {
return true, mustQualifier(id.s), &unaryOperation{'!', &ident{mustSelector(id.s)}}
}
return false, "", nil
}
func (u *unaryOperation) eval(execCtx *execCtx, ctx map[interface{}]interface{}) (r interface{}, err error) {
defer func() {
if e := recover(); e != nil {

4
vendor/github.com/cznic/ql/file.go generated vendored

@@ -19,9 +19,9 @@ import (
"sync"
"time"
"github.com/cznic/lldb"
"github.com/cznic/mathutil"
"github.com/cznic/ql/vendored/github.com/camlistore/go4/lock"
"github.com/cznic/ql/vendored/github.com/cznic/exp/lldb"
)
const (
@@ -409,7 +409,7 @@ func newFileFromOSFile(f lldb.OSFile) (fi *file, err error) {
w, err = os.OpenFile(wn, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
closew = true
defer func() {
if closew {
if w != nil && closew {
nm := w.Name()
w.Close()
os.Remove(nm)

View File

@@ -42,7 +42,6 @@ type HTTPFile struct {
isFile bool
name string
off int
sz int
}
// Close implements http.File.
@@ -212,7 +211,7 @@ func (db *DB) NewHTTPFS(query string) (*HTTPFS, error) {
// The elements in a file path are separated by slash ('/', U+002F) characters,
// regardless of host operating system convention.
func (f *HTTPFS) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
if filepath.Separator != '/' && strings.Contains(name, string(filepath.Separator)) ||
strings.Contains(name, "\x00") {
return nil, fmt.Errorf("invalid character in file path: %q", name)
}
@@ -264,7 +263,7 @@ func (f *HTTPFS) Open(name string) (http.File, error) {
n++
switch name := data[0].(type) {
case string:
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
if filepath.Separator != '/' && strings.Contains(name, string(filepath.Separator)) ||
strings.Contains(name, "\x00") {
return false, fmt.Errorf("invalid character in file path: %q", name)
}

2021
vendor/github.com/cznic/ql/parser.go generated vendored

File diff suppressed because it is too large

22
vendor/github.com/cznic/ql/ql.go generated vendored

@@ -144,8 +144,8 @@ func (l List) String() string {
return b.String()
}
// IsExplainStmt reports whether l is a single EXPLAIN statment or a single EXPLAIN
// statment enclosed in a transaction.
// IsExplainStmt reports whether l is a single EXPLAIN statement or a single EXPLAIN
// statement enclosed in a transaction.
func (l List) IsExplainStmt() bool {
switch len(l.l) {
case 1:
@@ -209,10 +209,10 @@ type TCtx struct {
// NewRWCtx returns a new read/write transaction context. NewRWCtx is safe for
// concurrent use by multiple goroutines, every one of them will get a new,
// unique conext.
// unique context.
func NewRWCtx() *TCtx { return &TCtx{} }
// Recordset is a result of a select statment. It can call a user function for
// Recordset is a result of a select statement. It can call a user function for
// every row (record) in the set using the Do method.
//
// Recordsets can be safely reused. Evaluation of the rows is performed lazily.
@@ -672,16 +672,6 @@ func (r tableRset) plan(ctx *execCtx) (plan, error) {
return rs, nil
}
func findFldIndex(fields []*fld, name string) int {
for i, f := range fields {
if f.name == name {
return i
}
}
return -1
}
func findFld(fields []*fld, name string) (f *fld) {
for _, f = range fields {
if f.name == name {
@@ -1276,7 +1266,7 @@ func (db *DB) run1(pc *TCtx, s stmt, arg ...interface{}) (rs Recordset, tnla, tn
}
if pc != db.cc {
for db.rw == true {
for db.rw {
db.mu.Unlock() // Transaction isolation
db.mu.Lock()
}
@@ -1501,7 +1491,7 @@ type IndexInfo struct {
Name string // Index name
Table string // Table name.
Column string // Column name.
Unique bool // Wheter the index is unique.
Unique bool // Whether the index is unique.
ExpressionList []string // Index expression list.
}

2
vendor/github.com/cznic/ql/stmt.go generated vendored

@@ -8,7 +8,6 @@ import (
"bytes"
"fmt"
"strings"
"sync"
"github.com/cznic/strutil"
)
@@ -716,7 +715,6 @@ type selectStmt struct {
group *groupByRset
hasAggregates bool
limit *limitRset
mu sync.Mutex
offset *offsetRset
order *orderByRset
where *whereRset

View File

@@ -137,8 +137,7 @@ type table struct {
defaults []expression
}
func (t *table) hasIndices() bool { return len(t.indices) != 0 || len(t.indices2) != 0 }
func (t *table) hasIndices2() bool { return len(t.indices2) != 0 }
func (t *table) hasIndices() bool { return len(t.indices) != 0 || len(t.indices2) != 0 }
func (t *table) constraintsAndDefaults(ctx *execCtx) error {
if isSystemName[t.name] {
@@ -747,14 +746,6 @@ func (t *table) addRecord(execCtx *execCtx, r []interface{}) (id int64, err erro
return
}
func (t *table) flds() (r []*fld) {
r = make([]*fld, len(t.cols))
for i, v := range t.cols {
r[i] = &fld{expr: &ident{v.name}, name: v.name}
}
return
}
func (t *table) fieldNames() []string {
r := make([]string, len(t.cols))
for i, v := range t.cols {
@@ -802,10 +793,10 @@ type root struct {
head int64 // Single linked table list
lastInsertID int64
parent *root
rowsAffected int64 //LATER implement
store storage
tables map[string]*table
thead *table
//rowsAffected int64 //LATER implement
store storage
tables map[string]*table
thead *table
}
func newRoot(store storage) (r *root, err error) {

View File

@@ -1,344 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A memory-only implementation of Filer.
/*
pgBits: 8
BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s
BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s
BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s
BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s
pgBits: 9
BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s
BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s
BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s
BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s
pgBits: 10
BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s
BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s
BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s
BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s
pgBits: 11
BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s
BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s
BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s
BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s
pgBits: 12
BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s
BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s
BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s
BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s
pgBits: 13
BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s
BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s
BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s
BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s
pgBits: 14
BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s
BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s
BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s
BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s
pgBits: 15
BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s
BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s
BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s
BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s
pgBits: 16
BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s
BenchmarkMemFilerRdSeq 200000 11201 ns/op 2856.80 MB/s
BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s
BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s
pgBits: 17
BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s
BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s
BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s
BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s
pgBits: 18
BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s
BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s
BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s
BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s
*/
package lldb
import (
"bytes"
"fmt"
"io"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
const (
pgBits = 16
pgSize = 1 << pgBits
pgMask = pgSize - 1
)
var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer.
type memFilerMap map[int64]*[pgSize]byte
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
// ReadFrom and WriteTo methods.
type MemFiler struct {
m memFilerMap
nest int
size int64
}
// NewMemFiler returns a new MemFiler.
func NewMemFiler() *MemFiler {
return &MemFiler{m: memFilerMap{}}
}
// BeginUpdate implements Filer.
func (f *MemFiler) BeginUpdate() error {
f.nest++
return nil
}
// Close implements Filer.
func (f *MemFiler) Close() (err error) {
if f.nest != 0 {
return &ErrPERM{(f.Name() + ":Close")}
}
return
}
// EndUpdate implements Filer.
func (f *MemFiler) EndUpdate() (err error) {
if f.nest == 0 {
return &ErrPERM{(f.Name() + ": EndUpdate")}
}
f.nest--
return
}
// Name implements Filer.
func (f *MemFiler) Name() string {
return fmt.Sprintf("%p.memfiler", f)
}
// PunchHole implements Filer.
func (f *MemFiler) PunchHole(off, size int64) (err error) {
if off < 0 {
return &ErrINVAL{f.Name() + ": PunchHole off", off}
}
if size < 0 || off+size > f.size {
return &ErrINVAL{f.Name() + ": PunchHole size", size}
}
first := off >> pgBits
if off&pgMask != 0 {
first++
}
off += size - 1
last := off >> pgBits
if off&pgMask != 0 {
last--
}
if limit := f.size >> pgBits; last > limit {
last = limit
}
for pg := first; pg <= last; pg++ {
delete(f.m, pg)
}
return
}
var zeroPage [pgSize]byte
// ReadAt implements Filer.
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
avail := f.size - off
pgI := off >> pgBits
pgO := int(off & pgMask)
rem := len(b)
if int64(rem) >= avail {
rem = int(avail)
err = io.EOF
}
for rem != 0 && avail > 0 {
pg := f.m[pgI]
if pg == nil {
pg = &zeroPage
}
nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
pgI++
pgO = 0
rem -= nc
n += nc
b = b[nc:]
}
return
}
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
// number of bytes read from 'r'.
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) {
if err = f.Truncate(0); err != nil {
return
}
var (
b [pgSize]byte
rn int
off int64
)
var rerr error
for rerr == nil {
if rn, rerr = r.Read(b[:]); rn != 0 {
f.WriteAt(b[:rn], off)
off += int64(rn)
n += int64(rn)
}
}
if !fileutil.IsEOF(rerr) {
err = rerr
}
return
}
// Rollback implements Filer.
func (f *MemFiler) Rollback() (err error) { return }
// Size implements Filer.
func (f *MemFiler) Size() (int64, error) {
return f.size, nil
}
// Sync implements Filer.
func (f *MemFiler) Sync() error {
return nil
}
// Truncate implements Filer.
func (f *MemFiler) Truncate(size int64) (err error) {
switch {
case size < 0:
return &ErrINVAL{"Truncate size", size}
case size == 0:
f.m = memFilerMap{}
f.size = 0
return
}
first := size >> pgBits
if size&pgMask != 0 {
first++
}
last := f.size >> pgBits
if f.size&pgMask != 0 {
last++
}
for ; first < last; first++ {
delete(f.m, first)
}
f.size = size
return
}
// WriteAt implements Filer.
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) {
pgI := off >> pgBits
pgO := int(off & pgMask)
n = len(b)
rem := n
var nc int
for rem != 0 {
if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) {
delete(f.m, pgI)
nc = pgSize
} else {
pg := f.m[pgI]
if pg == nil {
pg = new([pgSize]byte)
f.m[pgI] = pg
}
nc = copy((*pg)[pgO:], b)
}
pgI++
pgO = 0
rem -= nc
b = b[nc:]
}
f.size = mathutil.MaxInt64(f.size, off+int64(n))
return
}
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some
// value of big, runs of zeros, i.e. it will attempt to punch holes, where
// possible, in `w` if that happens to be a freshly created or to zero length
// truncated OS file. 'n' reports the number of bytes written to 'w'.
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) {
var (
b [pgSize]byte
wn, rn int
off int64
rerr error
)
if wa, ok := w.(io.WriterAt); ok {
lastPgI := f.size >> pgBits
for pgI := int64(0); pgI <= lastPgI; pgI++ {
sz := pgSize
if pgI == lastPgI {
sz = int(f.size & pgMask)
}
pg := f.m[pgI]
if pg != nil {
wn, err = wa.WriteAt(pg[:sz], off)
if err != nil {
return
}
n += int64(wn)
off += int64(sz)
if wn != sz {
return n, io.ErrShortWrite
}
}
}
return
}
var werr error
for rerr == nil {
if rn, rerr = f.ReadAt(b[:], off); rn != 0 {
off += int64(rn)
if wn, werr = w.Write(b[:rn]); werr != nil {
return n, werr
}
n += int64(wn)
}
}
if !fileutil.IsEOF(rerr) {
err = rerr
}
return
}
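A quick illustration of the page arithmetic the removed MemFiler was built on; the constants are the ones from the deleted file, and the `main` wrapper is just for demonstration:

```go
package main

import "fmt"

// Constants as in the removed memfiler.go: 64 KiB pages.
const (
	pgBits = 16
	pgSize = 1 << pgBits
	pgMask = pgSize - 1
)

func main() {
	off := int64(3*pgSize + 42)
	fmt.Println(off >> pgBits) // page index: 3
	fmt.Println(off & pgMask)  // offset within that page: 42
	// Pages holding only zeros are simply absent from the page map,
	// which is why PunchHole could be implemented as plain deletes.
}
```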

View File

@@ -10,6 +10,10 @@
package zappy
import (
"github.com/cznic/internal/buffer"
)
/*
#include <stdint.h>
@@ -109,7 +113,7 @@ func Decode(buf, src []byte) ([]byte, error) {
}
if len(buf) < dLen {
buf = make([]byte, dLen)
buf = *buffer.Get(dLen)
}
d := int(C.decode(C.int(s), C.int(len(src)), (*C.uint8_t)(&src[0]), C.int(len(buf)), (*C.uint8_t)(&buf[0])))
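This hunk (and the pure-Go variant below) swaps the per-call make for cznic's internal buffer pool; assuming the vendored package matches its upstream, buffer.Get(n) returns a *[]byte of length n backed by a recycling pool, hence the dereference.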

View File

@@ -12,6 +12,8 @@ package zappy
import (
"encoding/binary"
"github.com/cznic/internal/buffer"
)
func puregoDecode() bool { return true }
@@ -35,7 +37,7 @@ func Decode(buf, src []byte) ([]byte, error) {
}
if len(buf) < dLen {
buf = make([]byte, dLen)
buf = *buffer.Get(dLen)
}
var d, offset, length int

View File

@@ -33,5 +33,5 @@ func emitLiteral(dst, lit []byte) (n int) {
// MaxEncodedLen returns the maximum length of a zappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
return 10 + srcLen
return 10 + srcLen + (srcLen+1)/2
}
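The widened bound is worth a sanity check: for a 100-byte input the old formula allowed 110 output bytes, while the new one allows 10 + 100 + (100+1)/2 = 160, presumably to cover worst-case inputs whose literal encoding expands rather than compresses.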

View File

@@ -107,6 +107,8 @@ import (
"encoding/binary"
"fmt"
"math"
"github.com/cznic/internal/buffer"
)
func puregoEncode() bool { return false }
@@ -117,7 +119,7 @@ func puregoEncode() bool { return false }
// It is valid to pass a nil buf.
func Encode(buf, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(buf) < n {
buf = make([]byte, n)
buf = *buffer.Get(n)
}
if len(src) > math.MaxInt32 {

View File

@@ -14,6 +14,8 @@ import (
"encoding/binary"
"fmt"
"math"
"github.com/cznic/internal/buffer"
)
func puregoEncode() bool { return true }
@@ -24,7 +26,7 @@ func puregoEncode() bool { return true }
// It is valid to pass a nil buf.
func Encode(buf, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(buf) < n {
buf = make([]byte, n)
buf = *buffer.Get(n)
}
if len(src) > math.MaxInt32 {

View File

@@ -124,7 +124,6 @@ Old=Go snappy, new=zappy:
The package builds with CGO_ENABLED=0 as well, but the performance is worse.
$ CGO_ENABLED=0 go test -test.run=NONE -test.bench=. > old.benchcmp
$ CGO_ENABLED=1 go test -test.run=NONE -test.bench=. > new.benchcmp
$ benchcmp old.benchcmp new.benchcmp

View File

@@ -1,65 +0,0 @@
# messagediff [![Build Status](https://travis-ci.org/d4l3k/messagediff.svg?branch=master)](https://travis-ci.org/d4l3k/messagediff) [![Coverage Status](https://coveralls.io/repos/github/d4l3k/messagediff/badge.svg?branch=master)](https://coveralls.io/github/d4l3k/messagediff?branch=master) [![GoDoc](https://godoc.org/github.com/d4l3k/messagediff?status.svg)](https://godoc.org/github.com/d4l3k/messagediff)
A library for doing diffs of arbitrary Golang structs.
If the unsafe package is available messagediff will diff unexported fields in
addition to exported fields. This is primarily used for testing purposes as it
allows for providing informative error messages.
## Example Usage
In a normal file:
```go
package main
import "github.com/d4l3k/messagediff"
type someStruct struct {
A, b int
C []int
}
func main() {
a := someStruct{1, 2, []int{1}}
b := someStruct{1, 3, []int{1, 2}}
diff, equal := messagediff.PrettyDiff(a, b)
/*
diff =
`added: .C[1] = 2
modified: .b = 3`
equal = false
*/
}
```
In a test:
```go
import "github.com/d4l3k/messagediff"
...
type someStruct struct {
A, b int
C []int
}
func TestSomething(t *testing.T) {
want := someStruct{1, 2, []int{1}}
got := someStruct{1, 3, []int{1, 2}}
if diff, equal := messagediff.PrettyDiff(want, got); !equal {
t.Errorf("Something() = %#v\n%s", got, diff)
}
}
```
See the `DeepDiff` function for using the diff results programmatically.
## License
Copyright (c) 2015 [Tristan Rice](https://fn.lc) <rice@fn.lc>
messagediff is licensed under the MIT license. See the LICENSE file for more information.
bypass.go and bypasssafe.go are borrowed from
[go-spew](https://github.com/davecgh/go-spew) and have a seperate copyright
notice.

View File

@@ -0,0 +1,59 @@
package examples
import (
"fmt"
"github.com/d4l3k/messagediff"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
func ExampleAtom() {
got := data2()
want := data1()
diff, equal := messagediff.PrettyDiff(want, got)
fmt.Printf("%v %s", equal, diff)
// Output: false modified: [0].FirstChild.NextSibling.Attr = " baz"
}
func data1() []*html.Node {
n := &html.Node{
Type: html.ElementNode, Data: atom.Span.String(),
Attr: []html.Attribute{
{Key: atom.Class.String(), Val: "foo"},
},
}
n.AppendChild(
&html.Node{
Type: html.ElementNode, Data: atom.Span.String(),
Attr: []html.Attribute{
{Key: atom.Class.String(), Val: "bar"},
},
},
)
n.AppendChild(&html.Node{
Type: html.TextNode, Data: "baz",
})
return []*html.Node{n}
}
func data2() []*html.Node {
n := &html.Node{
Type: html.ElementNode, Data: atom.Span.String(),
Attr: []html.Attribute{
{Key: atom.Class.String(), Val: "foo"},
},
}
n.AppendChild(
&html.Node{
Type: html.ElementNode, Data: atom.Span.String(),
Attr: []html.Attribute{
{Key: atom.Class.String(), Val: "bar"},
},
},
)
n.AppendChild(&html.Node{
Type: html.TextNode, Data: " baz",
})
return []*html.Node{n}
}

View File

@@ -0,0 +1 @@
package examples

View File

@@ -5,6 +5,7 @@ import (
"reflect"
"sort"
"strings"
"unsafe"
)
// PrettyDiff does a deep comparison and returns the nicely formatted results.
@@ -26,43 +27,87 @@ func PrettyDiff(a, b interface{}) (string, bool) {
// DeepDiff does a deep comparison and returns the results.
func DeepDiff(a, b interface{}) (*Diff, bool) {
d := newdiff()
return d, diff(a, b, nil, d)
d := newDiff()
return d, d.diff(reflect.ValueOf(a), reflect.ValueOf(b), nil)
}
func newdiff() *Diff {
func newDiff() *Diff {
return &Diff{
Added: make(map[*Path]interface{}),
Removed: make(map[*Path]interface{}),
Modified: make(map[*Path]interface{}),
visited: make(map[visit]bool),
}
}
func diff(a, b interface{}, path Path, d *Diff) bool {
aVal := reflect.ValueOf(a)
bVal := reflect.ValueOf(b)
func (d *Diff) diff(aVal, bVal reflect.Value, path Path) bool {
// Validity checks. Should only trigger if nil is one of the original arguments.
if !aVal.IsValid() && !bVal.IsValid() {
// Both are nil.
return true
}
if !aVal.IsValid() || !bVal.IsValid() {
// One is nil and the other isn't.
d.Modified[&path] = b
if !bVal.IsValid() {
d.Modified[&path] = nil
return false
} else if !aVal.IsValid() {
d.Modified[&path] = bVal.Interface()
return false
}
if aVal.Type() != bVal.Type() {
d.Modified[&path] = b
d.Modified[&path] = bVal.Interface()
return false
}
kind := aVal.Type().Kind()
kind := aVal.Kind()
// Borrowed from the reflect package to handle recursive data structures.
hard := func(k reflect.Kind) bool {
switch k {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
return true
}
return false
}
if aVal.CanAddr() && bVal.CanAddr() && hard(kind) {
addr1 := unsafe.Pointer(aVal.UnsafeAddr())
addr2 := unsafe.Pointer(bVal.UnsafeAddr())
if uintptr(addr1) > uintptr(addr2) {
// Canonicalize order to reduce number of entries in visited.
// Assumes non-moving garbage collector.
addr1, addr2 = addr2, addr1
}
// Short circuit if references are already seen.
typ := aVal.Type()
v := visit{addr1, addr2, typ}
if d.visited[v] {
return true
}
// Remember for later.
d.visited[v] = true
}
// End of borrowed code.
equal := true
switch kind {
case reflect.Map, reflect.Ptr, reflect.Func, reflect.Chan, reflect.Slice:
if aVal.IsNil() && bVal.IsNil() {
return true
}
if aVal.IsNil() || bVal.IsNil() {
d.Modified[&path] = bVal.Interface()
return false
}
}
switch kind {
case reflect.Array, reflect.Slice:
aLen := aVal.Len()
bLen := bVal.Len()
for i := 0; i < min(aLen, bLen); i++ {
localPath := append(path, SliceIndex(i))
if eq := diff(aVal.Index(i).Interface(), bVal.Index(i).Interface(), localPath, d); !eq {
if eq := d.diff(aVal.Index(i), bVal.Index(i), localPath); !eq {
equal = false
}
}
@@ -87,7 +132,7 @@ func diff(a, b interface{}, path Path, d *Diff) bool {
if !bI.IsValid() {
d.Removed[&localPath] = aI.Interface()
equal = false
} else if eq := diff(aI.Interface(), bI.Interface(), localPath, d); !eq {
} else if eq := d.diff(aI, bI, localPath); !eq {
equal = false
}
}
@@ -106,30 +151,19 @@ func diff(a, b interface{}, path Path, d *Diff) bool {
index := []int{i}
field := typ.FieldByIndex(index)
localPath := append(path, StructField(field.Name))
aI := unsafeReflectValue(aVal.FieldByIndex(index)).Interface()
bI := unsafeReflectValue(bVal.FieldByIndex(index)).Interface()
if eq := diff(aI, bI, localPath, d); !eq {
aI := unsafeReflectValue(aVal.FieldByIndex(index))
bI := unsafeReflectValue(bVal.FieldByIndex(index))
if eq := d.diff(aI, bI, localPath); !eq {
equal = false
}
}
case reflect.Ptr:
aVal = aVal.Elem()
bVal = bVal.Elem()
if !aVal.IsValid() && !bVal.IsValid() {
// Both are nil.
equal = true
} else if !aVal.IsValid() || !bVal.IsValid() {
// One is nil and the other isn't.
d.Modified[&path] = b
equal = false
} else {
equal = diff(aVal.Interface(), bVal.Interface(), path, d)
}
equal = d.diff(aVal.Elem(), bVal.Elem(), path)
default:
if reflect.DeepEqual(a, b) {
if reflect.DeepEqual(aVal.Interface(), bVal.Interface()) {
equal = true
} else {
d.Modified[&path] = b
d.Modified[&path] = bVal.Interface()
equal = false
}
}
@@ -143,9 +177,21 @@ func min(a, b int) int {
return b
}
// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// Visited comparisons are stored in a map indexed by visit.
// This is borrowed from the reflect package.
type visit struct {
a1 unsafe.Pointer
a2 unsafe.Pointer
typ reflect.Type
}
// Diff represents a change in a struct.
type Diff struct {
Added, Removed, Modified map[*Path]interface{}
visited map[visit]bool
}
// Path represents a path to a changed datum.
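A short usage sketch of the refactored API: thanks to the new `visited` map, the diff now terminates on cyclic values instead of recursing forever. It mirrors the `TestPrettyDiffRecursive` case added further down; the `main` wrapper is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/d4l3k/messagediff"
)

type RecursiveStruct struct {
	Key   int
	Child *RecursiveStruct
}

func main() {
	a := &RecursiveStruct{Key: 1}
	a.Child = &RecursiveStruct{Key: 1, Child: a} // cycle: a -> child -> a

	b := &RecursiveStruct{Key: 2}
	b.Child = &RecursiveStruct{Key: 2, Child: b}

	diff, equal := messagediff.PrettyDiff(a, b)
	fmt.Println(equal) // false
	fmt.Print(diff)
	// modified: .Child.Key = 2
	// modified: .Key = 2
}
```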

View File

@@ -8,14 +8,46 @@ import (
type testStruct struct {
A, b int
C []int
D [3]int
}
type RecursiveStruct struct {
Key int
Child *RecursiveStruct
}
func newRecursiveStruct(key int) *RecursiveStruct {
a := &RecursiveStruct{
Key: key,
}
b := &RecursiveStruct{
Key: key,
Child: a,
}
a.Child = b
return a
}
type testCase struct {
a, b interface{}
diff string
equal bool
}
func checkTestCases(t *testing.T, testData []testCase) {
for i, td := range testData {
diff, equal := PrettyDiff(td.a, td.b)
if diff != td.diff {
t.Errorf("%d. PrettyDiff(%#v, %#v) diff = %#v; not %#v", i, td.a, td.b, diff, td.diff)
}
if equal != td.equal {
t.Errorf("%d. PrettyDiff(%#v, %#v) equal = %#v; not %#v", i, td.a, td.b, equal, td.equal)
}
}
}
func TestPrettyDiff(t *testing.T) {
testData := []struct {
a, b interface{}
diff string
equal bool
}{
testData := []testCase{
{
true,
false,
@@ -59,8 +91,8 @@ func TestPrettyDiff(t *testing.T) {
false,
},
{
testStruct{1, 2, []int{1}},
testStruct{1, 3, []int{1, 2}},
testStruct{1, 2, []int{1}, [3]int{4, 5, 6}},
testStruct{1, 3, []int{1, 2}, [3]int{4, 5, 6}},
"added: .C[1] = 2\nmodified: .b = 3\n",
false,
},
@@ -71,11 +103,17 @@ func TestPrettyDiff(t *testing.T) {
true,
},
{
&time.Time{},
&struct{}{},
nil,
"modified: = <nil>\n",
false,
},
{
nil,
&struct{}{},
"modified: = &struct {}{}\n",
false,
},
{
time.Time{},
time.Time{},
@@ -89,15 +127,25 @@ func TestPrettyDiff(t *testing.T) {
false,
},
}
for i, td := range testData {
diff, equal := PrettyDiff(td.a, td.b)
if diff != td.diff {
t.Errorf("%d. PrettyDiff(%#v, %#v) diff = %#v; not %#v", i, td.a, td.b, diff, td.diff)
}
if equal != td.equal {
t.Errorf("%d. PrettyDiff(%#v, %#v) equal = %#v; not %#v", i, td.a, td.b, equal, td.equal)
}
checkTestCases(t, testData)
}
func TestPrettyDiffRecursive(t *testing.T) {
testData := []testCase{
{
newRecursiveStruct(1),
newRecursiveStruct(1),
"",
true,
},
{
newRecursiveStruct(1),
newRecursiveStruct(2),
"modified: .Child.Key = 2\nmodified: .Key = 2\n",
false,
},
}
checkTestCases(t, testData)
}
func TestPathString(t *testing.T) {

25
vendor/github.com/edsrzf/mmap-go/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

112
vendor/github.com/edsrzf/mmap-go/mmap.go generated vendored Normal file

@@ -0,0 +1,112 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file defines the common package interface and contains a little bit of
// factored out logic.
// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
// but doesn't go out of its way to abstract away every little platform detail.
// This specifically means:
// * forked processes may or may not inherit mappings
// * a file's timestamp may or may not be updated by writes through mappings
// * specifying a size larger than the file's actual size can increase the file's size
// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
package mmap
import (
"errors"
"os"
"reflect"
"unsafe"
)
const (
// RDONLY maps the memory read-only.
// Attempts to write to the MMap object will result in undefined behavior.
RDONLY = 0
// RDWR maps the memory as read-write. Writes to the MMap object will update the
// underlying file.
RDWR = 1 << iota
// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
// memory, but the underlying file will remain unchanged.
COPY
// If EXEC is set, the mapped memory is marked as executable.
EXEC
)
const (
// If the ANON flag is set, the mapped memory will not be backed by a file.
ANON = 1 << iota
)
// MMap represents a file mapped into memory.
type MMap []byte
// Map maps an entire file into memory.
// If ANON is set in flags, f is ignored.
func Map(f *os.File, prot, flags int) (MMap, error) {
return MapRegion(f, -1, prot, flags, 0)
}
// MapRegion maps part of a file into memory.
// The offset parameter must be a multiple of the system's page size.
// If length < 0, the entire file will be mapped.
// If ANON is set in flags, f is ignored.
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
var fd uintptr
if flags&ANON == 0 {
fd = uintptr(f.Fd())
if length < 0 {
fi, err := f.Stat()
if err != nil {
return nil, err
}
length = int(fi.Size())
}
} else {
if length <= 0 {
return nil, errors.New("anonymous mapping requires non-zero length")
}
fd = ^uintptr(0)
}
return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
}
func (m *MMap) header() *reflect.SliceHeader {
return (*reflect.SliceHeader)(unsafe.Pointer(m))
}
// Lock keeps the mapped region in physical memory, ensuring that it will not be
// swapped out.
func (m MMap) Lock() error {
dh := m.header()
return lock(dh.Data, uintptr(dh.Len))
}
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
// be swapped out.
// If m is already unlocked, an error will result.
func (m MMap) Unlock() error {
dh := m.header()
return unlock(dh.Data, uintptr(dh.Len))
}
// Flush synchronizes the mapping's contents to the file's contents on disk.
func (m MMap) Flush() error {
dh := m.header()
return flush(dh.Data, uintptr(dh.Len))
}
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
// m to nil.
// Trying to read or write any remaining references to m after Unmap is called will
// result in undefined behavior.
// Unmap should only be called on the slice value that was originally returned from
// a call to Map. Calling Unmap on a derived slice may cause errors.
func (m *MMap) Unmap() error {
dh := m.header()
err := unmap(dh.Data, uintptr(dh.Len))
*m = nil
return err
}
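A minimal usage sketch of the vendored mmap-go API added above; it assumes a writable, non-empty file at the hypothetical path `data.bin`:

```go
package main

import (
	"log"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	f, err := os.OpenFile("data.bin", os.O_RDWR, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	m, err := mmap.Map(f, mmap.RDWR, 0) // map the whole file read-write
	if err != nil {
		log.Fatal(err)
	}
	m[0] = 'x' // writes go straight to the mapped memory
	if err := m.Flush(); err != nil { // push changes to the file on disk
		log.Fatal(err)
	}
	if err := m.Unmap(); err != nil { // unmap and set m to nil
		log.Fatal(err)
	}
}
```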

67
vendor/github.com/edsrzf/mmap-go/mmap_unix.go generated vendored Normal file

@@ -0,0 +1,67 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris netbsd
package mmap
import (
"syscall"
)
func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
flags := syscall.MAP_SHARED
prot := syscall.PROT_READ
switch {
case inprot&COPY != 0:
prot |= syscall.PROT_WRITE
flags = syscall.MAP_PRIVATE
case inprot&RDWR != 0:
prot |= syscall.PROT_WRITE
}
if inprot&EXEC != 0 {
prot |= syscall.PROT_EXEC
}
if inflags&ANON != 0 {
flags |= syscall.MAP_ANON
}
b, err := syscall.Mmap(int(fd), off, len, prot, flags)
if err != nil {
return nil, err
}
return b, nil
}
func flush(addr, len uintptr) error {
_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func lock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unlock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unmap(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}

125
vendor/github.com/edsrzf/mmap-go/mmap_windows.go generated vendored Normal file

@@ -0,0 +1,125 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
import (
"errors"
"os"
"sync"
"syscall"
)
// mmap on Windows is a two-step process.
// First, we call CreateFileMapping to get a handle.
// Then, we call MapViewOfFile to get an actual pointer into memory.
// Because we want to emulate a POSIX-style mmap, we don't want to expose
// the handle -- only the pointer. We also want to return only a byte slice,
// not a struct, so it's convenient to manipulate.
// We keep this map so that we can get back the original handle from the memory address.
var handleLock sync.Mutex
var handleMap = map[uintptr]syscall.Handle{}
func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
flProtect := uint32(syscall.PAGE_READONLY)
dwDesiredAccess := uint32(syscall.FILE_MAP_READ)
switch {
case prot&COPY != 0:
flProtect = syscall.PAGE_WRITECOPY
dwDesiredAccess = syscall.FILE_MAP_COPY
case prot&RDWR != 0:
flProtect = syscall.PAGE_READWRITE
dwDesiredAccess = syscall.FILE_MAP_WRITE
}
if prot&EXEC != 0 {
flProtect <<= 4
dwDesiredAccess |= syscall.FILE_MAP_EXECUTE
}
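// The Windows page-protection constants are laid out so that the
// executable variant of each protection is the base value shifted
// left by four bits (PAGE_READONLY 0x02 -> PAGE_EXECUTE_READ 0x20,
// PAGE_READWRITE 0x04 -> 0x40, PAGE_WRITECOPY 0x08 -> 0x80), which
// is what the flProtect <<= 4 above relies on.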
// The maximum size is the area of the file, starting from 0,
// that we wish to allow to be mappable. It is the sum of
// the length the user requested, plus the offset where that length
// is starting from. This does not map the data into memory.
maxSizeHigh := uint32((off + int64(len)) >> 32)
maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
// TODO: Do we need to set some security attributes? It might help portability.
h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
if h == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
// Actually map a view of the data into memory. The view's size
// is the length the user requested.
fileOffsetHigh := uint32(off >> 32)
fileOffsetLow := uint32(off & 0xFFFFFFFF)
addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
handleLock.Lock()
handleMap[addr] = h
handleLock.Unlock()
m := MMap{}
dh := m.header()
dh.Data = addr
dh.Len = len
dh.Cap = dh.Len
return m, nil
}
func flush(addr, len uintptr) error {
errno := syscall.FlushViewOfFile(addr, len)
if errno != nil {
return os.NewSyscallError("FlushViewOfFile", errno)
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
errno = syscall.FlushFileBuffers(handle)
return os.NewSyscallError("FlushFileBuffers", errno)
}
func lock(addr, len uintptr) error {
errno := syscall.VirtualLock(addr, len)
return os.NewSyscallError("VirtualLock", errno)
}
func unlock(addr, len uintptr) error {
errno := syscall.VirtualUnlock(addr, len)
return os.NewSyscallError("VirtualUnlock", errno)
}
func unmap(addr, len uintptr) error {
flush(addr, len)
// Lock the UnmapViewOfFile along with the handleMap deletion.
// As soon as we unmap the view, the OS is free to give the
// same addr to another new map. We don't want another goroutine
// to insert and remove the same addr into handleMap while
// we're trying to remove our old addr/handle pair.
handleLock.Lock()
defer handleLock.Unlock()
err := syscall.UnmapViewOfFile(addr)
if err != nil {
return err
}
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
delete(handleMap, addr)
e := syscall.CloseHandle(syscall.Handle(handle))
return os.NewSyscallError("CloseHandle", e)
}

8
vendor/github.com/edsrzf/mmap-go/msync_netbsd.go generated vendored Normal file

@@ -0,0 +1,8 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
const _SYS_MSYNC = 277
const _MS_SYNC = 0x04

14
vendor/github.com/edsrzf/mmap-go/msync_unix.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris
package mmap
import (
"syscall"
)
const _SYS_MSYNC = syscall.SYS_MSYNC
const _MS_SYNC = syscall.MS_SYNC

View File

@@ -1,508 +0,0 @@
package glob
// TODO use constructor with all matchers, and to their structs private
// TODO glue multiple Text nodes (like after QuoteMeta)
import (
"fmt"
"github.com/gobwas/glob/match"
"github.com/gobwas/glob/runes"
"reflect"
)
func optimize(matcher match.Matcher) match.Matcher {
switch m := matcher.(type) {
case match.Any:
if len(m.Separators) == 0 {
return match.NewSuper()
}
case match.AnyOf:
if len(m.Matchers) == 1 {
return m.Matchers[0]
}
return m
case match.List:
if m.Not == false && len(m.List) == 1 {
return match.NewText(string(m.List))
}
return m
case match.BTree:
m.Left = optimize(m.Left)
m.Right = optimize(m.Right)
r, ok := m.Value.(match.Text)
if !ok {
return m
}
leftNil := m.Left == nil
rightNil := m.Right == nil
if leftNil && rightNil {
return match.NewText(r.Str)
}
_, leftSuper := m.Left.(match.Super)
lp, leftPrefix := m.Left.(match.Prefix)
_, rightSuper := m.Right.(match.Super)
rs, rightSuffix := m.Right.(match.Suffix)
if leftSuper && rightSuper {
return match.NewContains(r.Str, false)
}
if leftSuper && rightNil {
return match.NewSuffix(r.Str)
}
if rightSuper && leftNil {
return match.NewPrefix(r.Str)
}
if leftNil && rightSuffix {
return match.NewPrefixSuffix(r.Str, rs.Suffix)
}
if rightNil && leftPrefix {
return match.NewPrefixSuffix(lp.Prefix, r.Str)
}
return m
}
return matcher
}
func glueMatchers(matchers []match.Matcher) match.Matcher {
var (
glued []match.Matcher
winner match.Matcher
)
maxLen := -1
if m := glueAsEvery(matchers); m != nil {
glued = append(glued, m)
return m
}
if m := glueAsRow(matchers); m != nil {
glued = append(glued, m)
return m
}
for _, g := range glued {
if l := g.Len(); l > maxLen {
maxLen = l
winner = g
}
}
return winner
}
func glueAsRow(matchers []match.Matcher) match.Matcher {
if len(matchers) <= 1 {
return nil
}
var (
c []match.Matcher
l int
)
for _, matcher := range matchers {
if ml := matcher.Len(); ml == -1 {
return nil
} else {
c = append(c, matcher)
l += ml
}
}
return match.NewRow(l, c...)
}
func glueAsEvery(matchers []match.Matcher) match.Matcher {
if len(matchers) <= 1 {
return nil
}
var (
hasAny bool
hasSuper bool
hasSingle bool
min int
separator []rune
)
for i, matcher := range matchers {
var sep []rune
switch m := matcher.(type) {
case match.Super:
sep = []rune{}
hasSuper = true
case match.Any:
sep = m.Separators
hasAny = true
case match.Single:
sep = m.Separators
hasSingle = true
min++
case match.List:
if !m.Not {
return nil
}
sep = m.List
hasSingle = true
min++
default:
return nil
}
// initialize
if i == 0 {
separator = sep
}
if runes.Equal(sep, separator) {
continue
}
return nil
}
if hasSuper && !hasAny && !hasSingle {
return match.NewSuper()
}
if hasAny && !hasSuper && !hasSingle {
return match.NewAny(separator)
}
if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
return match.NewMin(min)
}
every := match.NewEveryOf()
if min > 0 {
every.Add(match.NewMin(min))
if !hasAny && !hasSuper {
every.Add(match.NewMax(min))
}
}
if len(separator) > 0 {
every.Add(match.NewContains(string(separator), true))
}
return every
}
func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
var done match.Matcher
var left, right, count int
for l := 0; l < len(matchers); l++ {
for r := len(matchers); r > l; r-- {
if glued := glueMatchers(matchers[l:r]); glued != nil {
var swap bool
if done == nil {
swap = true
} else {
cl, gl := done.Len(), glued.Len()
swap = cl > -1 && gl > -1 && gl > cl
swap = swap || count < r-l
}
if swap {
done = glued
left = l
right = r
count = r - l
}
}
}
}
if done == nil {
return matchers
}
next := append(append([]match.Matcher{}, matchers[:left]...), done)
if right < len(matchers) {
next = append(next, matchers[right:]...)
}
if len(next) == len(matchers) {
return next
}
return minimizeMatchers(next)
}
func minimizeAnyOf(children []node) node {
var nodes [][]node
var min int
var idx int
for i, desc := range children {
pat, ok := desc.(*nodePattern)
if !ok {
return nil
}
n := pat.children()
ln := len(n)
if len(nodes) == 0 || (ln < min) {
min = ln
idx = i
}
nodes = append(nodes, pat.children())
}
minNodes := nodes[idx]
if idx+1 < len(nodes) {
nodes = append(nodes[:idx], nodes[idx+1:]...)
} else {
nodes = nodes[:idx]
}
var commonLeft []node
var commonLeftCount int
for i, n := range minNodes {
has := true
for _, t := range nodes {
if !reflect.DeepEqual(n, t[i]) {
has = false
break
}
}
if has {
commonLeft = append(commonLeft, n)
commonLeftCount++
} else {
break
}
}
var commonRight []node
var commonRightCount int
for i := min - 1; i > commonLeftCount-1; i-- {
n := minNodes[i]
has := true
for _, t := range nodes {
if !reflect.DeepEqual(n, t[len(t)-(min-i)]) {
has = false
break
}
}
if has {
commonRight = append(commonRight, n)
commonRightCount++
} else {
break
}
}
if commonLeftCount == 0 && commonRightCount == 0 {
return nil
}
nodes = append(nodes, minNodes)
nodes[len(nodes)-1], nodes[idx] = nodes[idx], nodes[len(nodes)-1]
var result []node
if commonLeftCount > 0 {
result = append(result, &nodePattern{nodeImpl: nodeImpl{desc: commonLeft}})
}
var anyOf []node
for _, n := range nodes {
if commonLeftCount+commonRightCount == len(n) {
anyOf = append(anyOf, nil)
} else {
anyOf = append(anyOf, &nodePattern{nodeImpl: nodeImpl{desc: n[commonLeftCount : len(n)-commonRightCount]}})
}
}
anyOf = uniqueNodes(anyOf)
if len(anyOf) == 1 {
if anyOf[0] != nil {
result = append(result, &nodePattern{nodeImpl: nodeImpl{desc: anyOf}})
}
} else {
result = append(result, &nodeAnyOf{nodeImpl: nodeImpl{desc: anyOf}})
}
if commonRightCount > 0 {
result = append(result, &nodePattern{nodeImpl: nodeImpl{desc: commonRight}})
}
return &nodePattern{nodeImpl: nodeImpl{desc: result}}
}
func uniqueNodes(nodes []node) (result []node) {
head:
for _, n := range nodes {
for _, e := range result {
if reflect.DeepEqual(e, n) {
continue head
}
}
result = append(result, n)
}
return
}
func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
if len(matchers) == 0 {
return nil, fmt.Errorf("compile error: need at least one matcher")
}
if len(matchers) == 1 {
return matchers[0], nil
}
if m := glueMatchers(matchers); m != nil {
return m, nil
}
idx := -1
maxLen := -1
var val match.Matcher
for i, matcher := range matchers {
if l := matcher.Len(); l != -1 && l >= maxLen {
maxLen = l
idx = i
val = matcher
}
}
if val == nil { // not found matcher with static length
r, err := compileMatchers(matchers[1:])
if err != nil {
return nil, err
}
return match.NewBTree(matchers[0], nil, r), nil
}
left := matchers[:idx]
var right []match.Matcher
if len(matchers) > idx+1 {
right = matchers[idx+1:]
}
var l, r match.Matcher
var err error
if len(left) > 0 {
l, err = compileMatchers(left)
if err != nil {
return nil, err
}
}
if len(right) > 0 {
r, err = compileMatchers(right)
if err != nil {
return nil, err
}
}
return match.NewBTree(val, l, r), nil
}
func do(leaf node, s []rune) (m match.Matcher, err error) {
switch n := leaf.(type) {
case *nodeAnyOf:
// todo this could be faster on pattern_alternatives_combine_lite
if n := minimizeAnyOf(n.children()); n != nil {
return do(n, s)
}
var matchers []match.Matcher
for _, desc := range n.children() {
if desc == nil {
matchers = append(matchers, match.NewNothing())
continue
}
m, err := do(desc, s)
if err != nil {
return nil, err
}
matchers = append(matchers, optimize(m))
}
return match.NewAnyOf(matchers...), nil
case *nodePattern:
nodes := leaf.children()
if len(nodes) == 0 {
return match.NewNothing(), nil
}
var matchers []match.Matcher
for _, desc := range nodes {
m, err := do(desc, s)
if err != nil {
return nil, err
}
matchers = append(matchers, optimize(m))
}
m, err = compileMatchers(minimizeMatchers(matchers))
if err != nil {
return nil, err
}
case *nodeList:
m = match.NewList([]rune(n.chars), n.not)
case *nodeRange:
m = match.NewRange(n.lo, n.hi, n.not)
case *nodeAny:
m = match.NewAny(s)
case *nodeSuper:
m = match.NewSuper()
case *nodeSingle:
m = match.NewSingle(s)
case *nodeText:
m = match.NewText(n.text)
default:
return nil, fmt.Errorf("could not compile tree: unknown node type")
}
return optimize(m), nil
}
func compile(ast *nodePattern, s []rune) (Glob, error) {
g, err := do(ast, s)
if err != nil {
return nil, err
}
return g, nil
}

518
vendor/github.com/gobwas/glob/compiler/compiler.go generated vendored Normal file

@@ -0,0 +1,518 @@
package compiler
// TODO use constructor with all matchers, and to their structs private
// TODO glue multiple Text nodes (like after QuoteMeta)
import (
"fmt"
"github.com/gobwas/glob/match"
"github.com/gobwas/glob/syntax/ast"
"github.com/gobwas/glob/util/runes"
"reflect"
)
func optimizeMatcher(matcher match.Matcher) match.Matcher {
switch m := matcher.(type) {
case match.Any:
if len(m.Separators) == 0 {
return match.NewSuper()
}
case match.AnyOf:
if len(m.Matchers) == 1 {
return m.Matchers[0]
}
return m
case match.List:
if m.Not == false && len(m.List) == 1 {
return match.NewText(string(m.List))
}
return m
case match.BTree:
m.Left = optimizeMatcher(m.Left)
m.Right = optimizeMatcher(m.Right)
r, ok := m.Value.(match.Text)
if !ok {
return m
}
leftNil := m.Left == nil
rightNil := m.Right == nil
if leftNil && rightNil {
return match.NewText(r.Str)
}
_, leftSuper := m.Left.(match.Super)
lp, leftPrefix := m.Left.(match.Prefix)
_, rightSuper := m.Right.(match.Super)
rs, rightSuffix := m.Right.(match.Suffix)
if leftSuper && rightSuper {
return match.NewContains(r.Str, false)
}
if leftSuper && rightNil {
return match.NewSuffix(r.Str)
}
if rightSuper && leftNil {
return match.NewPrefix(r.Str)
}
if leftNil && rightSuffix {
return match.NewPrefixSuffix(r.Str, rs.Suffix)
}
if rightNil && leftPrefix {
return match.NewPrefixSuffix(lp.Prefix, r.Str)
}
return m
}
return matcher
}
func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
if len(matchers) == 0 {
return nil, fmt.Errorf("compile error: need at least one matcher")
}
if len(matchers) == 1 {
return matchers[0], nil
}
if m := glueMatchers(matchers); m != nil {
return m, nil
}
idx := -1
maxLen := -1
var val match.Matcher
for i, matcher := range matchers {
if l := matcher.Len(); l != -1 && l >= maxLen {
maxLen = l
idx = i
val = matcher
}
}
if val == nil { // no matcher with a static length was found
r, err := compileMatchers(matchers[1:])
if err != nil {
return nil, err
}
return match.NewBTree(matchers[0], nil, r), nil
}
left := matchers[:idx]
var right []match.Matcher
if len(matchers) > idx+1 {
right = matchers[idx+1:]
}
var l, r match.Matcher
var err error
if len(left) > 0 {
l, err = compileMatchers(left)
if err != nil {
return nil, err
}
}
if len(right) > 0 {
r, err = compileMatchers(right)
if err != nil {
return nil, err
}
}
return match.NewBTree(val, l, r), nil
}
func glueMatchers(matchers []match.Matcher) match.Matcher {
if m := glueMatchersAsEvery(matchers); m != nil {
return m
}
if m := glueMatchersAsRow(matchers); m != nil {
return m
}
return nil
}
func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
if len(matchers) <= 1 {
return nil
}
var (
c []match.Matcher
l int
)
for _, matcher := range matchers {
if ml := matcher.Len(); ml == -1 {
return nil
} else {
c = append(c, matcher)
l += ml
}
}
return match.NewRow(l, c...)
}
func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
if len(matchers) <= 1 {
return nil
}
var (
hasAny bool
hasSuper bool
hasSingle bool
min int
separator []rune
)
for i, matcher := range matchers {
var sep []rune
switch m := matcher.(type) {
case match.Super:
sep = []rune{}
hasSuper = true
case match.Any:
sep = m.Separators
hasAny = true
case match.Single:
sep = m.Separators
hasSingle = true
min++
case match.List:
if !m.Not {
return nil
}
sep = m.List
hasSingle = true
min++
default:
return nil
}
// initialize
if i == 0 {
separator = sep
}
if runes.Equal(sep, separator) {
continue
}
return nil
}
if hasSuper && !hasAny && !hasSingle {
return match.NewSuper()
}
if hasAny && !hasSuper && !hasSingle {
return match.NewAny(separator)
}
if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
return match.NewMin(min)
}
every := match.NewEveryOf()
if min > 0 {
every.Add(match.NewMin(min))
if !hasAny && !hasSuper {
every.Add(match.NewMax(min))
}
}
if len(separator) > 0 {
every.Add(match.NewContains(string(separator), true))
}
return every
}
func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
var done match.Matcher
var left, right, count int
for l := 0; l < len(matchers); l++ {
for r := len(matchers); r > l; r-- {
if glued := glueMatchers(matchers[l:r]); glued != nil {
var swap bool
if done == nil {
swap = true
} else {
cl, gl := done.Len(), glued.Len()
swap = cl > -1 && gl > -1 && gl > cl
swap = swap || count < r-l
}
if swap {
done = glued
left = l
right = r
count = r - l
}
}
}
}
if done == nil {
return matchers
}
next := append(append([]match.Matcher{}, matchers[:left]...), done)
if right < len(matchers) {
next = append(next, matchers[right:]...)
}
if len(next) == len(matchers) {
return next
}
return minimizeMatchers(next)
}
// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree
func minimizeTree(tree *ast.Node) *ast.Node {
switch tree.Kind {
case ast.KindAnyOf:
return minimizeTreeAnyOf(tree)
default:
return nil
}
}
// minimizeTreeAnyOf tries to find common children of the given AnyOf node.
// It searches for common children from the left and from the right;
// if any are found it returns a new, optimized ast tree,
// otherwise it returns nil.
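// For example, {a*c,a?c} parses into two KindPattern children,
// [Text(a) Any Text(c)] and [Text(a) Single Text(c)]: Text(a) and
// Text(c) are common on the left and right, so the tree is rewritten
// as pattern(Text(a)) + anyOf(Any, Single) + pattern(Text(c)).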
func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
if !areOfSameKind(tree.Children, ast.KindPattern) {
return nil
}
commonLeft, commonRight := commonChildren(tree.Children)
commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
return nil
}
var result []*ast.Node
if commonLeftCount > 0 {
result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
}
var anyOf []*ast.Node
for _, child := range tree.Children {
reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
var node *ast.Node
if len(reuse) == 0 {
// this pattern is completely reduced by the commonLeft and commonRight patterns,
// so it becomes nothing
node = ast.NewNode(ast.KindNothing, nil)
} else {
node = ast.NewNode(ast.KindPattern, nil, reuse...)
}
anyOf = appendIfUnique(anyOf, node)
}
switch {
case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
result = append(result, anyOf[0])
case len(anyOf) > 1:
result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
}
if commonRightCount > 0 {
result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
}
return ast.NewNode(ast.KindPattern, nil, result...)
}
func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
if len(nodes) <= 1 {
return
}
// find the node that has the fewest children
idx := leastChildren(nodes)
if idx == -1 {
return
}
tree := nodes[idx]
treeLength := len(tree.Children)
// allocate the maximum possible size for the commonRight slice
// so that elements can be inserted in reverse order (from end to start)
// without sorting
commonRight = make([]*ast.Node, treeLength)
lastRight := treeLength // will use this to get results as commonRight[lastRight:]
var (
breakLeft bool
breakRight bool
commonTotal int
)
for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
treeLeft := tree.Children[i]
treeRight := tree.Children[j]
for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
// skip least children node
if k == idx {
continue
}
restLeft := nodes[k].Children[i]
restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]
breakLeft = breakLeft || !treeLeft.Equal(restLeft)
// disable searching for right common parts, if left part is already overlapping
breakRight = breakRight || (!breakLeft && j <= i)
breakRight = breakRight || !treeRight.Equal(restRight)
}
if !breakLeft {
commonTotal++
commonLeft = append(commonLeft, treeLeft)
}
if !breakRight {
commonTotal++
lastRight = j
commonRight[j] = treeRight
}
}
commonRight = commonRight[lastRight:]
return
}
func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
for _, n := range target {
if reflect.DeepEqual(n, val) {
return target
}
}
return append(target, val)
}
func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
for _, n := range nodes {
if n.Kind != kind {
return false
}
}
return true
}
func leastChildren(nodes []*ast.Node) int {
min := -1
idx := -1
for i, n := range nodes {
if idx == -1 || (len(n.Children) < min) {
min = len(n.Children)
idx = i
}
}
return idx
}
func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
var matchers []match.Matcher
for _, desc := range tree.Children {
m, err := compile(desc, sep)
if err != nil {
return nil, err
}
matchers = append(matchers, optimizeMatcher(m))
}
return matchers, nil
}
func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
switch tree.Kind {
case ast.KindAnyOf:
// todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
if n := minimizeTree(tree); n != nil {
return compile(n, sep)
}
matchers, err := compileTreeChildren(tree, sep)
if err != nil {
return nil, err
}
return match.NewAnyOf(matchers...), nil
case ast.KindPattern:
if len(tree.Children) == 0 {
return match.NewNothing(), nil
}
matchers, err := compileTreeChildren(tree, sep)
if err != nil {
return nil, err
}
m, err = compileMatchers(minimizeMatchers(matchers))
if err != nil {
return nil, err
}
case ast.KindAny:
m = match.NewAny(sep)
case ast.KindSuper:
m = match.NewSuper()
case ast.KindSingle:
m = match.NewSingle(sep)
case ast.KindNothing:
m = match.NewNothing()
case ast.KindList:
l := tree.Value.(ast.List)
m = match.NewList([]rune(l.Chars), l.Not)
case ast.KindRange:
r := tree.Value.(ast.Range)
m = match.NewRange(r.Lo, r.Hi, r.Not)
case ast.KindText:
t := tree.Value.(ast.Text)
m = match.NewText(t.Text)
default:
return nil, fmt.Errorf("could not compile tree: unknown node type")
}
return optimizeMatcher(m), nil
}
func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
m, err := compile(tree, sep)
if err != nil {
return nil, err
}
return m, nil
}

View File

@@ -1,5 +1,10 @@
package glob
import (
"github.com/gobwas/glob/compiler"
"github.com/gobwas/glob/syntax"
)
// Glob represents compiled glob pattern.
type Glob interface {
Match(string) bool
@@ -32,12 +37,12 @@ type Glob interface {
// comma-separated (without spaces) patterns
//
func Compile(pattern string, separators ...rune) (Glob, error) {
ast, err := parse(newLexer(pattern))
ast, err := syntax.Parse(pattern)
if err != nil {
return nil, err
}
matcher, err := compile(ast, separators)
matcher, err := compiler.Compile(ast, separators)
if err != nil {
return nil, err
}
@@ -63,7 +68,7 @@ func QuoteMeta(s string) string {
// a byte loop is correct because all meta characters are ASCII
j := 0
for i := 0; i < len(s); i++ {
if special(s[i]) {
if syntax.Special(s[i]) {
b[j] = '\\'
j++
}
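A brief usage sketch of the public API in the hunk above; the pattern and paths are made up for illustration. `**` may cross the given separators while `*` may not:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// '/' is a separator: '*' will not cross it, '**' will.
	g, err := glob.Compile("src/**/*.go", '/')
	if err != nil {
		panic(err)
	}
	fmt.Println(g.Match("src/lib/model/model.go")) // true
	fmt.Println(g.Match("src/readme.md"))          // false

	fmt.Println(glob.QuoteMeta("literal[*]")) // literal\[\*\]
}
```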

View File

@@ -2,7 +2,7 @@ package match
import (
"fmt"
"github.com/gobwas/glob/strings"
"github.com/gobwas/glob/util/strings"
)
type Any struct {

View File

@@ -2,7 +2,7 @@ package match
import (
"fmt"
"github.com/gobwas/glob/runes"
"github.com/gobwas/glob/util/runes"
"unicode/utf8"
)

View File

@@ -2,7 +2,7 @@ package match
import (
"fmt"
"github.com/gobwas/glob/runes"
"github.com/gobwas/glob/util/runes"
"unicode/utf8"
)

View File

@@ -1,230 +0,0 @@
package glob
import (
"errors"
"fmt"
"unicode/utf8"
)
type node interface {
children() []node
append(node)
}
// todo may be split it into another package
type lexerIface interface {
nextItem() item
}
type nodeImpl struct {
desc []node
}
func (n *nodeImpl) append(c node) {
n.desc = append(n.desc, c)
}
func (n *nodeImpl) children() []node {
return n.desc
}
type nodeList struct {
nodeImpl
not bool
chars string
}
type nodeRange struct {
nodeImpl
not bool
lo, hi rune
}
type nodeText struct {
nodeImpl
text string
}
type nodePattern struct{ nodeImpl }
type nodeAny struct{ nodeImpl }
type nodeSuper struct{ nodeImpl }
type nodeSingle struct{ nodeImpl }
type nodeAnyOf struct{ nodeImpl }
type tree struct {
root node
current node
path []node
}
func (t *tree) enter(c node) {
if t.root == nil {
t.root = c
t.current = c
return
}
t.current.append(c)
t.path = append(t.path, c)
t.current = c
}
func (t *tree) leave() {
if len(t.path)-1 <= 0 {
t.current = t.root
t.path = nil
return
}
t.path = t.path[:len(t.path)-1]
t.current = t.path[len(t.path)-1]
}
type parseFn func(*tree, lexerIface) (parseFn, error)
func parse(lexer lexerIface) (*nodePattern, error) {
var parser parseFn
root := &nodePattern{}
tree := &tree{}
tree.enter(root)
for parser = parserMain; ; {
next, err := parser(tree, lexer)
if err != nil {
return nil, err
}
if next == nil {
break
}
parser = next
}
return root, nil
}
func parserMain(tree *tree, lexer lexerIface) (parseFn, error) {
for stop := false; !stop; {
item := lexer.nextItem()
switch item.t {
case item_eof:
stop = true
continue
case item_error:
return nil, errors.New(item.s)
case item_text:
tree.current.append(&nodeText{text: item.s})
return parserMain, nil
case item_any:
tree.current.append(&nodeAny{})
return parserMain, nil
case item_super:
tree.current.append(&nodeSuper{})
return parserMain, nil
case item_single:
tree.current.append(&nodeSingle{})
return parserMain, nil
case item_range_open:
return parserRange, nil
case item_terms_open:
tree.enter(&nodeAnyOf{})
tree.enter(&nodePattern{})
return parserMain, nil
case item_separator:
tree.leave()
tree.enter(&nodePattern{})
return parserMain, nil
case item_terms_close:
tree.leave()
tree.leave()
return parserMain, nil
default:
return nil, fmt.Errorf("unexpected token: %s", item)
}
}
return nil, nil
}
func parserRange(tree *tree, lexer lexerIface) (parseFn, error) {
var (
not bool
lo rune
hi rune
chars string
)
for {
item := lexer.nextItem()
switch item.t {
case item_eof:
return nil, errors.New("unexpected end")
case item_error:
return nil, errors.New(item.s)
case item_not:
not = true
case item_range_lo:
r, w := utf8.DecodeRuneInString(item.s)
if len(item.s) > w {
return nil, fmt.Errorf("unexpected length of lo character")
}
lo = r
case item_range_between:
//
case item_range_hi:
r, w := utf8.DecodeRuneInString(item.s)
if len(item.s) > w {
return nil, fmt.Errorf("unexpected length of lo character")
}
hi = r
if hi < lo {
return nil, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
}
case item_text:
chars = item.s
case item_range_close:
isRange := lo != 0 && hi != 0
isChars := chars != ""
if isChars == isRange {
return nil, fmt.Errorf("could not parse range")
}
if isRange {
tree.current.append(&nodeRange{
lo: lo,
hi: hi,
not: not,
})
} else {
tree.current.append(&nodeList{
chars: chars,
not: not,
})
}
return parserMain, nil
}
}
}

72
vendor/github.com/gobwas/glob/syntax/ast/ast.go generated vendored Normal file

@@ -0,0 +1,72 @@
package ast
type Node struct {
Parent *Node
Children []*Node
Value interface{}
Kind Kind
}
func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
n := &Node{
Kind: k,
Value: v,
}
for _, c := range ch {
Insert(n, c)
}
return n
}
func (a *Node) Equal(b *Node) bool {
if a.Kind != b.Kind {
return false
}
if a.Value != b.Value {
return false
}
if len(a.Children) != len(b.Children) {
return false
}
for i, c := range a.Children {
if !c.Equal(b.Children[i]) {
return false
}
}
return true
}
func Insert(parent *Node, children ...*Node) {
parent.Children = append(parent.Children, children...)
for _, ch := range children {
ch.Parent = parent
}
}
type List struct {
Not bool
Chars string
}
type Range struct {
Not bool
Lo, Hi rune
}
type Text struct {
Text string
}
type Kind int
const (
KindNothing Kind = iota
KindPattern
KindList
KindRange
KindText
KindAny
KindSuper
KindSingle
KindAnyOf
)

157
vendor/github.com/gobwas/glob/syntax/ast/parser.go generated vendored Normal file

@@ -0,0 +1,157 @@
package ast
import (
"errors"
"fmt"
"github.com/gobwas/glob/syntax/lexer"
"unicode/utf8"
)
type Lexer interface {
Next() lexer.Token
}
type parseFn func(*Node, Lexer) (parseFn, *Node, error)
func Parse(lexer Lexer) (*Node, error) {
var parser parseFn
root := NewNode(KindPattern, nil)
var (
tree *Node
err error
)
for parser, tree = parserMain, root; parser != nil; {
parser, tree, err = parser(tree, lexer)
if err != nil {
return nil, err
}
}
return root, nil
}
func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
for {
token := lex.Next()
switch token.Type {
case lexer.EOF:
return nil, tree, nil
case lexer.Error:
return nil, tree, errors.New(token.Raw)
case lexer.Text:
Insert(tree, NewNode(KindText, Text{token.Raw}))
return parserMain, tree, nil
case lexer.Any:
Insert(tree, NewNode(KindAny, nil))
return parserMain, tree, nil
case lexer.Super:
Insert(tree, NewNode(KindSuper, nil))
return parserMain, tree, nil
case lexer.Single:
Insert(tree, NewNode(KindSingle, nil))
return parserMain, tree, nil
case lexer.RangeOpen:
return parserRange, tree, nil
case lexer.TermsOpen:
a := NewNode(KindAnyOf, nil)
Insert(tree, a)
p := NewNode(KindPattern, nil)
Insert(a, p)
return parserMain, p, nil
case lexer.Separator:
p := NewNode(KindPattern, nil)
Insert(tree.Parent, p)
return parserMain, p, nil
case lexer.TermsClose:
return parserMain, tree.Parent.Parent, nil
default:
return nil, tree, fmt.Errorf("unexpected token: %s", token)
}
}
return nil, tree, fmt.Errorf("unknown error")
}
func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
var (
not bool
lo rune
hi rune
chars string
)
for {
token := lex.Next()
switch token.Type {
case lexer.EOF:
return nil, tree, errors.New("unexpected end")
case lexer.Error:
return nil, tree, errors.New(token.Raw)
case lexer.Not:
not = true
case lexer.RangeLo:
r, w := utf8.DecodeRuneInString(token.Raw)
if len(token.Raw) > w {
return nil, tree, fmt.Errorf("unexpected length of lo character")
}
lo = r
case lexer.RangeBetween:
//
case lexer.RangeHi:
r, w := utf8.DecodeRuneInString(token.Raw)
if len(token.Raw) > w {
return nil, tree, fmt.Errorf("unexpected length of hi character")
}
hi = r
if hi < lo {
return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
}
case lexer.Text:
chars = token.Raw
case lexer.RangeClose:
isRange := lo != 0 && hi != 0
isChars := chars != ""
if isChars == isRange {
return nil, tree, fmt.Errorf("could not parse range")
}
if isRange {
Insert(tree, NewNode(KindRange, Range{
Lo: lo,
Hi: hi,
Not: not,
}))
} else {
Insert(tree, NewNode(KindList, List{
Chars: chars,
Not: not,
}))
}
return parserMain, tree, nil
}
}
}

View File

@@ -1,9 +1,9 @@
package glob
package lexer
import (
"bytes"
"fmt"
"github.com/gobwas/glob/runes"
"github.com/gobwas/glob/util/runes"
"unicode/utf8"
)
@@ -30,123 +30,24 @@ var specials = []byte{
char_terms_close,
}
func special(c byte) bool {
func Special(c byte) bool {
return bytes.IndexByte(specials, c) != -1
}
type itemType int
type tokens []Token
const (
item_eof itemType = iota
item_error
item_text
item_char
item_any
item_super
item_single
item_not
item_separator
item_range_open
item_range_close
item_range_lo
item_range_hi
item_range_between
item_terms_open
item_terms_close
)
func (i itemType) String() string {
switch i {
case item_eof:
return "eof"
case item_error:
return "error"
case item_text:
return "text"
case item_char:
return "char"
case item_any:
return "any"
case item_super:
return "super"
case item_single:
return "single"
case item_not:
return "not"
case item_separator:
return "separator"
case item_range_open:
return "range_open"
case item_range_close:
return "range_close"
case item_range_lo:
return "range_lo"
case item_range_hi:
return "range_hi"
case item_range_between:
return "range_between"
case item_terms_open:
return "terms_open"
case item_terms_close:
return "terms_close"
default:
return "undef"
}
}
type item struct {
t itemType
s string
}
func (i item) String() string {
return fmt.Sprintf("%v<%q>", i.t, i.s)
}
type stubLexer struct {
Items []item
pos int
}
func (s *stubLexer) nextItem() (ret item) {
if s.pos == len(s.Items) {
return item{item_eof, ""}
}
ret = s.Items[s.pos]
s.pos++
return
}
type items []item
func (i *items) shift() (ret item) {
func (i *tokens) shift() (ret Token) {
ret = (*i)[0]
copy(*i, (*i)[1:])
*i = (*i)[:len(*i)-1]
return
}
func (i *items) push(v item) {
func (i *tokens) push(v Token) {
*i = append(*i, v)
}
func (i *items) empty() bool {
func (i *tokens) empty() bool {
return len(*i) == 0
}
@@ -157,7 +58,7 @@ type lexer struct {
pos int
err error
items items
tokens tokens
termsLevel int
lastRune rune
@@ -165,14 +66,26 @@ type lexer struct {
hasRune bool
}
func newLexer(source string) *lexer {
func NewLexer(source string) *lexer {
l := &lexer{
data: source,
items: items(make([]item, 0, 4)),
data: source,
tokens: tokens(make([]Token, 0, 4)),
}
return l
}
func (l *lexer) Next() Token {
if l.err != nil {
return Token{Error, l.err.Error()}
}
if !l.tokens.empty() {
return l.tokens.shift()
}
l.fetchItem()
return l.Next()
}
func (l *lexer) peek() (r rune, w int) {
if l.pos == len(l.data) {
return eof, 0
@@ -233,18 +146,6 @@ func (l *lexer) termsLeave() {
l.termsLevel--
}
func (l *lexer) nextItem() item {
if l.err != nil {
return item{item_error, l.err.Error()}
}
if !l.items.empty() {
return l.items.shift()
}
l.fetchItem()
return l.nextItem()
}
var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
@@ -252,32 +153,32 @@ func (l *lexer) fetchItem() {
r := l.read()
switch {
case r == eof:
l.items.push(item{item_eof, ""})
l.tokens.push(Token{EOF, ""})
case r == char_terms_open:
l.termsEnter()
l.items.push(item{item_terms_open, string(r)})
l.tokens.push(Token{TermsOpen, string(r)})
case r == char_comma && l.inTerms():
l.items.push(item{item_separator, string(r)})
l.tokens.push(Token{Separator, string(r)})
case r == char_terms_close && l.inTerms():
l.items.push(item{item_terms_close, string(r)})
l.tokens.push(Token{TermsClose, string(r)})
l.termsLeave()
case r == char_range_open:
l.items.push(item{item_range_open, string(r)})
l.tokens.push(Token{RangeOpen, string(r)})
l.fetchRange()
case r == char_single:
l.items.push(item{item_single, string(r)})
l.tokens.push(Token{Single, string(r)})
case r == char_any:
if l.read() == char_any {
l.items.push(item{item_super, string(r) + string(r)})
l.tokens.push(Token{Super, string(r) + string(r)})
} else {
l.unread()
l.items.push(item{item_any, string(r)})
l.tokens.push(Token{Any, string(r)})
}
default:
@@ -308,27 +209,27 @@ func (l *lexer) fetchRange() {
if r != char_range_close {
l.errorf("expected close range character")
} else {
l.items.push(item{item_range_close, string(r)})
l.tokens.push(Token{RangeClose, string(r)})
}
return
}
if wantHi {
l.items.push(item{item_range_hi, string(r)})
l.tokens.push(Token{RangeHi, string(r)})
wantClose = true
continue
}
if !seenNot && r == char_range_not {
l.items.push(item{item_not, string(r)})
l.tokens.push(Token{Not, string(r)})
seenNot = true
continue
}
if n, w := l.peek(); n == char_range_between {
l.seek(w)
l.items.push(item{item_range_lo, string(r)})
l.items.push(item{item_range_between, string(n)})
l.tokens.push(Token{RangeLo, string(r)})
l.tokens.push(Token{RangeBetween, string(n)})
wantHi = true
continue
}
@@ -367,6 +268,6 @@ reading:
}
if len(data) > 0 {
l.items.push(item{item_text, string(data)})
l.tokens.push(Token{Text, string(data)})
}
}
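
After the rename, callers consume the lexer through the exported Next, which drains the internal tokens queue and calls fetchItem on demand, replacing the old unexported nextItem. A minimal sketch of walking a token stream follows; the import path is as vendored, the pattern "{a,b}?" is illustrative, and Token's fields and String method live in the package's token file, which this diff does not show:

package main

import (
	"fmt"

	"github.com/gobwas/glob/syntax/lexer"
)

func main() {
	// "{a,b}?" should yield: TermsOpen, Text("a"), Separator,
	// Text("b"), TermsClose, Single, then EOF.
	lx := lexer.NewLexer("{a,b}?")
	for {
		tok := lx.Next()
		fmt.Println(tok)
		if tok.Type == lexer.EOF {
			break
		}
	}
}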

Some files were not shown because too many files have changed in this diff.