Compare commits


23 Commits

Author SHA1 Message Date
Jakob Borg 84c0749d20 Slightly more compact GUI resources 2014-01-20 23:17:57 +01:00
Jakob Borg 6b02f9e44f Fix GUI files modtime (ish...) 2014-01-20 23:08:29 +01:00
Jakob Borg 84d7452f9e Use embed instead of nrsc, enables 'go get' 2014-01-20 23:01:38 +01:00
Jakob Borg 9b449cb527 Fix windows build (fixes #38) 2014-01-20 23:00:49 +01:00
Jakob Borg d9ffd359e2 Tweak locking and integration test. 2014-01-20 22:22:27 +01:00
Jakob Borg b67443eb40 Integration test 2014-01-20 07:38:57 +01:00
Jakob Borg 4ac204b604 Fine grained locking 2014-01-20 07:38:48 +01:00
Jakob Borg fff50b5472 Delete deadlock 2014-01-14 17:47:27 -07:00
Jakob Borg 8d5aed410f Clear availability for disconnected node 2014-01-13 11:22:57 -07:00
Jakob Borg ba0e4ded65 Deadlock fix and cleanups 2014-01-13 10:29:23 -07:00
Jakob Borg f0b18685a5 Show 'this node' in GUI 2014-01-12 15:19:03 -07:00
Jakob Borg fc2b557ae6 Don't print help twice 2014-01-12 14:47:04 -07:00
Jakob Borg af399ae9f3 Cleanup ignore tests 2014-01-12 11:10:15 -07:00
Jakob Borg 45fcf4bc84 Implement new puller routine (fixes #33) 2014-01-12 11:02:16 -07:00
Jakob Borg 55f61ccb5e Simple send rate limit 2014-01-12 10:19:22 -07:00
Jakob Borg b601fc5627 Don't build with CPU usage on Solaris 2014-01-10 15:32:30 +01:00
Jakob Borg 832c0ffad0 Report CPU/mem usage in GUI 2014-01-10 00:12:32 +01:00
Jakob Borg cb33f27f23 Woops: reignore .stignore 2014-01-09 23:00:42 +01:00
Jakob Borg 92dee7c082 Only fetch deps, don't build 2014-01-09 23:00:23 +01:00
Jakob Borg b9af45bc6b Prepopulate ignore patterns (fixes #21) 2014-01-09 22:46:01 +01:00
Jakob Borg a18f6c6d90 Do go get as part of build unless fast build requested (fixes #31) 2014-01-09 21:22:05 +01:00
Jakob Borg 6e11e3cda9 Build for Linux on ARM (fixes #32) 2014-01-09 21:17:41 +01:00
Jakob Borg 2935aebe53 Benchmarking 2014-01-09 14:11:55 +01:00
31 changed files with 1729 additions and 9039 deletions

build.sh

@@ -3,21 +3,22 @@
version=$(git describe --always)
buildDir=dist
if [[ $fast != yes ]] ; then
go get -d
go test ./...
fi
if [[ -z $1 ]] ; then
go test ./...
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui
go build -ldflags "-X main.Version $version"
elif [[ $1 == "embed" ]] ; then
embedder main gui > gui.files.go
elif [[ $1 == "tar" ]] ; then
go test ./...
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
else
go test ./... || exit 1
elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1
@@ -28,7 +29,6 @@ else
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir -p "$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
@@ -38,6 +38,25 @@ else
done
done
for goos in linux ; do
for goarm in 5 6 7 ; do
for goarch in arm ; do
echo "$goos-${goarch}v$goarm"
export GOARM="$goarm"
export GOOS="$goos"
export GOARCH="$goarch"
export name="syncthing-$goos-${goarch}v$goarm"
go build -ldflags "-X main.Version $version" \
&& mkdir -p "$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
&& mv syncthing "$name" \
&& tar zcf "$buildDir/$name.tar.gz" "$name" \
&& rm -r "$name"
done
done
done
for goos in windows ; do
for goarch in amd64 386 ; do
echo "$goos-$goarch"
@@ -45,10 +64,10 @@ else
export GOARCH="$goarch"
export name="syncthing-$goos-$goarch"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing.exe gui \
&& mkdir -p "$name" \
&& mv syncthing.exe "$buildDir/$name.exe" \
&& cp syncthing.exe "$buildDir/$name.exe" \
&& cp README.md LICENSE "$name" \
&& mv syncthing.exe "$name" \
&& zip -qr "$buildDir/$name.zip" "$name" \
&& rm -r "$name"
done
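A side note on the linker flag used throughout the script: -ldflags "-X main.Version $version" overwrites a string variable in package main at link time, so the binary reports the git-described version without any generated source. A minimal sketch of the Go side (editor's example; the variable name is taken from the flag itself):

    package main

    import "fmt"

    // Version is overwritten at link time by:
    //   go build -ldflags "-X main.Version <git-version>"
    var Version = "unknown"

    func main() {
        fmt.Println("syncthing", Version)
    }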

gui.files.go (new file, 6 lines)

File diff suppressed because one or more lines are too long

gui.go (59 lines changed)

@@ -3,15 +3,17 @@ package main
import (
"encoding/json"
"fmt"
"io"
"log"
"mime"
"net/http"
"path/filepath"
"runtime"
"sync"
"time"
"bitbucket.org/tebeka/nrsc"
"github.com/calmh/syncthing/model"
"github.com/codegangsta/martini"
"github.com/cratonica/embed"
)
func startGUI(addr string, m *model.Model) {
@@ -22,10 +24,16 @@ func startGUI(addr string, m *model.Model) {
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
router.Get("/rest/need", restGetNeed)
router.Get("/rest/system", restGetSystem)
fs, err := embed.Unpack(Resources)
if err != nil {
panic(err)
}
go func() {
mr := martini.New()
mr.Use(nrscStatic("gui"))
mr.Use(embeddedStatic(fs))
mr.Use(martini.Recovery())
mr.Action(router.Handle)
mr.Map(m)
@@ -34,6 +42,7 @@ func startGUI(addr string, m *model.Model) {
warnln("GUI not possible:", err)
}
}()
}
func getRoot(w http.ResponseWriter, r *http.Request) {
@@ -71,6 +80,7 @@ func restGetConnections(m *model.Model, w http.ResponseWriter) {
func restGetConfig(w http.ResponseWriter) {
var res = make(map[string]interface{})
res["myID"] = myID
res["repository"] = config.OptionMap("repository")
res["nodes"] = config.OptionMap("nodes")
w.Header().Set("Content-Type", "application/json")
@@ -100,36 +110,47 @@ func restGetNeed(m *model.Model, w http.ResponseWriter) {
json.NewEncoder(w).Encode(gfs)
}
func nrscStatic(path string) interface{} {
if err := nrsc.Initialize(); err != nil {
panic("Unable to initialize nrsc: " + err.Error())
}
var cpuUsagePercent float64
var cpuUsageLock sync.RWMutex
func restGetSystem(w http.ResponseWriter) {
var m runtime.MemStats
runtime.ReadMemStats(&m)
res := make(map[string]interface{})
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys
cpuUsageLock.RLock()
res["cpuPercent"] = cpuUsagePercent
cpuUsageLock.RUnlock()
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}
func embeddedStatic(fs map[string][]byte) interface{} {
var modt = time.Now().UTC().Format(http.TimeFormat)
return func(res http.ResponseWriter, req *http.Request, log *log.Logger) {
file := req.URL.Path
// the embedded file map is keyed without a leading slash
if file[0] == '/' {
file = file[1:]
}
f := nrsc.Get(file)
if f == nil {
bs, ok := fs[file]
if !ok {
return
}
rdr, err := f.Open()
if err != nil {
http.Error(res, "Internal Server Error", http.StatusInternalServerError)
}
defer rdr.Close()
mtype := mime.TypeByExtension(filepath.Ext(req.URL.Path))
if len(mtype) != 0 {
res.Header().Set("Content-Type", mtype)
}
res.Header().Set("Content-Size", fmt.Sprintf("%d", f.Size()))
res.Header().Set("Last-Modified", f.ModTime().UTC().Format(http.TimeFormat))
res.Header().Set("Content-Size", fmt.Sprintf("%d", len(bs)))
res.Header().Set("Last-Modified", modt)
io.Copy(res, rdr)
res.Write(bs)
}
}
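The handler above replaces nrsc's file objects with a plain map[string][]byte unpacked once at startup, serving every response from memory with a synthetic Last-Modified (the process start time, which is what commit 6b02f9e44f tweaks). For comparison, an editor's sketch of the same idea on plain net/http; names are illustrative, and the nonstandard Content-Size header set above is omitted here since net/http computes Content-Length itself:

    package main

    import (
        "mime"
        "net/http"
        "path/filepath"
        "strings"
        "time"
    )

    // staticFromMap serves files from an in-memory map, keyed without a
    // leading slash, mirroring the embeddedStatic handler above.
    func staticFromMap(fs map[string][]byte) http.HandlerFunc {
        modt := time.Now().UTC().Format(http.TimeFormat)
        return func(w http.ResponseWriter, r *http.Request) {
            bs, ok := fs[strings.TrimPrefix(r.URL.Path, "/")]
            if !ok {
                http.NotFound(w, r)
                return
            }
            if mtype := mime.TypeByExtension(filepath.Ext(r.URL.Path)); mtype != "" {
                w.Header().Set("Content-Type", mtype)
            }
            w.Header().Set("Last-Modified", modt)
            w.Write(bs)
        }
    }

    func main() {
        http.Handle("/", staticFromMap(map[string][]byte{"index.html": []byte("hello")}))
        http.ListenAndServe("127.0.0.1:8080", nil)
    }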

gui/app.js

@@ -26,6 +26,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
});
$scope.refresh = function () {
$http.get("/rest/system").success(function (data) {
$scope.system = data;
});
$http.get("/rest/model").success(function (data) {
$scope.model = data;
modelGetSucceeded();
@@ -71,16 +74,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
setInterval($scope.refresh, 10000);
});
function decimals(num) {
if (num > 100) {
return 0;
}
if (num > 10) {
return 1;
}
return 2;
function decimals(val, num) {
if (val === 0) { return 0; }
var digits = Math.floor(Math.log(Math.abs(val))/Math.log(10));
var decimals = Math.max(0, num - digits);
return decimals;
}
syncthing.filter('natural', function() {
return function(input, valid) {
return input.toFixed(decimals(input, valid));
}
});
syncthing.filter('binary', function() {
return function(input) {
if (input === undefined) {
@@ -88,15 +94,15 @@ syncthing.filter('binary', function() {
}
if (input > 1024 * 1024 * 1024) {
input /= 1024 * 1024 * 1024;
return input.toFixed(decimals(input)) + ' Gi';
return input.toFixed(decimals(input, 2)) + ' Gi';
}
if (input > 1024 * 1024) {
input /= 1024 * 1024;
return input.toFixed(decimals(input)) + ' Mi';
return input.toFixed(decimals(input, 2)) + ' Mi';
}
if (input > 1024) {
input /= 1024;
return input.toFixed(decimals(input)) + ' Ki';
return input.toFixed(decimals(input, 2)) + ' Ki';
}
return Math.round(input) + ' ';
}
@@ -109,15 +115,15 @@ syncthing.filter('metric', function() {
}
if (input > 1000 * 1000 * 1000) {
input /= 1000 * 1000 * 1000;
return input.toFixed(decimals(input)) + ' G';
return input.toFixed(decimals(input, 2)) + ' G';
}
if (input > 1000 * 1000) {
input /= 1000 * 1000;
return input.toFixed(decimals(input)) + ' M';
return input.toFixed(decimals(input, 2)) + ' M';
}
if (input > 1000) {
input /= 1000;
return input.toFixed(decimals(input)) + ' k';
return input.toFixed(decimals(input, 2)) + ' k';
}
return Math.round(input) + ' ';
}
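The reworked decimals(val, num) scales the fraction digits to the magnitude of the value instead of using fixed thresholds, so the filters print roughly num+1 significant digits for values at or above 1. An editor's port to Go to make the arithmetic concrete (the original is the app.js function above):

    package main

    import (
        "fmt"
        "math"
    )

    // decimals mirrors the new app.js helper.
    func decimals(val float64, num int) int {
        if val == 0 {
            return 0
        }
        digits := int(math.Floor(math.Log10(math.Abs(val))))
        if d := num - digits; d > 0 {
            return d
        }
        return 0
    }

    func main() {
        fmt.Println(decimals(123.4, 2)) // 0 -> "123"
        fmt.Println(decimals(12.34, 2)) // 1 -> "12.3"
        fmt.Println(decimals(1.234, 2)) // 2 -> "1.23"
    }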

gui/bootstrap/css/bootstrap-theme.css (deleted)

@@ -1,397 +0,0 @@
/*!
* Bootstrap v3.0.3 (http://getbootstrap.com)
* Copyright 2013 Twitter, Inc.
* Licensed under http://www.apache.org/licenses/LICENSE-2.0
*/
.btn-default,
.btn-primary,
.btn-success,
.btn-info,
.btn-warning,
.btn-danger {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);
}
.btn-default:active,
.btn-primary:active,
.btn-success:active,
.btn-info:active,
.btn-warning:active,
.btn-danger:active,
.btn-default.active,
.btn-primary.active,
.btn-success.active,
.btn-info.active,
.btn-warning.active,
.btn-danger.active {
-webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);
}
.btn:active,
.btn.active {
background-image: none;
}
.btn-default {
text-shadow: 0 1px 0 #fff;
background-image: -webkit-linear-gradient(top, #ffffff 0%, #e0e0e0 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #e0e0e0 100%);
background-repeat: repeat-x;
border-color: #dbdbdb;
border-color: #ccc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-default:hover,
.btn-default:focus {
background-color: #e0e0e0;
background-position: 0 -15px;
}
.btn-default:active,
.btn-default.active {
background-color: #e0e0e0;
border-color: #dbdbdb;
}
.btn-primary {
background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
background-repeat: repeat-x;
border-color: #2b669a;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-primary:hover,
.btn-primary:focus {
background-color: #2d6ca2;
background-position: 0 -15px;
}
.btn-primary:active,
.btn-primary.active {
background-color: #2d6ca2;
border-color: #2b669a;
}
.btn-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
background-repeat: repeat-x;
border-color: #3e8f3e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-success:hover,
.btn-success:focus {
background-color: #419641;
background-position: 0 -15px;
}
.btn-success:active,
.btn-success.active {
background-color: #419641;
border-color: #3e8f3e;
}
.btn-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
background-repeat: repeat-x;
border-color: #e38d13;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-warning:hover,
.btn-warning:focus {
background-color: #eb9316;
background-position: 0 -15px;
}
.btn-warning:active,
.btn-warning.active {
background-color: #eb9316;
border-color: #e38d13;
}
.btn-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
background-repeat: repeat-x;
border-color: #b92c28;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-danger:hover,
.btn-danger:focus {
background-color: #c12e2a;
background-position: 0 -15px;
}
.btn-danger:active,
.btn-danger.active {
background-color: #c12e2a;
border-color: #b92c28;
}
.btn-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
background-repeat: repeat-x;
border-color: #28a4c9;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.btn-info:hover,
.btn-info:focus {
background-color: #2aabd2;
background-position: 0 -15px;
}
.btn-info:active,
.btn-info.active {
background-color: #2aabd2;
border-color: #28a4c9;
}
.thumbnail,
.img-thumbnail {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.dropdown-menu > li > a:hover,
.dropdown-menu > li > a:focus {
background-color: #e8e8e8;
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.dropdown-menu > .active > a,
.dropdown-menu > .active > a:hover,
.dropdown-menu > .active > a:focus {
background-color: #357ebd;
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.navbar-default {
background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);
background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);
background-repeat: repeat-x;
border-radius: 4px;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);
}
.navbar-default .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);
}
.navbar-brand,
.navbar-nav > li > a {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);
}
.navbar-inverse {
background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222222 100%);
background-image: linear-gradient(to bottom, #3c3c3c 0%, #222222 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
}
.navbar-inverse .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #222222 0%, #282828 100%);
background-image: linear-gradient(to bottom, #222222 0%, #282828 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);
}
.navbar-inverse .navbar-brand,
.navbar-inverse .navbar-nav > li > a {
text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);
}
.navbar-static-top,
.navbar-fixed-top,
.navbar-fixed-bottom {
border-radius: 0;
}
.alert {
text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);
}
.alert-success {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
background-repeat: repeat-x;
border-color: #b2dba1;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
}
.alert-info {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
background-repeat: repeat-x;
border-color: #9acfea;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
}
.alert-warning {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
background-repeat: repeat-x;
border-color: #f5e79e;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
}
.alert-danger {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
background-repeat: repeat-x;
border-color: #dca7a7;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
}
.progress {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
}
.progress-bar {
background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
}
.progress-bar-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
}
.progress-bar-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
}
.progress-bar-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
}
.progress-bar-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
}
.list-group {
border-radius: 4px;
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);
}
.list-group-item.active,
.list-group-item.active:hover,
.list-group-item.active:focus {
text-shadow: 0 -1px 0 #3071a9;
background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
background-repeat: repeat-x;
border-color: #3278b3;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
}
.panel {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.panel-default > .panel-heading {
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
}
.panel-primary > .panel-heading {
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
}
.panel-success > .panel-heading {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
}
.panel-info > .panel-heading {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
}
.panel-warning > .panel-heading {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
}
.panel-danger > .panel-heading {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
background-repeat: repeat-x;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
}
.well {
background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
background-repeat: repeat-x;
border-color: #dcdcdc;
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
-webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);
}


File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because it is too large

gui/index.html

@@ -9,7 +9,7 @@
<link rel="shortcut icon" href="../../docs-assets/ico/favicon.png">
<title>syncthing</title>
<link href="bootstrap/css/bootstrap.css" rel="stylesheet">
<link href="bootstrap/css/bootstrap.min.css" rel="stylesheet">
<style type="text/css">
html, body {
height: 100%;
@@ -47,27 +47,42 @@ html, body {
<div class="row">
<div class="col-md-12">
<h2>Synchronization</h2>
<div class="progress">
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
{{100 * model.inSyncBytes / model.globalBytes | number:0}}%
<div class="panel" ng-class="{'panel-success': model.needBytes === 0, 'panel-primary': model.needBytes !== 0}">
<div class="panel-heading"><h3 class="panel-title">Synchronization</h3></div>
<div class="panel-body">
<div class="progress">
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
{{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
</div>
</div>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
</div>
</div>
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
</div>
</div>
<div class="row">
<div class="col-md-6">
<h1>Repository Status</h1>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Repository</h3></div>
<div class="panel-body">
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
</div>
</div>
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">System</h3></div>
<div class="panel-body">
<p>{{system.sys | binary}}B RAM allocated, {{system.alloc | binary}}B in use</p>
<p>{{system.cpuPercent | alwaysNumber | natural:1}}% CPU, {{system.goroutines | alwaysNumber}} goroutines</p>
</div>
</div>
<div ng-show="model.needFiles > 0">
<h2>Files to Synchronize</h2>
@@ -80,32 +95,42 @@ html, body {
</div>
</div>
<div class="col-md-6">
<h1>Cluster Status</h1>
<table class="table table-condensed">
<tbody>
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node]}">
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
<td>
<span ng-show="!!connections[node]">
<span class="glyphicon glyphicon-link"></span>
{{connections[node].Address}}
</span>
<span ng-hide="!!connections[node]">
<span class="glyphicon glyphicon-cog"></span>
{{address}}
</span>
</td>
<td class="text-right">
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
</td>
<td class="text-right">
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
</td>
</tr>
</tbody>
</table>
<div class="panel panel-info">
<div class="panel-heading"><h3 class="panel-title">Cluster</h3></div>
<table class="table table-condensed">
<tbody>
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node], 'text-muted': node == config.myID}">
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
<td>
<span ng-show="node == config.myID">
<span class="glyphicon glyphicon-ok"></span>
(this node)
</span>
<span ng-show="node != config.myID && !!connections[node]">
<span class="glyphicon glyphicon-link"></span>
{{connections[node].Address}}
</span>
<span ng-show="node != config.myID && !connections[node]">
<span class="glyphicon glyphicon-cog"></span>
{{address}}
</span>
</td>
<td class="text-right">
<span ng-show="node != config.myID">
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
</span>
</td>
<td class="text-right">
<span ng-show="node != config.myID">
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
</span>
</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
</div>
@@ -123,7 +148,7 @@ html, body {
<div class="modal-content">
<div class="modal-header alert alert-danger">
<h4 class="modal-title">
<span class="glyphicon glyphicon-exclamation-sign"></span>
<span class="glyphicon glyphicon-exclamation-sign"></span>
Connection Error
</h4>
</div>

gui_unix.go (new file, 31 lines)

@@ -0,0 +1,31 @@
// +build !windows,!solaris

package main
import (
"syscall"
"time"
)
func init() {
go trackCPUUsage()
}
func trackCPUUsage() {
var prevUsage int64
var prevTime = time.Now().UnixNano()
var rusage syscall.Rusage
for {
time.Sleep(10 * time.Second)
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
usageDiff := curUsage - prevUsage
cpuUsageLock.Lock()
cpuUsagePercent = 100 * float64(usageDiff) / float64(timeDiff)
cpuUsageLock.Unlock()
prevTime = curTime
prevUsage = curUsage
}
}
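The percentage is simply the share of wall-clock time the process spent on CPU (user plus system) between samples. A worked example with assumed deltas:

    package main

    import "fmt"

    func main() {
        // Assumed deltas over one 10 s sampling window, in nanoseconds:
        var usageDiff int64 = 2500000000  // user+system CPU time consumed
        var timeDiff int64 = 10000000000  // wall-clock time elapsed
        fmt.Println(100 * float64(usageDiff) / float64(timeDiff)) // 25 (% CPU)
    }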

integration/.gitignore (new file, vendored, 5 lines)

@@ -0,0 +1,5 @@
files-*
conf-*
md5-*
genfiles
md5r

integration/genfiles.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package main
import (
"crypto/rand"
"flag"
"fmt"
"io/ioutil"
mr "math/rand"
"os"
"path"
)
func name() string {
var b [16]byte
rand.Reader.Read(b[:])
return fmt.Sprintf("%x", b[:])
}
func main() {
var files int
var maxexp int
flag.IntVar(&files, "files", 1000, "Number of files")
flag.IntVar(&maxexp, "maxexp", 20, "Maximum file size (max = 2^n + 128*1024 B)")
flag.Parse()
for i := 0; i < files; i++ {
n := name()
p0 := path.Join(string(n[0]), n[0:2])
os.MkdirAll(p0, 0755)
s := 1 << uint(mr.Intn(maxexp))
a := 128 * 1024
if a > s {
a = s
}
s += mr.Intn(a)
b := make([]byte, s)
rand.Reader.Read(b)
p1 := path.Join(p0, n)
ioutil.WriteFile(p1, b, 0644)
}
}
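File sizes follow s = 2^k + r, where k is uniform in [0, maxexp) and r is uniform in [0, min(128 KiB, 2^k)); with the -maxexp 21 that the integration test below passes, sizes range from 1 B up to 2^20 + 131071 = 1179647 B, a little over 1 MiB.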

integration/md5r.go (new file, 59 lines)

@@ -0,0 +1,59 @@
package main
import (
"crypto/md5"
"flag"
"fmt"
"io"
"os"
"path/filepath"
)
func main() {
flag.Parse()
args := flag.Args()
if len(args) == 0 {
args = []string{"."}
}
for _, path := range args {
err := filepath.Walk(path, walker)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
}
func walker(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
sum, err := md5file(path)
if err != nil {
return err
}
fmt.Printf("%s %s\n", sum, path)
}
return nil
}
func md5file(fname string) (hash string, err error) {
f, err := os.Open(fname)
if err != nil {
return
}
defer f.Close()
h := md5.New()
io.Copy(h, f)
hb := h.Sum(nil)
hash = fmt.Sprintf("%x", hb)
return
}

integration/test.sh (new executable file, 69 lines)

@@ -0,0 +1,69 @@
#!/bin/bash
rm -rf files-* conf-* md5-*
extraopts=""
p=$(pwd)
go build genfiles.go
go build md5r.go
echo "Setting up (keys)..."
i1=$(syncthing -c conf-1 2>&1 | awk '/My ID/ {print $6}')
echo $i1
i2=$(syncthing -c conf-2 2>&1 | awk '/My ID/ {print $6}')
echo $i2
i3=$(syncthing -c conf-3 2>&1 | awk '/My ID/ {print $6}')
echo $i3
echo "Setting up (files)..."
for i in 1 2 3 ; do
cat >conf-$i/syncthing.ini <<EOT
[repository]
dir = $p/files-$i
[nodes]
$i1 = 127.0.0.1:22001
$i2 = 127.0.0.1:22002
$i3 = 127.0.0.1:22003
EOT
mkdir files-$i
pushd files-$i >/dev/null
../genfiles -maxexp 21 -files 4000
../md5r > ../md5-$i
popd >/dev/null
done
echo "Starting..."
for i in 1 2 3 ; do
sleep 1
syncthing -c conf-$i --no-gui -l :2200$i $extraopts &
done
cat md5-* | sort > md5-tot
while true ; do
read
echo Verifying...
conv=0
for i in 1 2 3 ; do
pushd files-$i >/dev/null
../md5r | sort > ../md5-$i
popd >/dev/null
if ! cmp md5-$i md5-tot >/dev/null ; then
echo $i unconverged
else
conv=$((conv + 1))
echo $i converged
fi
done
if [[ $conv == 3 ]] ; then
kill %1
kill %2
kill %3
exit
fi
done

logger.go

@@ -6,7 +6,8 @@ import (
"os"
)
var logger = log.New(os.Stderr, "", log.Ltime)
// set in main()
var logger *log.Logger
func debugln(vals ...interface{}) {
s := fmt.Sprintln(vals...)

main.go (50 lines changed)

@@ -10,6 +10,8 @@ import (
_ "net/http/pprof"
"os"
"path"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
@@ -39,7 +41,7 @@ type Options struct {
type DebugOptions struct {
LogSource bool `long:"log-source"`
TraceModel []string `long:"trace-model" value-name:"TRACE" description:"idx, net, file, need"`
TraceModel []string `long:"trace-model" value-name:"TRACE" description:"idx, net, file, need, pull"`
TraceConnect bool `long:"trace-connect"`
Profiler string `long:"profiler" value-name:"ADDR"`
}
@@ -52,8 +54,8 @@ type DiscoveryOptions struct {
}
type AdvancedOptions struct {
RequestsInFlight int `long:"reqs-in-flight" description:"Parallell in flight requests per file" default:"4" value-name:"REQS"`
FilesInFlight int `long:"files-in-flight" description:"Parallell in flight file pulls" default:"8" value-name:"FILES"`
RequestsInFlight int `long:"reqs-in-flight" description:"Parallel in-flight requests per node" default:"8" value-name:"REQS"`
LimitRate int `long:"send-rate" description:"Rate limit for outgoing data" default:"0" value-name:"KBPS"`
ScanInterval time.Duration `long:"scan-intv" description:"Repository scan interval" default:"60s" value-name:"INTV"`
ConnInterval time.Duration `long:"conn-intv" description:"Node reconnect interval" default:"60s" value-name:"INTV"`
}
@@ -66,6 +68,7 @@ const (
)
var (
myID string
config ini.Config
nodeAddrs = make(map[string][]string)
)
@@ -73,6 +76,11 @@ var (
func main() {
_, err := flags.Parse(&opts)
if err != nil {
if err, ok := err.(*flags.Error); ok {
if err.Type == flags.ErrHelp {
os.Exit(0)
}
}
fatalln(err)
}
@@ -81,8 +89,19 @@ func main() {
os.Exit(0)
}
if len(os.Getenv("GOGC")) == 0 {
debug.SetGCPercent(25)
}
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
log.SetOutput(os.Stderr)
logger = log.New(os.Stderr, "", log.Flags())
if len(opts.Debug.TraceModel) > 0 || opts.Debug.LogSource {
logger = log.New(os.Stderr, "", log.Lshortfile|log.Ldate|log.Ltime|log.Lmicroseconds)
log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
logger.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
}
opts.ConfDir = expandTilde(opts.ConfDir)
@@ -98,8 +117,10 @@ func main() {
fatalErr(err)
}
myID := string(certId(cert.Certificate[0]))
myID = string(certId(cert.Certificate[0]))
infoln("My ID:", myID)
log.SetPrefix("[" + myID[0:5] + "] ")
logger.SetPrefix("[" + myID[0:5] + "] ")
if opts.Debug.Profiler != "" {
go func() {
@@ -148,6 +169,9 @@ func main() {
for _, t := range opts.Debug.TraceModel {
m.Trace(t)
}
if opts.Advanced.LimitRate > 0 {
m.LimitRate(opts.Advanced.LimitRate)
}
// GUI
if !opts.NoGUI && opts.GUIAddr != "" {
@@ -191,7 +215,7 @@ func main() {
infoln("Deletes from peer nodes are allowed")
}
okln("Ready to synchronize (read-write)")
m.StartRW(!opts.NoDelete, opts.Advanced.FilesInFlight, opts.Advanced.RequestsInFlight)
m.StartRW(!opts.NoDelete, opts.Advanced.RequestsInFlight)
} else {
okln("Ready to synchronize (read only; no external updates accepted)")
}
@@ -201,7 +225,9 @@ func main() {
go func() {
for {
time.Sleep(opts.Advanced.ScanInterval)
updateLocalModel(m)
if m.LocalAge() > opts.Advanced.ScanInterval.Seconds()/2 {
updateLocalModel(m)
}
}
}()
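// Editor's note on the guard above: with the default scan-intv of 60s,
// the periodic walk now runs only when the local model is older than 30s,
// so a model that was refreshed recently skips a redundant rescan.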
@@ -226,7 +252,7 @@ func printStatsLoop(m *model.Model) {
outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs)
if inbps+outbps > 0 {
infof("%s: %sb/s in, %sb/s out", node, MetricPrefix(inbps), MetricPrefix(outbps))
infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(inbps), MetricPrefix(outbps))
}
lastStats[node] = stats
@@ -282,7 +308,8 @@ listen:
for nodeID := range nodeAddrs {
if nodeID == remoteID {
m.AddConnection(conn, remoteID)
protoConn := protocol.NewConnection(remoteID, conn, conn, m)
m.AddConnection(conn, protoConn)
continue listen
}
}
@@ -351,7 +378,8 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.M
continue
}
m.AddConnection(conn, remoteID)
protoConn := protocol.NewConnection(remoteID, conn, conn, m)
m.AddConnection(conn, protoConn)
continue nextNode
}
}
@@ -361,7 +389,7 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.M
}
func updateLocalModel(m *model.Model) {
files := m.FilteredWalk(!opts.NoSymlinks)
files, _ := m.Walk(!opts.NoSymlinks)
m.ReplaceLocal(files)
saveIndex(m)
}
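Both the listening and dialing paths now wrap the raw TLS stream in protocol.NewConnection and register it with the model through the Connection interface introduced in model/model.go below. An editor's sketch of a substitute implementation, the kind of thing the interface makes possible in tests (fakeConn and its canned data are hypothetical):

    package model

    import "github.com/calmh/syncthing/protocol"

    // fakeConn satisfies the new Connection interface from model.go (sketch).
    type fakeConn struct{ id string }

    func (c fakeConn) ID() string                { return c.id }
    func (c fakeConn) Index([]protocol.FileInfo) {}
    func (c fakeConn) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
        return make([]byte, size), nil // canned zeroed data
    }
    func (c fakeConn) Statistics() protocol.Statistics { return protocol.Statistics{} }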

model/blocks.go

@@ -7,15 +7,15 @@ import (
)
type Block struct {
Offset uint64
Length uint32
Offset int64
Size uint32
Hash []byte
}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]Block, error) {
var blocks []Block
var offset uint64
var offset int64
for {
lr := &io.LimitedReader{r, int64(blocksize)}
hf := sha256.New()
@@ -30,11 +30,11 @@ func Blocks(r io.Reader, blocksize int) ([]Block, error) {
b := Block{
Offset: offset,
Length: uint32(n),
Size: uint32(n),
Hash: hf.Sum(nil),
}
blocks = append(blocks, b)
offset += uint64(n)
offset += int64(n)
}
return blocks, nil
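With Offset now int64 and Length renamed to Size, a quick usage sketch of the chunker (editor's example; each block carries the SHA-256 digest of its chunk):

    package model

    import (
        "fmt"
        "strings"
    )

    func exampleBlocks() {
        bs, _ := Blocks(strings.NewReader("hello world"), 4)
        for _, b := range bs {
            fmt.Printf("offset=%d size=%d hash=%x\n", b.Offset, b.Size, b.Hash)
        }
        // Three blocks: sizes 4, 4 and 3 at offsets 0, 4 and 8.
    }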

model/blocks_test.go

@@ -52,7 +52,7 @@ func TestBlocks(t *testing.T) {
t.Fatalf("Incorrect number of blocks %d != %d", l, len(test.hash))
} else {
i := 0
for off := uint64(0); off < uint64(len(test.data)); off += uint64(test.blocksize) {
for off := int64(0); off < int64(len(test.data)); off += int64(test.blocksize) {
if blocks[i].Offset != off {
t.Errorf("Incorrect offset for block %d: %d != %d", i, blocks[i].Offset, off)
}
@@ -61,8 +61,8 @@ func TestBlocks(t *testing.T) {
if rem := len(test.data) - int(off); bs > rem {
bs = rem
}
if int(blocks[i].Length) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Length, bs)
if int(blocks[i].Size) != bs {
t.Errorf("Incorrect length for block %d: %d != %d", i, blocks[i].Size, bs)
}
if h := fmt.Sprintf("%x", blocks[i].Hash); h != test.hash[i] {
t.Errorf("Incorrect block hash %q != %q", h, test.hash[i])
@@ -106,8 +106,8 @@ func TestDiff(t *testing.T) {
if d[j].Offset != test.d[j].Offset {
t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
}
if d[j].Length != test.d[j].Length {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Length, test.d[j].Length)
if d[j].Size != test.d[j].Size {
t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
}
}
}

model/filemonitor.go (new file, 173 lines)

@@ -0,0 +1,173 @@
package model
import (
"bytes"
"errors"
"fmt"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
)
type fileMonitor struct {
name string // in-repo name
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
copyError error
writeError error
}
func (m *fileMonitor) FileBegins(cc <-chan content) error {
if m.model.trace["file"] {
log.Printf("FILE: FileBegins: " + m.name)
}
tmp := tempName(m.path, m.global.Modified)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(dir, 0777)
if err != nil {
return err
}
}
outFile, err := os.Create(tmp)
if err != nil {
return err
}
m.writeDone.Add(1)
var writeWg sync.WaitGroup
if len(m.localBlocks) > 0 {
writeWg.Add(1)
inFile, err := os.Open(m.path)
if err != nil {
return err
}
// Copy local blocks, close infile when done
go m.copyLocalBlocks(inFile, outFile, &writeWg)
}
// Write remote blocks,
writeWg.Add(1)
go m.copyRemoteBlocks(cc, outFile, &writeWg)
// Wait for both writing routines, then close the outfile
go func() {
writeWg.Wait()
outFile.Close()
m.writeDone.Done()
}()
return nil
}
func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
defer inFile.Close()
defer writeWg.Done()
var buf = buffers.Get(BlockSize)
defer buffers.Put(buf)
for _, lb := range m.localBlocks {
buf = buf[:lb.Size]
_, err := inFile.ReadAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
_, err = outFile.WriteAt(buf, lb.Offset)
if err != nil {
m.copyError = err
return
}
}
}
func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
defer writeWg.Done()
for content := range cc {
_, err := outFile.WriteAt(content.data, content.offset)
buffers.Put(content.data)
if err != nil {
m.writeError = err
return
}
}
}
func (m *fileMonitor) FileDone() error {
if m.model.trace["file"] {
log.Printf("FILE: FileDone: " + m.name)
}
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
defer os.Remove(tmp)
if m.copyError != nil {
return m.copyError
}
if m.writeError != nil {
return m.writeError
}
err := hashCheck(tmp, m.global.Blocks)
if err != nil {
return err
}
err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0))
if err != nil {
return err
}
err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777))
if err != nil {
return err
}
err = os.Rename(tmp, m.path)
if err != nil {
return err
}
m.model.updateLocal(m.global)
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
return fmt.Errorf("hash mismatch: %x != %x", current[i].Hash, correct[i].Hash)
}
}
return nil
}
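FileDone above is the promotion step: wait for the writers, verify the assembled temp file, restore metadata, rename into place. The core pattern, stripped of the monitor and permission plumbing (editor's simplification of the code above):

    package model

    import (
        "os"
        "time"
    )

    // promote re-hashes the assembled temp file against the expected block
    // list, restores the advertised mtime, and renames it into place.
    func promote(tmp, final string, want []Block, modified int64) error {
        if err := hashCheck(tmp, want); err != nil {
            return err
        }
        t := time.Unix(modified, 0)
        if err := os.Chtimes(tmp, t, t); err != nil {
            return err
        }
        return os.Rename(tmp, final)
    }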

model/filequeue.go (new file, 243 lines)

@@ -0,0 +1,243 @@
package model
import (
"log"
"sort"
"sync"
"time"
)
type Monitor interface {
FileBegins(<-chan content) error
FileDone() error
}
type FileQueue struct {
files queuedFileList
sorted bool
fmut sync.Mutex // protects files and sorted
availability map[string][]string
amut sync.Mutex // protects availability
}
type queuedFile struct {
name string
blocks []Block
activeBlocks []bool
given int
remaining int
channel chan content
nodes []string
nodesChecked time.Time
monitor Monitor
}
type content struct {
offset int64
data []byte
}
type queuedFileList []queuedFile
func (l queuedFileList) Len() int { return len(l) }
func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }
func (l queuedFileList) Less(a, b int) bool {
// Sort by most blocks already given out, then alphabetically
if l[a].given != l[b].given {
return l[a].given > l[b].given
}
return l[a].name < l[b].name
}
type queuedBlock struct {
name string
block Block
index int
}
func NewFileQueue() *FileQueue {
return &FileQueue{
availability: make(map[string][]string),
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()
for _, f := range q.files {
if f.name == name {
panic("re-adding added file " + f.name)
}
}
q.files = append(q.files, queuedFile{
name: name,
blocks: blocks,
activeBlocks: make([]bool, len(blocks)),
remaining: len(blocks),
channel: make(chan content),
monitor: monitor,
})
q.sorted = false
}
func (q *FileQueue) Len() int {
q.fmut.Lock()
defer q.fmut.Unlock()
return len(q.files)
}
func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) {
q.fmut.Lock()
defer q.fmut.Unlock()
if !q.sorted {
sort.Sort(q.files)
q.sorted = true
}
for i := range q.files {
qf := &q.files[i]
q.amut.Lock()
av := q.availability[qf.name]
q.amut.Unlock()
if len(av) == 0 {
// No one has the file we want; abort.
if qf.remaining != len(qf.blocks) {
// We have already started on this file; close it down
close(qf.channel)
if mon := qf.monitor; mon != nil {
mon.FileDone()
}
}
q.deleteAt(i)
return queuedBlock{}, false
}
for _, ni := range av {
// Find and return the next block in the queue
if ni == nodeID {
for j, b := range qf.blocks {
if !qf.activeBlocks[j] {
qf.activeBlocks[j] = true
qf.given++
return queuedBlock{
name: qf.name,
block: b,
index: j,
}, true
}
}
break
}
}
}
// We found nothing to do
return queuedBlock{}, false
}
func (q *FileQueue) Done(file string, offset int64, data []byte) {
q.fmut.Lock()
defer q.fmut.Unlock()
c := content{
offset: offset,
data: data,
}
for i := range q.files {
qf := &q.files[i]
if qf.name == file {
if qf.monitor != nil && qf.remaining == len(qf.blocks) {
err := qf.monitor.FileBegins(qf.channel)
if err != nil {
log.Printf("WARNING: %s: %v (not synced)", qf.name, err)
q.deleteAt(i)
return
}
}
qf.channel <- c
qf.remaining--
if qf.remaining == 0 {
close(qf.channel)
if qf.monitor != nil {
err := qf.monitor.FileDone()
if err != nil {
log.Printf("WARNING: %s: %v", qf.name, err)
}
}
q.deleteAt(i)
}
return
}
}
panic("unreachable")
}
func (q *FileQueue) Queued(file string) bool {
q.fmut.Lock()
defer q.fmut.Unlock()
for _, qf := range q.files {
if qf.name == file {
return true
}
}
return false
}
func (q *FileQueue) QueuedFiles() (files []string) {
q.fmut.Lock()
defer q.fmut.Unlock()
for _, qf := range q.files {
files = append(files, qf.name)
}
return
}
func (q *FileQueue) deleteAt(i int) {
q.files = q.files[:i+copy(q.files[i:], q.files[i+1:])]
}
func (q *FileQueue) deleteFile(n string) {
for i, file := range q.files {
if n == file.name {
q.deleteAt(i)
return
}
}
}
func (q *FileQueue) SetAvailable(file string, nodes []string) {
q.amut.Lock()
defer q.amut.Unlock()
q.availability[file] = nodes
}
func (q *FileQueue) RemoveAvailable(toRemove string) {
q.amut.Lock()
defer q.amut.Unlock()
for file, nodes := range q.availability {
for i, node := range nodes {
if node == toRemove {
q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])]
if len(q.availability[file]) == 0 {
q.deleteFile(file)
}
}
break
}
}
}
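Putting the queue together: Add registers a file's blocks and its Monitor, SetAvailable records which nodes can serve it, Get hands out one unclaimed block per call for a given node, and Done feeds fetched data back until the file completes. An editor's walkthrough of that flow (fetch stands in for a hypothetical network request):

    package model

    func exampleQueue(blocks []Block, mon Monitor, fetch func(queuedBlock) []byte) {
        q := NewFileQueue()
        q.Add("docs/readme.txt", blocks, mon)
        q.SetAvailable("docs/readme.txt", []string{"node-a"})
        for {
            qb, ok := q.Get("node-a") // next unclaimed block for this node
            if !ok {
                break // queue drained, or availability was lost
            }
            // Done routes the data into the monitor's content channel and
            // finalizes the file once the last block arrives.
            q.Done(qb.name, qb.block.Offset, fetch(qb))
        }
    }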

model/filequeue_test.go (new file, 277 lines)

@@ -0,0 +1,277 @@
package model
import (
"reflect"
"sync"
"sync/atomic"
"testing"
)
func TestFileQueueAdd(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
}
func TestFileQueueAddSorting(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q = NewFileQueue()
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
t.Errorf("Incorrectly sorted get: %+v", b)
}
}
func TestFileQueueLen(t *testing.T) {
q := NewFileQueue()
q.Add("foo", nil, nil)
q.Add("bar", nil, nil)
if l := q.Len(); l != 2 {
t.Errorf("Incorrect len %d != 2 after adds", l)
}
}
func TestFileQueueGet(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
// First get should return the first block of the first file
expected := queuedBlock{
name: "bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (first)\n E: %+v\n A: %+v", expected, actual)
}
// Second get should return the next block of the first file
expected = queuedBlock{
name: "bar",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (second)\n E: %+v\n A: %+v", expected, actual)
}
// Third get should return the first block of the second file
expected = queuedBlock{
name: "foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned (third)\n E: %+v\n A: %+v", expected, actual)
}
}
/*
func TestFileQueueDone(t *testing.T) {
ch := make(chan content)
var recv sync.WaitGroup
recv.Add(1)
go func() {
content := <-ch
if bytes.Compare(content.data, []byte("first block bytes")) != 0 {
t.Error("Incorrect data in first content block")
}
content = <-ch
if bytes.Compare(content.data, []byte("second block bytes")) != 0 {
t.Error("Incorrect data in second content block")
}
_, ok := <-ch
if ok {
t.Error("Content channel not closed")
}
recv.Done()
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
b0, _ := q.Get("nodeID")
b1, _ := q.Get("nodeID")
q.Done(b0.name, b0.block.Offset, []byte("first block bytes"))
q.Done(b1.name, b1.block.Offset, []byte("second block bytes"))
recv.Wait()
// Queue should now have one file less
if l := q.Len(); l != 0 {
t.Error("Queue not empty")
}
_, ok := q.Get("nodeID")
if ok {
t.Error("Unexpected OK Get()")
}
}
*/
func TestFileQueueGetNodeIDs(t *testing.T) {
q := NewFileQueue()
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
},
}
actual, ok := q.Get("b")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
},
}
actual, ok = q.Get("a")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
expected = queuedBlock{
name: "a-foo",
block: Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
},
index: 1,
}
actual, ok = q.Get("nodeID")
if !ok {
t.Error("Unexpected non-OK Get()")
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("Incorrect block returned\n E: %+v\n A: %+v", expected, actual)
}
}
func TestFileQueueThreadHandling(t *testing.T) {
// This should pass with go test -race
const n = 100
var total int
var blocks []Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
total += i
}
q := NewFileQueue()
q.Add("foo", blocks, nil)
q.SetAvailable("foo", []string{"nodeID"})
var start = make(chan bool)
var gotTot uint32
var wg sync.WaitGroup
wg.Add(n)
for i := 1; i <= n; i++ {
go func() {
<-start
b, _ := q.Get("nodeID")
atomic.AddUint32(&gotTot, uint32(b.block.Offset))
wg.Done()
}()
}
close(start)
wg.Wait()
if int(gotTot) != total {
t.Errorf("Total mismatch; %d != %d", gotTot, total)
}
}

model/model.go

@@ -1,16 +1,5 @@
package model
/*
Locking
=======
The model has read and write locks. These must be acquired as appropriate by
public methods. To prevent deadlock situations, private methods should never
acquire locks, but document what locks they require.
*/
import (
"crypto/sha1"
"errors"
@@ -28,31 +17,50 @@ import (
)
type Model struct {
sync.RWMutex
dir string
global map[string]File // the latest version of each file as it exists in the cluster
local map[string]File // the files we currently have locally on disk
remote map[string]map[string]File
need map[string]bool // the files we need to update
nodes map[string]*protocol.Connection
rawConn map[string]io.ReadWriteCloser
global map[string]File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
pmut sync.RWMutex // protects protoConn and rawConn
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
// Queue for files to fetch. fq can call back into the model, so we must
// hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
lastIdxBcast time.Time
lastIdxBcastRequest time.Time
umut sync.RWMutex // provides updated* and lastIdx*
rwRunning bool
parallellFiles int
paralllelReqs int
delete bool
rwRunning bool
delete bool
initmut sync.Mutex // protects rwRunning and delete
trace map[string]bool
fileLastChanged map[string]time.Time
fileWasSuppressed map[string]int
fmut sync.Mutex // protects fileLastChanged and fileWasSuppressed
parallellRequests int
limitRequestRate chan struct{}
imut sync.Mutex // protects Index
}
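// Editor's note on the struct above: the single Model-wide RWMutex gives
// way to per-field mutexes (gmut, lmut, rmut, pmut, umut, fmut, imut),
// each guarding exactly the fields named in its comment, while FileQueue
// is called with no Model locks held, per the fq comment.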
type Connection interface {
ID() string
Index([]protocol.FileInfo)
Request(name string, offset int64, size uint32, hash []byte) ([]byte, error)
Statistics() protocol.Statistics
}
const (
@@ -77,32 +85,46 @@ func NewModel(dir string) *Model {
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
need: make(map[string]bool),
nodes: make(map[string]*protocol.Connection),
rawConn: make(map[string]io.ReadWriteCloser),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
fileLastChanged: make(map[string]time.Time),
fileWasSuppressed: make(map[string]int),
fq: NewFileQueue(),
dq: make(chan File),
}
go m.broadcastIndexLoop()
return m
}
func (m *Model) LimitRate(kbps int) {
m.limitRequestRate = make(chan struct{}, kbps)
n := kbps/10 + 1
go func() {
for {
time.Sleep(100 * time.Millisecond)
for i := 0; i < n; i++ {
select {
case m.limitRequestRate <- struct{}{}:
default:
}
}
}
}()
}
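// Editor's note on LimitRate: with kbps=100 the channel holds up to 100
// tokens and the loop adds n = 100/10+1 = 11 tokens every 100 ms (about
// 110/s); the request path presumably draws a token per unit sent, so
// sustained output is capped near the configured rate.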
// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
m.Lock()
defer m.Unlock()
m.trace[t] = true
}
// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRW(del bool, pfiles, preqs int) {
m.Lock()
defer m.Unlock()
func (m *Model) StartRW(del bool, threads int) {
m.initmut.Lock()
defer m.initmut.Unlock()
if m.rwRunning {
panic("starting started model")
@@ -110,22 +132,30 @@ func (m *Model) StartRW(del bool, pfiles, preqs int) {
m.rwRunning = true
m.delete = del
m.parallellFiles = pfiles
m.paralllelReqs = preqs
m.parallellRequests = threads
go m.cleanTempFiles()
go m.puller()
if del {
go m.deleteLoop()
}
}
// Generation returns an opaque integer that is guaranteed to increment on
// every change to the local repository or global model.
func (m *Model) Generation() int64 {
m.RLock()
defer m.RUnlock()
m.umut.RLock()
defer m.umut.RUnlock()
return m.updatedLocal + m.updateGlobal
}
func (m *Model) LocalAge() float64 {
m.umut.RLock()
defer m.umut.RUnlock()
return time.Since(time.Unix(m.updatedLocal, 0)).Seconds()
}
type ConnectionInfo struct {
protocol.Statistics
Address string
@@ -137,11 +167,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
RemoteAddr() net.Addr
}
m.RLock()
defer m.RUnlock()
m.pmut.RLock()
var res = make(map[string]ConnectionInfo)
for node, conn := range m.nodes {
for node, conn := range m.protoConn {
ci := ConnectionInfo{
Statistics: conn.Statistics(),
}
@@ -150,14 +179,15 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
}
res[node] = ci
}
m.pmut.RUnlock()
return res
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
m.gmut.RLock()
for _, f := range m.global {
if f.Flags&protocol.FlagDeleted == 0 {
@@ -167,14 +197,15 @@ func (m *Model) GlobalSize() (files, deleted, bytes int) {
deleted++
}
}
m.gmut.RUnlock()
return
}
// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize() (files, deleted, bytes int) {
m.RLock()
defer m.RUnlock()
m.lmut.RLock()
for _, f := range m.local {
if f.Flags&protocol.FlagDeleted == 0 {
@@ -184,14 +215,16 @@ func (m *Model) LocalSize() (files, deleted, bytes int) {
deleted++
}
}
m.lmut.RUnlock()
return
}
// InSyncSize returns the number and total byte size of the local files that
// are in sync with the global model.
func (m *Model) InSyncSize() (files, bytes int) {
m.RLock()
defer m.RUnlock()
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if gf, ok := m.global[n]; ok && f.Equals(gf) {
@@ -199,27 +232,33 @@ func (m *Model) InSyncSize() (files, bytes int) {
bytes += f.Size()
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int) {
m.RLock()
defer m.RUnlock()
qf := m.fq.QueuedFiles()
for n := range m.need {
m.gmut.RLock()
for _, n := range qf {
f := m.global[n]
files = append(files, f)
bytes += f.Size()
}
m.gmut.RUnlock()
return
}
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
log.Printf("NET IDX(in): %s: %d files", nodeID, len(fs))
@@ -229,7 +268,10 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
for _, f := range fs {
m.indexUpdate(repo, f)
}
m.rmut.Lock()
m.remote[nodeID] = repo
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeed()
@@ -238,22 +280,25 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
log.Printf("NET IDXUP(in): %s: %d files", nodeID, len(fs))
}
m.rmut.Lock()
repo, ok := m.remote[nodeID]
if !ok {
log.Printf("WARNING: Index update from node %s that does not have an index", nodeID)
m.rmut.Unlock()
return
}
for _, f := range fs {
m.indexUpdate(repo, f)
}
m.rmut.Unlock()
m.recomputeGlobal()
m.recomputeNeed()
@@ -276,11 +321,13 @@ func (m *Model) indexUpdate(repo map[string]File, f protocol.FileInfo) {
repo[f.Name] = fileFromFileInfo(f)
}
// Close removes the peer from the model and closes the underlyign connection if possible.
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
m.Lock()
defer m.Unlock()
m.fq.RemoveAvailable(node)
m.pmut.Lock()
m.rmut.Lock()
conn, ok := m.rawConn[node]
if ok {
@@ -288,21 +335,28 @@ func (m *Model) Close(node string, err error) {
}
delete(m.remote, node)
delete(m.nodes, node)
delete(m.protoConn, node)
delete(m.rawConn, node)
m.rmut.Unlock()
m.pmut.Unlock()
m.recomputeGlobal()
m.recomputeNeed()
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (m *Model) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
// Verify that the requested file exists in the local and global model.
m.RLock()
m.lmut.RLock()
lf, localOk := m.local[name]
m.lmut.RUnlock()
m.gmut.RLock()
_, globalOk := m.global[name]
m.RUnlock()
m.gmut.RUnlock()
if !localOk || !globalOk {
log.Printf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
return nil, ErrNoSuchFile
@@ -322,44 +376,57 @@ func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []
defer fd.Close()
buf := buffers.Get(int(size))
_, err = fd.ReadAt(buf, int64(offset))
_, err = fd.ReadAt(buf, offset)
if err != nil {
return nil, err
}
if m.limitRequestRate != nil {
for s := 0; s < len(buf); s += 1024 {
<-m.limitRequestRate
}
}
return buf, nil
}
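The drain side above takes one token per started KiB of response data, so the served request rate converges on the LimitRate setting: with kbps set to 100, the filler adds 11 tokens every 100 ms, roughly 110 tokens per second, allowing about 110 KiB/s. The +1 in kbps/10+1 rounds the refill up so that very small settings still make progress.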
// ReplaceLocal replaces the local repository index with the given list of files.
// Change suppression is applied to files changing too often.
func (m *Model) ReplaceLocal(fs []File) {
m.Lock()
defer m.Unlock()
var updated bool
var newLocal = make(map[string]File)
m.lmut.RLock()
for _, f := range fs {
newLocal[f.Name] = f
if ef := m.local[f.Name]; !ef.Equals(f) {
updated = true
}
}
m.lmut.RUnlock()
if m.markDeletedLocals(newLocal) {
updated = true
}
m.lmut.RLock()
if len(newLocal) != len(m.local) {
updated = true
}
m.lmut.RUnlock()
if updated {
m.lmut.Lock()
m.local = newLocal
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeed()
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
@@ -367,13 +434,12 @@ func (m *Model) ReplaceLocal(fs []File) {
// in protocol data types. Does not track deletes; should only be used to seed
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.Lock()
defer m.Unlock()
m.lmut.Lock()
m.local = make(map[string]File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
m.lmut.Unlock()
m.recomputeGlobal()
m.recomputeNeed()
@@ -381,19 +447,12 @@ func (m *Model) SeedLocal(fs []protocol.FileInfo) {
// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID string) bool {
m.RLock()
defer m.RUnlock()
_, ok := m.nodes[nodeID]
m.pmut.RLock()
_, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
return ok
}
// ProtocolIndex returns the current local index in protocol data types.
func (m *Model) ProtocolIndex() []protocol.FileInfo {
m.RLock()
defer m.RUnlock()
return m.protocolIndex()
}
// RepoID returns a unique ID representing the current repository location.
func (m *Model) RepoID() string {
return fmt.Sprintf("%x", sha1.Sum([]byte(m.dir)))
@@ -402,24 +461,59 @@ func (m *Model) RepoID() string {
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(conn io.ReadWriteCloser, nodeID string) {
node := protocol.NewConnection(nodeID, conn, conn, m)
m.Lock()
m.nodes[nodeID] = node
m.rawConn[nodeID] = conn
m.Unlock()
m.RLock()
idx := m.protocolIndex()
m.RUnlock()
func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
nodeID := protoConn.ID()
m.pmut.Lock()
m.protoConn[nodeID] = protoConn
m.rawConn[nodeID] = rawConn
m.pmut.Unlock()
go func() {
node.Index(idx)
idx := m.ProtocolIndex()
protoConn.Index(idx)
}()
m.initmut.Lock()
rw := m.rwRunning
m.initmut.Unlock()
if !rw {
return
}
for i := 0; i < m.parallellRequests; i++ {
i := i
go func() {
if m.trace["pull"] {
log.Println("PULL: Starting", nodeID, i)
}
for {
m.pmut.RLock()
if _, ok := m.protoConn[nodeID]; !ok {
if m.trace["pull"] {
log.Println("PULL: Exiting", nodeID, i)
}
m.pmut.RUnlock()
return
}
m.pmut.RUnlock()
qb, ok := m.fq.Get(nodeID)
if ok {
if m.trace["pull"] {
log.Println("PULL: Request", nodeID, i, qb.name, qb.block.Offset)
}
data, _ := protoConn.Request(qb.name, qb.block.Offset, qb.block.Size, qb.block.Hash)
m.fq.Done(qb.name, qb.block.Offset, data)
} else {
time.Sleep(1 * time.Second)
}
}
}()
}
}
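AddConnection now takes the raw connection only as an io.Closer plus an already-wrapped protocol connection, which is what lets the tests below pass a FakeConnection for both arguments. A hedged wiring sketch for the real case, assuming net and the protocol package are imported; accept itself is an invented helper, while protocol.NewConnection is the constructor visible in the removed code further up in this diff:

// accept shows one plausible call sequence; node ID discovery and TLS
// setup are omitted.
func accept(m *Model, listener net.Listener, nodeID string) error {
	conn, err := listener.Accept()
	if err != nil {
		return err
	}
	protoConn := protocol.NewConnection(nodeID, conn, conn, m)
	m.AddConnection(conn, protoConn) // conn doubles as the io.Closer
	return nil
}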
func (m *Model) shouldSuppressChange(name string) bool {
m.fmut.Lock()
sup := shouldSuppressChange(m.fileLastChanged[name], m.fileWasSuppressed[name])
if sup {
m.fileWasSuppressed[name]++
@@ -427,6 +521,7 @@ func (m *Model) shouldSuppressChange(name string) bool {
m.fileWasSuppressed[name] = 0
m.fileLastChanged[name] = time.Now()
}
m.fmut.Unlock()
return sup
}
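The method above only manages the per-file counters; the decision itself lives in the pure shouldSuppressChange(lastChange, numChanges) helper, whose body is mostly elided from this hunk (only its trailing return false survives below). A sketch of a plausible heuristic, with invented thresholds, purely for illustration:

// shouldSuppressChangeSketch is an assumed stand-in; the real thresholds
// are not shown in this diff.
func shouldSuppressChangeSketch(lastChange time.Time, numChanges int) bool {
	if numChanges > 10 {
		return true // changing constantly; keep suppressing
	}
	// Suppress rapid rescans: the file changed again within a minute.
	return time.Since(lastChange) < time.Minute
}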
@@ -441,10 +536,13 @@ func shouldSuppressChange(lastChange time.Time, numChanges int) bool {
return false
}
// protocolIndex returns the current local index in protocol data types.
// ProtocolIndex returns the current local index in protocol data types.
// Must be called with the read lock held.
func (m *Model) protocolIndex() []protocol.FileInfo {
func (m *Model) ProtocolIndex() []protocol.FileInfo {
var index []protocol.FileInfo
m.lmut.RLock()
for _, f := range m.local {
mf := fileInfoFromFile(f)
if m.trace["idx"] {
@@ -456,13 +554,16 @@ func (m *Model) protocolIndex() []protocol.FileInfo {
}
index = append(index, mf)
}
m.lmut.RUnlock()
return index
}
func (m *Model) requestGlobal(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
m.RLock()
nc, ok := m.nodes[nodeID]
m.RUnlock()
func (m *Model) requestGlobal(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
m.pmut.RLock()
nc, ok := m.protoConn[nodeID]
m.pmut.RUnlock()
if !ok {
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
}
@@ -476,29 +577,35 @@ func (m *Model) requestGlobal(nodeID, name string, offset uint64, size uint32, h
func (m *Model) broadcastIndexLoop() {
for {
m.RLock()
m.umut.RLock()
bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
m.RUnlock()
m.umut.RUnlock()
maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
m.Lock()
idx := m.ProtocolIndex()
var indexWg sync.WaitGroup
indexWg.Add(len(m.nodes))
idx := m.protocolIndex()
indexWg.Add(len(m.protoConn))
m.umut.Lock()
m.lastIdxBcast = time.Now()
for _, node := range m.nodes {
m.umut.Unlock()
m.pmut.RLock()
for _, node := range m.protoConn {
node := node
if m.trace["net"] {
log.Printf("NET IDX(out/loop): %s: %d files", node.ID, len(idx))
log.Printf("NET IDX(out/loop): %s: %d files", node.ID(), len(idx))
}
go func() {
node.Index(idx)
indexWg.Done()
}()
}
m.Unlock()
m.pmut.RUnlock()
indexWg.Wait()
}
time.Sleep(idxBcastHoldtime)
@@ -506,13 +613,16 @@ func (m *Model) broadcastIndexLoop() {
}
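The loop debounces index broadcasts: one is sent only when a broadcast has been requested since the last one went out, and either no new request has arrived for idxBcastHoldtime (changes have quieted down) or idxBcastMaxDelay has passed since the last broadcast (so a steady stream of changes cannot postpone it forever). With hypothetical values of 50 ms and 2 s, a burst of file changes yields a single broadcast about 50 ms after the burst ends, while continuous churn still broadcasts at least every 2 s.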
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
// Must be called with the write lock held.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
// For every file in the existing local table, check whether it is also
// present in the new local table. If it is not, and we already had the
// newest version available according to the global table, note the file
// as having been deleted.
var updated bool
m.gmut.RLock()
m.lmut.RLock()
for n, f := range m.local {
if _, ok := newLocal[n]; !ok {
if gf := m.global[n]; !gf.NewerThan(f) {
@@ -526,39 +636,74 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
}
}
}
m.lmut.RUnlock()
m.gmut.RUnlock()
return updated
}
func (m *Model) updateLocal(f File) {
var updated bool
m.lmut.Lock()
if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
m.local[f.Name] = f
updated = true
}
m.lmut.Unlock()
if updated {
m.recomputeGlobal()
m.recomputeNeed()
// We don't call recomputeNeed here for two reasons:
// - a need shouldn't arise from having a newer local file
// - recomputeNeed might call into fq.Add, but we might have been called
//   by fq, which would deadlock on fq
m.umut.Lock()
m.updatedLocal = time.Now().Unix()
m.lastIdxBcastRequest = time.Now()
m.umut.Unlock()
}
}
// Must be called with the write lock held.
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
m.lmut.RLock()
for n, f := range m.local {
newGlobal[n] = f
}
m.lmut.RUnlock()
for _, fs := range m.remote {
var available = make(map[string][]string)
m.rmut.RLock()
var highestMod int64
for nodeID, fs := range m.remote {
for n, nf := range fs {
if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
newGlobal[n] = nf
available[n] = []string{nodeID}
if nf.Modified > highestMod {
highestMod = nf.Modified
}
} else if lf.Equals(nf) {
available[n] = append(available[n], nodeID)
}
}
}
m.rmut.RUnlock()
for f, ns := range available {
m.fq.SetAvailable(f, ns)
}
// Figure out if anything actually changed
m.gmut.RLock()
var updated bool
if len(newGlobal) != len(m.global) {
if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
updated = true
} else {
for n, f0 := range newGlobal {
@@ -568,18 +713,35 @@ func (m *Model) recomputeGlobal() {
}
}
}
m.gmut.RUnlock()
if updated {
m.updateGlobal = time.Now().Unix()
m.gmut.Lock()
m.umut.Lock()
m.global = newGlobal
m.updateGlobal = time.Now().Unix()
m.umut.Unlock()
m.gmut.Unlock()
}
}
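Availability falls out of the same pass that builds the new global table: the node that first contributes the newest version of a file becomes its sole entry in available, and every other node whose copy Equals that version is appended. If nodes 42 and 43 both announce foo at the same newest version, available["foo"] ends up holding both IDs, and fq.SetAvailable hands that list to the puller for node selection.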
// Must be called with the write lock held.
func (m *Model) recomputeNeed() {
m.need = make(map[string]bool)
type addOrder struct {
n string
remote []Block
fm *fileMonitor
}
var toDelete []File
var toAdd []addOrder
m.gmut.RLock()
for n, gf := range m.global {
m.lmut.RLock()
lf, ok := m.local[n]
m.lmut.RUnlock()
if !ok || gf.NewerThan(lf) {
if gf.Flags&protocol.FlagInvalid != 0 {
// Never attempt to sync invalid files
@@ -594,17 +756,43 @@ func (m *Model) recomputeNeed() {
continue
}
if m.trace["need"] {
log.Println("NEED:", ok, lf, gf)
log.Printf("NEED: lf:%v gf:%v", lf, gf)
}
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: n,
path: path.Clean(path.Join(m.dir, n)),
global: gf,
model: m,
localBlocks: local,
}
toAdd = append(toAdd, addOrder{n, remote, &fm})
}
m.need[n] = true
}
}
m.gmut.RUnlock()
for _, ao := range toAdd {
if !m.fq.Queued(ao.n) {
m.fq.Add(ao.n, ao.remote, ao.fm)
}
}
for _, gf := range toDelete {
m.dq <- gf
}
}
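recomputeNeed now queues work instead of maintaining a need map: BlockDiff splits the global file's block list into blocks already present in the local file and blocks that must be fetched, and the fileMonitor carries the reusable local blocks. A hedged illustration of the split, assumed to sit inside some function with lf and gf as the local and global File values:

local, remote := BlockDiff(lf.Blocks, gf.Blocks)
// local: blocks we can copy out of the existing file on disk
// remote: blocks to request from peers via the file queue
fmt.Printf("reuse %d blocks, fetch %d blocks\n", len(local), len(remote))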
// Must be called with the read lock held.
func (m *Model) whoHas(name string) []string {
func (m *Model) WhoHas(name string) []string {
var remote []string
m.gmut.RLock()
m.rmut.RLock()
gf := m.global[name]
for node, files := range m.remote {
if file, ok := files[name]; ok && file.Equals(gf) {
@@ -612,41 +800,58 @@ func (m *Model) whoHas(name string) []string {
}
}
m.rmut.RUnlock()
m.gmut.RUnlock()
return remote
}
func (m *Model) deleteLoop() {
for file := range m.dq {
if m.trace["file"] {
log.Println("FILE: Delete", file.Name)
}
path := path.Clean(path.Join(m.dir, file.Name))
err := os.Remove(path)
if err != nil {
log.Printf("WARNING: %s: %v", file.Name, err)
}
m.updateLocal(file)
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks []Block
var offset uint64
for _, b := range f.Blocks {
blocks = append(blocks, Block{
var blocks = make([]Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
Offset: offset,
Length: b.Length,
Size: b.Size,
Hash: b.Hash,
})
offset += uint64(b.Length)
}
offset += int64(b.Size)
}
return File{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}
}
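Offsets are reconstructed as a running sum of block sizes, since the wire format carries only sizes: for example, blocks of 7, 10 and 9 bytes come back with offsets 0, 7 and 17.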
func fileInfoFromFile(f File) protocol.FileInfo {
var blocks []protocol.BlockInfo
for _, b := range f.Blocks {
blocks = append(blocks, protocol.BlockInfo{
Length: b.Length,
Hash: b.Hash,
})
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
Size: b.Size,
Hash: b.Hash,
}
}
return protocol.FileInfo{
Name: f.Name,
Flags: f.Flags,
Modified: int64(f.Modified),
Modified: f.Modified,
Version: f.Version,
Blocks: blocks,
}

View File

@@ -1,258 +0,0 @@
package model
/*
Locking
=======
These methods are never called from the outside so don't follow the locking
policy in model.go.
TODO(jb): Refactor this into smaller and cleaner pieces.
TODO(jb): Increase performance by taking apparent peer bandwidth into account.
*/
import (
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
)
func (m *Model) pullFile(name string) error {
m.RLock()
var localFile = m.local[name]
var globalFile = m.global[name]
var nodeIDs = m.whoHas(name)
m.RUnlock()
if len(nodeIDs) == 0 {
return fmt.Errorf("%s: no connected nodes with file available", name)
}
filename := path.Join(m.dir, name)
sdir := path.Dir(filename)
_, err := os.Stat(sdir)
if err != nil && os.IsNotExist(err) {
os.MkdirAll(sdir, 0777)
}
tmpFilename := tempName(filename, globalFile.Modified)
tmpFile, err := os.Create(tmpFilename)
if err != nil {
return err
}
contentChan := make(chan content, 32)
var applyDone sync.WaitGroup
applyDone.Add(1)
go func() {
applyContent(contentChan, tmpFile)
tmpFile.Close()
applyDone.Done()
}()
local, remote := BlockDiff(localFile.Blocks, globalFile.Blocks)
var fetchDone sync.WaitGroup
// One local copy routine
fetchDone.Add(1)
go func() {
for _, block := range local {
data, err := m.Request("<local>", name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}()
// N remote copy routines
var remoteBlocks = blockIterator{blocks: remote}
for i := 0; i < m.paralllelReqs; i++ {
curNode := nodeIDs[i%len(nodeIDs)]
fetchDone.Add(1)
go func(nodeID string) {
for {
block, ok := remoteBlocks.Next()
if !ok {
break
}
data, err := m.requestGlobal(nodeID, name, block.Offset, block.Length, block.Hash)
if err != nil {
break
}
contentChan <- content{
offset: int64(block.Offset),
data: data,
}
}
fetchDone.Done()
}(curNode)
}
fetchDone.Wait()
close(contentChan)
applyDone.Wait()
err = hashCheck(tmpFilename, globalFile.Blocks)
if err != nil {
return fmt.Errorf("%s: %s (deleting)", path.Base(name), err.Error())
}
err = os.Chtimes(tmpFilename, time.Unix(globalFile.Modified, 0), time.Unix(globalFile.Modified, 0))
if err != nil {
return err
}
err = os.Chmod(tmpFilename, os.FileMode(globalFile.Flags&0777))
if err != nil {
return err
}
err = os.Rename(tmpFilename, filename)
if err != nil {
return err
}
return nil
}
func (m *Model) puller() {
for {
time.Sleep(time.Second)
var ns []string
m.RLock()
for n := range m.need {
ns = append(ns, n)
}
m.RUnlock()
if len(ns) == 0 {
continue
}
var limiter = make(chan bool, m.parallellFiles)
var allDone sync.WaitGroup
for _, n := range ns {
limiter <- true
allDone.Add(1)
go func(n string) {
defer func() {
allDone.Done()
<-limiter
}()
m.RLock()
f, ok := m.global[n]
m.RUnlock()
if !ok {
return
}
var err error
if f.Flags&protocol.FlagDeleted == 0 {
if m.trace["file"] {
log.Printf("FILE: Pull %q", n)
}
err = m.pullFile(n)
} else {
if m.trace["file"] {
log.Printf("FILE: Remove %q", n)
}
// Cheerfully ignore errors here
_ = os.Remove(path.Join(m.dir, n))
}
if err == nil {
m.Lock()
m.updateLocal(f)
m.Unlock()
}
}(n)
}
allDone.Wait()
}
}
type content struct {
offset int64
data []byte
}
func applyContent(cc <-chan content, dst io.WriterAt) error {
var err error
for c := range cc {
_, err = dst.WriteAt(c.data, c.offset)
buffers.Put(c.data)
if err != nil {
return err
}
}
return nil
}
func hashCheck(name string, correct []Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
if err != nil {
return err
}
if len(current) != len(correct) {
return errors.New("incorrect number of blocks")
}
for i := range current {
if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
return fmt.Errorf("hash mismatch: %x != %x", current[i], correct[i])
}
}
return nil
}
type blockIterator struct {
sync.Mutex
blocks []Block
}
func (i *blockIterator) Next() (b Block, ok bool) {
i.Lock()
defer i.Unlock()
if len(i.blocks) == 0 {
return
}
b, i.blocks = i.blocks[0], i.blocks[1:]
ok = true
return
}

View File

@@ -2,6 +2,7 @@ package model
import (
"bytes"
"fmt"
"os"
"reflect"
"testing"
@@ -17,7 +18,7 @@ func TestNewModel(t *testing.T) {
t.Fatalf("NewModel returned nil")
}
if len(m.need) > 0 {
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Errorf("New model should have no Need")
}
@@ -31,19 +32,13 @@ var testDataExpected = map[string]File{
Name: "foo",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"bar": File{
Name: "bar",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
"baz/quux": File{
Name: "baz/quux",
Flags: 0,
Modified: 0,
Blocks: []Block{{Offset: 0x0, Length: 0x9, Hash: []uint8{0xc1, 0x54, 0xd9, 0x4e, 0x94, 0xba, 0x72, 0x98, 0xa6, 0xad, 0xb0, 0x52, 0x3a, 0xfe, 0x34, 0xd1, 0xb6, 0xa5, 0x81, 0xd6, 0xb8, 0x93, 0xa7, 0x63, 0xd4, 0x5d, 0xdc, 0x5e, 0x20, 0x9d, 0xcb, 0x83}}},
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
@@ -62,7 +57,7 @@ func TestUpdateLocal(t *testing.T) {
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
if len(m.need) > 0 {
if fs, _ := m.NeedFiles(); len(fs) > 0 {
t.Fatalf("Model with only local data should have no need")
}
@@ -111,8 +106,8 @@ func TestRemoteUpdateExisting(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l := len(m.need); l != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", l)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model missing Need for one file (%d != 1)", len(fs))
}
}
@@ -128,8 +123,8 @@ func TestRemoteAddNew(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(m.need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs))
}
}
@@ -146,8 +141,8 @@ func TestRemoteUpdateOld(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
}
@@ -170,16 +165,16 @@ func TestRemoteIndexUpdate(t *testing.T) {
m.Index("42", []protocol.FileInfo{foo})
if _, ok := m.need["foo"]; !ok {
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
m.IndexUpdate("42", []protocol.FileInfo{bar})
if _, ok := m.need["foo"]; !ok {
if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
t.Error("Model doesn't need 'foo'")
}
if _, ok := m.need["bar"]; !ok {
if fs, _ := m.NeedFiles(); fs[1].Name != "bar" {
t.Error("Model doesn't need 'bar'")
}
}
@@ -297,8 +292,8 @@ func TestForgetNode(t *testing.T) {
if l1, l2 := len(m.global), len(fs); l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 0 {
t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
}
newFile := protocol.FileInfo{
@@ -308,14 +303,21 @@ func TestForgetNode(t *testing.T) {
}
m.Index("42", []protocol.FileInfo{newFile})
newFile = protocol.FileInfo{
Name: "new file 2",
Modified: time.Now().Unix(),
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
m.Index("43", []protocol.FileInfo{newFile})
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
if l1, l2 := len(m.global), len(fs)+2; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 1; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 2 {
t.Errorf("Model len(need) incorrect (%d != 2)", len(fs))
}
m.Close("42", nil)
@@ -323,11 +325,12 @@ func TestForgetNode(t *testing.T) {
if l1, l2 := len(m.local), len(fs); l1 != l2 {
t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.global), len(fs); l1 != l2 {
if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
}
if l1, l2 := len(m.need), 0; l1 != l2 {
t.Errorf("Model len(need) incorrect (%d != %d)", l1, l2)
if fs, _ := m.NeedFiles(); len(fs) != 1 {
t.Errorf("Model len(need) incorrect (%d != 1)", len(fs))
}
}
@@ -406,3 +409,113 @@ func TestIgnoreWithUnknownFlags(t *testing.T) {
t.Error("Model not should include", invalid)
}
}
func prepareModel(n int, m *Model) []protocol.FileInfo {
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
m.Index("42", files)
return files
}
func BenchmarkRecomputeGlobal10k(b *testing.B) {
m := NewModel("testdata")
prepareModel(10000, m)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.recomputeGlobal()
}
}
func BenchmarkRecomputeNeed10K(b *testing.B) {
m := NewModel("testdata")
prepareModel(10000, m)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.recomputeNeed()
}
}
func BenchmarkIndexUpdate10000(b *testing.B) {
m := NewModel("testdata")
files := prepareModel(10000, m)
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.IndexUpdate("42", files)
}
}
type FakeConnection struct {
id string
requestData []byte
}
func (FakeConnection) Close() error {
return nil
}
func (f FakeConnection) ID() string {
return string(f.id)
}
func (FakeConnection) Index([]protocol.FileInfo) {}
func (f FakeConnection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
return f.requestData, nil
}
func (FakeConnection) Ping() bool {
return true
}
func (FakeConnection) Statistics() protocol.Statistics {
return protocol.Statistics{}
}
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata")
fs, _ := m.Walk(false)
m.ReplaceLocal(fs)
const n = 1000
files := make([]protocol.FileInfo, n)
t := time.Now().Unix()
for i := 0; i < n; i++ {
files[i] = protocol.FileInfo{
Name: fmt.Sprintf("file%d", i),
Modified: t,
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
}
}
fc := FakeConnection{
id: "42",
requestData: []byte("some data to return"),
}
m.AddConnection(fc, fc)
m.Index("42", files)
b.ResetTimer()
for i := 0; i < b.N; i++ {
data, err := m.requestGlobal("42", files[i%n].Name, 0, 32, nil)
if err != nil {
b.Error(err)
}
if data == nil {
b.Error("nil data")
}
}
}

View File

@@ -26,11 +26,16 @@ type File struct {
func (f File) Size() (bytes int) {
for _, b := range f.Blocks {
bytes += int(b.Length)
bytes += int(b.Size)
}
return
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d:, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
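Equals compares only version metadata, so two File values agree whenever Modified and Version match, regardless of Name, Flags or Blocks. For example, inside a test, say:

a := File{Name: "x", Flags: 0644, Modified: 10, Version: 2}
b := File{Name: "x", Flags: 0755, Modified: 10, Version: 2}
fmt.Println(a.Equals(b)) // true: only Modified and Version participate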
@@ -49,16 +54,12 @@ func tempName(name string, modified int64) string {
return path.Join(tdir, tname)
}
func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFunc {
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
@@ -75,42 +76,68 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
}
}
ign[pn] = patterns
}
return nil
}
}
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if _, sn := path.Split(rn); sn == ".stignore" {
// We never sync the .stignore files
return nil
}
if ignoreFile(ign, rn) {
if m.trace["file"] {
log.Println("FILE: IGNORE:", rn)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
fi, err := os.Stat(p)
if err != nil {
return nil
}
modified := fi.ModTime().Unix()
modified := info.ModTime().Unix()
m.RLock()
hf, ok := m.local[rn]
m.RUnlock()
m.lmut.RLock()
lf, ok := m.local[rn]
m.lmut.RUnlock()
if ok && hf.Modified == modified {
if nf := uint32(info.Mode()); nf != hf.Flags {
hf.Flags = nf
hf.Version++
if ok && lf.Modified == modified {
if nf := uint32(info.Mode()); nf != lf.Flags {
lf.Flags = nf
lf.Version++
}
*res = append(*res, hf)
*res = append(*res, lf)
} else {
m.Lock()
if m.shouldSuppressChange(rn) {
if m.trace["file"] {
log.Println("FILE: SUPPRESS:", rn, m.fileWasSuppressed[rn], time.Since(m.fileLastChanged[rn]))
}
if ok {
hf.Flags = protocol.FlagInvalid
hf.Version++
*res = append(*res, hf)
lf.Flags = protocol.FlagInvalid
lf.Version++
*res = append(*res, lf)
}
m.Unlock()
return nil
}
m.Unlock()
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
@@ -149,8 +176,11 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
fn := m.genWalker(&files, ignore)
filepath.Walk(m.dir, fn)
hashFiles := m.walkAndHashFiles(&files, ignore)
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
filepath.Walk(m.dir, hashFiles)
if followSymlinks {
d, err := os.Open(m.dir)
@@ -164,9 +194,11 @@ func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]str
return
}
for _, fi := range fis {
if fi.Mode()&os.ModeSymlink != 0 {
filepath.Walk(path.Join(m.dir, fi.Name())+"/", fn)
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(m.dir, info.Name()) + "/"
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
filepath.Walk(dir, hashFiles)
}
}
}
@@ -174,14 +206,6 @@ func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]str
return
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed. Patterns marked in .stignore files
// are removed from the results.
func (m *Model) FilteredWalk(followSymlinks bool) []File {
var files, ignored = m.Walk(followSymlinks)
return ignoreFilter(ignored, files)
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -199,20 +223,16 @@ func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
func ignoreFilter(patterns map[string][]string, files []File) (filtered []File) {
nextFile:
for _, f := range files {
first, last := path.Split(f.Name)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
continue nextFile
}
func ignoreFile(patterns map[string][]string, file string) bool {
first, last := path.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
return true
}
}
}
filtered = append(filtered, f)
}
return filtered
return false
}
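ignoreFile replaces the slice-filtering ignoreFilter with a per-file predicate. The patterns map is keyed by directory prefix, with the empty key applying everywhere, and patterns match only the file's base name. A usage sketch borrowing the pattern set from the test below:

patterns := map[string][]string{
	"":        {"t2"},
	"foo":     {"bar", "z*"},
	"foo/baz": {"quux", ".*"},
}
fmt.Println(ignoreFile(patterns, "foo/zuux"))  // true: base name matches z* under foo
fmt.Println(ignoreFile(patterns, "foo/qzuux")) // false: z* anchors at the start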

View File

@@ -13,7 +13,6 @@ var testdata = []struct {
hash string
}{
{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
{"baz/quux", 9, "c154d94e94ba7298a6adb0523afe34d1b6a581d6b893a763d45ddc5e209dcb83"},
{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
}
@@ -50,53 +49,34 @@ func TestWalk(t *testing.T) {
}
}
func TestFilteredWalk(t *testing.T) {
m := NewModel("testdata")
files := m.FilteredWalk(false)
if len(files) != 2 {
t.Fatalf("Incorrect number of walked filtered files %d != 2", len(files))
}
if files[0].Name != "bar" {
t.Error("Incorrect first file", files[0])
}
if files[1].Name != "foo" {
t.Error("Incorrect second file", files[1])
}
}
func TestIgnore(t *testing.T) {
var patterns = map[string][]string{
"": {"t2"},
"foo": {"bar", "z*"},
"foo/baz": {"quux", ".*"},
}
var files = []File{
{Name: "foo/bar"},
{Name: "foo/quux"},
{Name: "foo/zuux"},
{Name: "foo/qzuux"},
{Name: "foo/baz/t1"},
{Name: "foo/baz/t2"},
{Name: "foo/baz/bar"},
{Name: "foo/baz/quuxa"},
{Name: "foo/baz/aquux"},
{Name: "foo/baz/.quux"},
{Name: "foo/baz/zquux"},
{Name: "foo/baz/quux"},
{Name: "foo/bazz/quux"},
}
var remaining = []File{
{Name: "foo/quux"},
{Name: "foo/qzuux"},
{Name: "foo/baz/t1"},
{Name: "foo/baz/quuxa"},
{Name: "foo/baz/aquux"},
{Name: "foo/bazz/quux"},
var tests = []struct {
f string
r bool
}{
{"foo/bar", true},
{"foo/quux", false},
{"foo/zuux", true},
{"foo/qzuux", false},
{"foo/baz/t1", false},
{"foo/baz/t2", true},
{"foo/baz/bar", true},
{"foo/baz/quuxa", false},
{"foo/baz/aquux", false},
{"foo/baz/.quux", true},
{"foo/baz/zquux", true},
{"foo/baz/quux", true},
{"foo/bazz/quux", false},
}
var filtered = ignoreFilter(patterns, files)
if !reflect.DeepEqual(filtered, remaining) {
t.Errorf("Filtering mismatch\n %v\n %v", remaining, filtered)
for i, tc := range tests {
if r := ignoreFile(patterns, tc.f); r != tc.r {
t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
}
}
}

View File

@@ -5,7 +5,7 @@ import "io"
type TestModel struct {
data []byte
name string
offset uint64
offset int64
size uint32
hash []byte
closed bool
@@ -17,7 +17,7 @@ func (t *TestModel) Index(nodeID string, files []FileInfo) {
func (t *TestModel) IndexUpdate(nodeID string, files []FileInfo) {
}
func (t *TestModel) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (t *TestModel) Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error) {
t.name = name
t.offset = offset
t.size = size

View File

@@ -4,7 +4,7 @@ import "io"
type request struct {
name string
offset uint64
offset int64
size uint32
hash []byte
}
@@ -42,7 +42,7 @@ func (w *marshalWriter) writeIndex(idx []FileInfo) {
w.writeUint32(f.Version)
w.writeUint32(uint32(len(f.Blocks)))
for _, b := range f.Blocks {
w.writeUint32(b.Length)
w.writeUint32(b.Size)
w.writeBytes(b.Hash)
}
}
@@ -56,7 +56,7 @@ func WriteIndex(w io.Writer, idx []FileInfo) (int, error) {
func (w *marshalWriter) writeRequest(r request) {
w.writeString(r.name)
w.writeUint64(r.offset)
w.writeUint64(uint64(r.offset))
w.writeUint32(r.size)
w.writeBytes(r.hash)
}
@@ -82,7 +82,7 @@ func (r *marshalReader) readIndex() []FileInfo {
nblocks := r.readUint32()
blocks := make([]BlockInfo, nblocks)
for j := range blocks {
blocks[j].Length = r.readUint32()
blocks[j].Size = r.readUint32()
blocks[j].Hash = r.readBytes()
}
files[i].Blocks = blocks
@@ -100,7 +100,7 @@ func ReadIndex(r io.Reader) ([]FileInfo, error) {
func (r *marshalReader) readRequest() request {
var req request
req.name = r.readString()
req.offset = r.readUint64()
req.offset = int64(r.readUint64())
req.size = r.readUint32()
req.hash = r.readBytes()
return req

View File

@@ -46,7 +46,7 @@ func TestIndex(t *testing.T) {
}
func TestRequest(t *testing.T) {
f := func(name string, offset uint64, size uint32, hash []byte) bool {
f := func(name string, offset int64, size uint32, hash []byte) bool {
var buf = new(bytes.Buffer)
var req = request{name, offset, size, hash}
var wr = marshalWriter{w: buf}

View File

@@ -34,8 +34,8 @@ type FileInfo struct {
}
type BlockInfo struct {
Length uint32
Hash []byte
Size uint32
Hash []byte
}
type Model interface {
@@ -44,7 +44,7 @@ type Model interface {
// An index update was received from the peer node
IndexUpdate(nodeID string, files []FileInfo)
// A request was made by the peer node
Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error)
Request(nodeID, name string, offset int64, size uint32, hash []byte) ([]byte, error)
// The peer node closed the connection
Close(nodeID string, err error)
}
@@ -52,7 +52,7 @@ type Model interface {
type Connection struct {
sync.RWMutex
ID string
id string
receiver Model
reader io.Reader
mreader *marshalReader
@@ -89,13 +89,13 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
}
c := Connection{
id: nodeID,
receiver: receiver,
reader: flrd,
mreader: &marshalReader{r: flrd},
writer: flwr,
mwriter: &marshalWriter{w: flwr},
awaiting: make(map[int]chan asyncResult),
ID: nodeID,
}
go c.readerLoop()
@@ -104,6 +104,10 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
return &c
}
func (c *Connection) ID() string {
return c.id
}
// Index writes the list of file information to the connected peer node
func (c *Connection) Index(idx []FileInfo) {
c.Lock()
@@ -137,16 +141,16 @@ func (c *Connection) Index(idx []FileInfo) {
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
return
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return
}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *Connection) Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
func (c *Connection) Request(name string, offset int64, size uint32, hash []byte) ([]byte, error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -158,13 +162,13 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
c.mwriter.writeRequest(request{name, offset, size, hash})
if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return nil, c.mwriter.err
}
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return nil, err
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -177,7 +181,7 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
return res.val, res.err
}
func (c *Connection) Ping() bool {
func (c *Connection) ping() bool {
c.Lock()
if c.closed {
c.Unlock()
@@ -189,11 +193,11 @@ func (c *Connection) Ping() bool {
err := c.flush()
if err != nil {
c.Unlock()
c.Close(err)
c.close(err)
return false
} else if c.mwriter.err != nil {
c.Unlock()
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
return false
}
c.nextId = (c.nextId + 1) & 0xfff
@@ -203,9 +207,6 @@ func (c *Connection) Ping() bool {
return ok && res.err == nil
}
func (c *Connection) Stop() {
}
type flusher interface {
Flush() error
}
@@ -217,7 +218,7 @@ func (c *Connection) flush() error {
return nil
}
func (c *Connection) Close(err error) {
func (c *Connection) close(err error) {
c.Lock()
if c.closed {
c.Unlock()
@@ -230,7 +231,7 @@ func (c *Connection) Close(err error) {
c.awaiting = nil
c.Unlock()
c.receiver.Close(c.ID, err)
c.receiver.Close(c.id, err)
}
func (c *Connection) isClosed() bool {
@@ -244,11 +245,11 @@ loop:
for {
hdr := c.mreader.readHeader()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
if hdr.version != 0 {
c.Close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
c.close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
break loop
}
@@ -256,10 +257,10 @@ loop:
case messageTypeIndex:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.Index(c.ID, files)
c.receiver.Index(c.id, files)
}
c.Lock()
c.hasRecvdIndex = true
@@ -268,16 +269,16 @@ loop:
case messageTypeIndexUpdate:
files := c.mreader.readIndex()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.receiver.IndexUpdate(c.ID, files)
c.receiver.IndexUpdate(c.id, files)
}
case messageTypeRequest:
req := c.mreader.readRequest()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
}
go c.processRequest(hdr.msgID, req)
@@ -286,7 +287,7 @@ loop:
data := c.mreader.readResponse()
if c.mreader.err != nil {
c.Close(c.mreader.err)
c.close(c.mreader.err)
break loop
} else {
c.Lock()
@@ -306,10 +307,10 @@ loop:
err := c.flush()
c.Unlock()
if err != nil {
c.Close(err)
c.close(err)
break loop
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(c.mwriter.err)
break loop
}
@@ -328,26 +329,27 @@ loop:
}
default:
c.Close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
c.close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
break loop
}
}
}
func (c *Connection) processRequest(msgID int, req request) {
data, _ := c.receiver.Request(c.ID, req.name, req.offset, req.size, req.hash)
data, _ := c.receiver.Request(c.id, req.name, req.offset, req.size, req.hash)
c.Lock()
c.mwriter.writeUint32(encodeHeader(header{0, msgID, messageTypeResponse}))
c.mwriter.writeResponse(data)
err := c.flush()
err := c.mwriter.err
if err == nil {
err = c.flush()
}
c.Unlock()
buffers.Put(data)
if err != nil {
c.Close(err)
} else if c.mwriter.err != nil {
c.Close(c.mwriter.err)
c.close(err)
}
}
@@ -362,15 +364,15 @@ func (c *Connection) pingerLoop() {
if ready {
go func() {
rc <- c.Ping()
rc <- c.ping()
}()
select {
case ok := <-rc:
if !ok {
c.Close(fmt.Errorf("Ping failure"))
c.close(fmt.Errorf("Ping failure"))
}
case <-time.After(pingTimeout):
c.Close(fmt.Errorf("Ping timeout"))
c.close(fmt.Errorf("Ping timeout"))
}
}
}

View File

@@ -46,10 +46,10 @@ func TestPing(t *testing.T) {
c0 := NewConnection("c0", ar, bw, nil)
c1 := NewConnection("c1", br, aw, nil)
if ok := c0.Ping(); !ok {
if ok := c0.ping(); !ok {
t.Error("c0 ping failed")
}
if ok := c1.Ping(); !ok {
if ok := c1.ping(); !ok {
t.Error("c1 ping failed")
}
}
@@ -70,7 +70,7 @@ func TestPingErr(t *testing.T) {
c0 := NewConnection("c0", ar, ebw, m0)
NewConnection("c1", br, eaw, m1)
res := c0.Ping()
res := c0.ping()
if (i < 4 || j < 4) && res {
t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
} else if (i >= 8 && j >= 8) && !res {
@@ -190,7 +190,7 @@ func TestClose(t *testing.T) {
c0 := NewConnection("c0", ar, bw, m0)
NewConnection("c1", br, aw, m1)
c0.Close(nil)
c0.close(nil)
ok := c0.isClosed()
if !ok {
@@ -199,7 +199,7 @@ func TestClose(t *testing.T) {
// None of these should panic, some should return an error
ok = c0.Ping()
ok = c0.ping()
if ok {
t.Error("Ping should not return true")
}