Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-02 19:09:11 -05:00)

Compare commits (17 commits)
| SHA1 |
|---|
| 832c0ffad0 |
| cb33f27f23 |
| 92dee7c082 |
| b9af45bc6b |
| a18f6c6d90 |
| 6e11e3cda9 |
| 2935aebe53 |
| 71f78f0d62 |
| 3e1194e5ff |
| 6d64992e64 |
| 211180108e |
| 17e78d6f7e |
| 1ef86379fb |
| 884a7d6a1b |
| 334961fe10 |
| 2cfb24892f |
| d4fe1400d2 |

build.sh (36 lines changed)

@@ -3,21 +3,27 @@
version=$(git describe --always)
buildDir=dist

if [[ -z $1 ]] ; then
if [[ $1 == "-f" ]] ; then
fast=yes
shift
fi

if [[ $fast != yes ]] ; then
go get -d
go test ./...
fi

if [[ -z $1 ]] ; then
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui
elif [[ $1 == "tar" ]] ; then
go test ./...
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
else
go test ./... || exit 1

elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1

@@ -38,6 +44,26 @@ else
done
done

for goos in linux ; do
for goarm in 5 6 7 ; do
for goarch in arm ; do
echo "$goos-${goarch}v$goarm"
export GOARM="$goarm"
export GOOS="$goos"
export GOARCH="$goarch"
export name="syncthing-$goos-${goarch}v$goarm"
go build -ldflags "-X main.Version $version" \
&& nrsc syncthing gui \
&& mkdir -p "$name" \
&& cp syncthing "$buildDir/$name" \
&& cp README.md LICENSE "$name" \
&& mv syncthing "$name" \
&& tar zcf "$buildDir/$name.tar.gz" "$name" \
&& rm -r "$name"
done
done
done

for goos in windows ; do
for goarch in amd64 386 ; do
echo "$goos-$goarch"

gui.go (28 lines changed)

@@ -8,6 +8,8 @@ import (
"mime"
"net/http"
"path/filepath"
"runtime"
"sync"

"bitbucket.org/tebeka/nrsc"
"github.com/calmh/syncthing/model"
@@ -22,6 +24,7 @@ func startGUI(addr string, m *model.Model) {
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
router.Get("/rest/need", restGetNeed)
router.Get("/rest/system", restGetSystem)

go func() {
mr := martini.New()
@@ -29,8 +32,12 @@ func startGUI(addr string, m *model.Model) {
mr.Use(martini.Recovery())
mr.Action(router.Handle)
mr.Map(m)
http.ListenAndServe(addr, mr)
err := http.ListenAndServe(addr, mr)
if err != nil {
warnln("GUI not possible:", err)
}
}()

}

func getRoot(w http.ResponseWriter, r *http.Request) {
@@ -97,6 +104,25 @@ func restGetNeed(m *model.Model, w http.ResponseWriter) {
json.NewEncoder(w).Encode(gfs)
}

var cpuUsagePercent float64
var cpuUsageLock sync.RWMutex

func restGetSystem(w http.ResponseWriter) {
var m runtime.MemStats
runtime.ReadMemStats(&m)

res := make(map[string]interface{})
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys
cpuUsageLock.RLock()
res["cpuPercent"] = cpuUsagePercent
cpuUsageLock.RUnlock()

w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(res)
}

func nrscStatic(path string) interface{} {
if err := nrsc.Initialize(); err != nil {
panic("Unable to initialize nrsc: " + err.Error())
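
The new restGetSystem handler returns a JSON object with the goroutines, alloc, sys and cpuPercent keys set above. As a minimal sketch (not part of this change set), a client polling the endpoint could look like the following, assuming the GUI is reachable on 127.0.0.1:8080, the default address introduced in main.go further down:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Poll the /rest/system endpoint added in gui.go above.
	resp, err := http.Get("http://127.0.0.1:8080/rest/system")
	if err != nil {
		fmt.Println("GET failed:", err)
		return
	}
	defer resp.Body.Close()

	// restGetSystem encodes a map[string]interface{}; decode it the same way.
	var res map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("goroutines:", res["goroutines"])
	fmt.Println("alloc:", res["alloc"], "sys:", res["sys"])
	fmt.Println("cpuPercent:", res["cpuPercent"])
}
```
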

gui/app.js (60 lines changed)

@@ -1,18 +1,39 @@
|
||||
var syncthing = angular.module('syncthing', []);
|
||||
|
||||
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
var prevDate = 0;
|
||||
var modelGetOK = true;
|
||||
|
||||
function modelGetSucceeded() {
|
||||
if (!modelGetOK) {
|
||||
$('#networkError').modal('hide');
|
||||
modelGetOK = true;
|
||||
}
|
||||
}
|
||||
|
||||
function modelGetFailed() {
|
||||
if (modelGetOK) {
|
||||
$('#networkError').modal({backdrop: 'static', keyboard: false});
|
||||
modelGetOK = false;
|
||||
}
|
||||
}
|
||||
|
||||
$http.get("/rest/version").success(function (data) {
|
||||
$scope.version = data;
|
||||
});
|
||||
$http.get("/rest/config").success(function (data) {
|
||||
$scope.config = data;
|
||||
});
|
||||
|
||||
var prevDate = 0;
|
||||
|
||||
$scope.refresh = function () {
|
||||
$http.get("/rest/system").success(function (data) {
|
||||
$scope.system = data;
|
||||
});
|
||||
$http.get("/rest/model").success(function (data) {
|
||||
$scope.model = data;
|
||||
modelGetSucceeded();
|
||||
}).error(function () {
|
||||
modelGetFailed();
|
||||
});
|
||||
$http.get("/rest/connections").success(function (data) {
|
||||
var now = Date.now();
|
||||
@@ -21,8 +42,8 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
|
||||
for (var id in data) {
|
||||
try {
|
||||
data[id].inbps = 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td;
|
||||
data[id].outbps = 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td;
|
||||
data[id].inbps = Math.max(0, 8 * (data[id].InBytesTotal - $scope.connections[id].InBytesTotal) / td);
|
||||
data[id].outbps = Math.max(0, 8 * (data[id].OutBytesTotal - $scope.connections[id].OutBytesTotal) / td);
|
||||
} catch (e) {
|
||||
data[id].inbps = 0;
|
||||
data[id].outbps = 0;
|
||||
@@ -53,16 +74,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
|
||||
setInterval($scope.refresh, 10000);
|
||||
});
|
||||
|
||||
function decimals(num) {
|
||||
if (num > 100) {
|
||||
return 0;
|
||||
}
|
||||
if (num > 10) {
|
||||
return 1;
|
||||
}
|
||||
return 2;
|
||||
function decimals(val, num) {
|
||||
if (val === 0) { return 0; }
|
||||
var digits = Math.floor(Math.log(Math.abs(val))/Math.log(10));
|
||||
var decimals = Math.max(0, num - digits);
|
||||
return decimals;
|
||||
}
|
||||
|
||||
syncthing.filter('natural', function() {
|
||||
return function(input, valid) {
|
||||
return input.toFixed(decimals(input, valid));
|
||||
}
|
||||
});
|
||||
|
||||
syncthing.filter('binary', function() {
|
||||
return function(input) {
|
||||
if (input === undefined) {
|
||||
@@ -70,15 +94,15 @@ syncthing.filter('binary', function() {
|
||||
}
|
||||
if (input > 1024 * 1024 * 1024) {
|
||||
input /= 1024 * 1024 * 1024;
|
||||
return input.toFixed(decimals(input)) + ' Gi';
|
||||
return input.toFixed(decimals(input, 2)) + ' Gi';
|
||||
}
|
||||
if (input > 1024 * 1024) {
|
||||
input /= 1024 * 1024;
|
||||
return input.toFixed(decimals(input)) + ' Mi';
|
||||
return input.toFixed(decimals(input, 2)) + ' Mi';
|
||||
}
|
||||
if (input > 1024) {
|
||||
input /= 1024;
|
||||
return input.toFixed(decimals(input)) + ' Ki';
|
||||
return input.toFixed(decimals(input, 2)) + ' Ki';
|
||||
}
|
||||
return Math.round(input) + ' ';
|
||||
}
|
||||
@@ -91,15 +115,15 @@ syncthing.filter('metric', function() {
|
||||
}
|
||||
if (input > 1000 * 1000 * 1000) {
|
||||
input /= 1000 * 1000 * 1000;
|
||||
return input.toFixed(decimals(input)) + ' G';
|
||||
return input.toFixed(decimals(input, 2)) + ' G';
|
||||
}
|
||||
if (input > 1000 * 1000) {
|
||||
input /= 1000 * 1000;
|
||||
return input.toFixed(decimals(input)) + ' M';
|
||||
return input.toFixed(decimals(input, 2)) + ' M';
|
||||
}
|
||||
if (input > 1000) {
|
||||
input /= 1000;
|
||||
return input.toFixed(decimals(input)) + ' k';
|
||||
return input.toFixed(decimals(input, 2)) + ' k';
|
||||
}
|
||||
return Math.round(input) + ' ';
|
||||
}
|
||||
|
||||

gui/index.html (173 lines changed)

@@ -11,9 +11,20 @@
|
||||
<title>syncthing</title>
|
||||
<link href="bootstrap/css/bootstrap.css" rel="stylesheet">
|
||||
<style type="text/css">
|
||||
body {
|
||||
html, body {
|
||||
height: 100%;
|
||||
}
|
||||
#wrap{
|
||||
padding-top: 20px;
|
||||
padding-bottom: 20px;
|
||||
min-height: 100%;
|
||||
height: auto;
|
||||
margin: 0 auto -50px;
|
||||
padding: 20px 0 50px 0;
|
||||
}
|
||||
#footer {
|
||||
height: 50px;
|
||||
padding: 12px;
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
|
||||
.header {
|
||||
@@ -28,75 +39,119 @@ body {
|
||||
</head>
|
||||
|
||||
<body ng-controller="SyncthingCtrl">
|
||||
<div class="container">
|
||||
<div class="header">
|
||||
<h3 class="text-muted">syncthing <small>|</small> <small>{{version}}</small></h3>
|
||||
</div>
|
||||
<div id="wrap">
|
||||
<div class="container">
|
||||
<div class="header">
|
||||
<h3 class="text-muted">syncthing</h3>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<h2>Synchronization</h2>
|
||||
<div class="progress">
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
|
||||
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
|
||||
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
|
||||
{{100 * model.inSyncBytes / model.globalBytes | number:0}}%
|
||||
<div class="row">
|
||||
<div class="col-md-12">
|
||||
<div class="panel" ng-class="{'panel-success': model.needBytes === 0, 'panel-primary': model.needBytes !== 0}">
|
||||
<div class="panel-heading"><h3 class="panel-title">Synchronization</h3></div>
|
||||
<div class="panel-body">
|
||||
<div class="progress">
|
||||
<div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
|
||||
ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
|
||||
style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
|
||||
{{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
|
||||
</div>
|
||||
</div>
|
||||
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<p ng-show="model.needBytes > 0">Need {{model.needFiles | alwaysNumber}} files, {{model.needBytes | binary}}B</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<h1>Repository Status</h1>
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="panel panel-info">
|
||||
<div class="panel-heading"><h3 class="panel-title">Repository</h3></div>
|
||||
<div class="panel-body">
|
||||
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
|
||||
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
|
||||
|
||||
<p>Cluster contains {{model.globalFiles | alwaysNumber}} files, {{model.globalBytes | binary}}B
|
||||
<span class="text-muted">(+{{model.globalDeleted | alwaysNumber}} delete records)</span></p>
|
||||
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
|
||||
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p>Local repository has {{model.localFiles | alwaysNumber}} files, {{model.localBytes | binary}}B
|
||||
<span class="text-muted">(+{{model.localDeleted | alwaysNumber}} delete records)</span></p>
|
||||
<div class="panel panel-info">
|
||||
<div class="panel-heading"><h3 class="panel-title">System</h3></div>
|
||||
<div class="panel-body">
|
||||
<p>{{system.sys | binary}}B RAM allocated, {{system.alloc | binary}}B in use</p>
|
||||
<p>{{system.cpuPercent | alwaysNumber | natural:1}}% CPU, {{system.goroutines | alwaysNumber}} goroutines</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div ng-show="model.needFiles > 0">
|
||||
<h2>Files to Synchronize</h2>
|
||||
<table class="table table-condensed table-striped">
|
||||
<tr ng-repeat="file in need track by $index">
|
||||
<td><abbr title="{{file.Name}}">{{file.ShortName}}</abbr></td>
|
||||
<td class="text-right">{{file.Size | binary}}B</td>
|
||||
</tr>
|
||||
</table>
|
||||
<div ng-show="model.needFiles > 0">
|
||||
<h2>Files to Synchronize</h2>
|
||||
<table class="table table-condensed table-striped">
|
||||
<tr ng-repeat="file in need track by $index">
|
||||
<td><abbr title="{{file.Name}}">{{file.ShortName}}</abbr></td>
|
||||
<td class="text-right">{{file.Size | binary}}B</td>
|
||||
</tr>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<div class="panel panel-info">
|
||||
<div class="panel-heading"><h3 class="panel-title">Cluster</h3></div>
|
||||
<table class="table table-condensed">
|
||||
<tbody>
|
||||
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node]}">
|
||||
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
|
||||
<td>
|
||||
<span ng-show="!!connections[node]">
|
||||
<span class="glyphicon glyphicon-link"></span>
|
||||
{{connections[node].Address}}
|
||||
</span>
|
||||
<span ng-hide="!!connections[node]">
|
||||
<span class="glyphicon glyphicon-cog"></span>
|
||||
{{address}}
|
||||
</span>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
|
||||
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
|
||||
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<h1>Cluster Status</h1>
|
||||
<table class="table table-condensed">
|
||||
<tbody>
|
||||
<tr ng-repeat="(node, address) in config.nodes" ng-class="{'text-primary': !!connections[node]}">
|
||||
<td><abbr class="text-monospace" title="{{node}}">{{node | short}}</abbr></td>
|
||||
<td>
|
||||
<span ng-show="!!connections[node]">
|
||||
<span class="glyphicon glyphicon-link"></span>
|
||||
{{connections[node].Address}}
|
||||
</span>
|
||||
<span ng-hide="!!connections[node]">
|
||||
<span class="glyphicon glyphicon-cog"></span>
|
||||
{{address}}
|
||||
</span>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<abbr title="{{connections[node].InBytesTotal | binary}}B">{{connections[node].inbps | metric}}b/s</abbr>
|
||||
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<abbr title="{{connections[node].OutBytesTotal | binary}}B">{{connections[node].outbps | metric}}b/s</abbr>
|
||||
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
<div id="footer" class="text-center">
|
||||
syncthing {{version}}
|
||||
| <a href="https://github.com/calmh/syncthing/releases">Latest Release</a>
|
||||
| <a href="https://github.com/calmh/syncthing/wiki">Documentation</a>
|
||||
| <a href="https://github.com/calmh/syncthing/issues">Bugs</a>
|
||||
| <a href="https://github.com/calmh/syncthing">Source Code</a>
|
||||
</div>
|
||||
|
||||
<div id="networkError" class="modal fade">
|
||||
<div class="modal-dialog">
|
||||
<div class="modal-content">
|
||||
<div class="modal-header alert alert-danger">
|
||||
<h4 class="modal-title">
|
||||
<span class="glyphicon glyphicon-exclamation-sign"></span>
|
||||
Connection Error
|
||||
</h4>
|
||||
</div>
|
||||
<div class="modal-body">
|
||||
<p>
|
||||
Syncthing seems to be down, or there is a problem with your Internet connection.
|
||||
Retrying…
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<script src="angular.min.js"></script>
|
||||
|
||||

gui_unix.go (new file, 31 lines)

@@ -0,0 +1,31 @@
//+build !windows

package main

import (
"syscall"
"time"
)

func init() {
go trackCPUUsage()
}

func trackCPUUsage() {
var prevUsage int64
var prevTime = time.Now().UnixNano()
var rusage syscall.Rusage
for {
time.Sleep(10 * time.Second)
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
usageDiff := curUsage - prevUsage
cpuUsageLock.Lock()
cpuUsagePercent = 100 * float64(usageDiff) / float64(timeDiff)
cpuUsageLock.Unlock()
prevTime = curTime
prevUsage = curUsage
}
}
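
trackCPUUsage samples getrusage(RUSAGE_SELF) every ten seconds and publishes the share of wall-clock time spent in user plus system mode. For example, accumulating 2 s of combined CPU time over a 10 s sampling interval gives cpuUsagePercent = 100 * 2e9 / 10e9 = 20, which restGetSystem in gui.go then reports as cpuPercent.
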

main.go (75 lines changed)

@@ -3,12 +3,15 @@ package main
|
||||
import (
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -21,17 +24,19 @@ import (
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
ConfDir string `short:"c" long:"cfg" description:"Configuration directory" default:"~/.syncthing" value-name:"DIR"`
|
||||
Listen string `short:"l" long:"listen" description:"Listen address" default:":22000" value-name:"ADDR"`
|
||||
ReadOnly bool `short:"r" long:"ro" description:"Repository is read only"`
|
||||
Rehash bool `long:"rehash" description:"Ignore cache and rehash all files in repository"`
|
||||
NoDelete bool `long:"no-delete" description:"Never delete files"`
|
||||
NoSymlinks bool `long:"no-symlinks" description:"Don't follow first level symlinks in the repo"`
|
||||
NoStats bool `long:"no-stats" description:"Don't print model and connection statistics"`
|
||||
GUIAddr string `long:"gui" description:"GUI listen address" default:"" value-name:"ADDR"`
|
||||
Discovery DiscoveryOptions `group:"Discovery Options"`
|
||||
Advanced AdvancedOptions `group:"Advanced Options"`
|
||||
Debug DebugOptions `group:"Debugging Options"`
|
||||
ConfDir string `short:"c" long:"cfg" description:"Configuration directory" default:"~/.syncthing" value-name:"DIR"`
|
||||
Listen string `short:"l" long:"listen" description:"Listen address" default:":22000" value-name:"ADDR"`
|
||||
ReadOnly bool `short:"r" long:"ro" description:"Repository is read only"`
|
||||
Rehash bool `long:"rehash" description:"Ignore cache and rehash all files in repository"`
|
||||
NoDelete bool `long:"no-delete" description:"Never delete files"`
|
||||
NoSymlinks bool `long:"no-symlinks" description:"Don't follow first level symlinks in the repo"`
|
||||
NoStats bool `long:"no-stats" description:"Don't print model and connection statistics"`
|
||||
NoGUI bool `long:"no-gui" description:"Don't start GUI"`
|
||||
GUIAddr string `long:"gui-addr" description:"GUI listen address" default:"127.0.0.1:8080" value-name:"ADDR"`
|
||||
ShowVersion bool `short:"v" long:"version" description:"Show version"`
|
||||
Discovery DiscoveryOptions `group:"Discovery Options"`
|
||||
Advanced AdvancedOptions `group:"Advanced Options"`
|
||||
Debug DebugOptions `group:"Debugging Options"`
|
||||
}
|
||||
|
||||
type DebugOptions struct {
|
||||
@@ -70,8 +75,22 @@ var (
|
||||
func main() {
|
||||
_, err := flags.Parse(&opts)
|
||||
if err != nil {
|
||||
fatalln(err)
|
||||
}
|
||||
|
||||
if opts.ShowVersion {
|
||||
fmt.Println(Version)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if len(os.Getenv("GOGC")) == 0 {
|
||||
debug.SetGCPercent(25)
|
||||
}
|
||||
|
||||
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
}
|
||||
|
||||
if len(opts.Debug.TraceModel) > 0 || opts.Debug.LogSource {
|
||||
logger = log.New(os.Stderr, "", log.Lshortfile|log.Ldate|log.Ltime|log.Lmicroseconds)
|
||||
}
|
||||
@@ -105,11 +124,13 @@ func main() {
|
||||
// connections.
|
||||
|
||||
cfg := &tls.Config{
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
ServerName: "syncthing",
|
||||
NextProtos: []string{"bep/1.0"},
|
||||
InsecureSkipVerify: true,
|
||||
Certificates: []tls.Certificate{cert},
|
||||
Certificates: []tls.Certificate{cert},
|
||||
NextProtos: []string{"bep/1.0"},
|
||||
ServerName: myID,
|
||||
ClientAuth: tls.RequestClientCert,
|
||||
SessionTicketsDisabled: true,
|
||||
InsecureSkipVerify: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
// Load the configuration file, if it exists.
|
||||
@@ -139,8 +160,18 @@ func main() {
|
||||
}
|
||||
|
||||
// GUI
|
||||
if opts.GUIAddr != "" {
|
||||
startGUI(opts.GUIAddr, m)
|
||||
if !opts.NoGUI && opts.GUIAddr != "" {
|
||||
host, port, err := net.SplitHostPort(opts.GUIAddr)
|
||||
if err != nil {
|
||||
warnf("Cannot start GUI on %q: %v", opts.GUIAddr, err)
|
||||
} else {
|
||||
if len(host) > 0 {
|
||||
infof("Starting web GUI on http://%s", opts.GUIAddr)
|
||||
} else {
|
||||
infof("Starting web GUI on port %s", port)
|
||||
}
|
||||
startGUI(opts.GUIAddr, m)
|
||||
}
|
||||
}
|
||||
|
||||
// Walk the repository and update the local model before establishing any
|
||||
@@ -261,7 +292,8 @@ listen:
|
||||
|
||||
for nodeID := range nodeAddrs {
|
||||
if nodeID == remoteID {
|
||||
m.AddConnection(conn, remoteID)
|
||||
protoConn := protocol.NewConnection(remoteID, conn, conn, m)
|
||||
m.AddConnection(conn, protoConn)
|
||||
continue listen
|
||||
}
|
||||
}
|
||||
@@ -330,7 +362,8 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.M
|
||||
continue
|
||||
}
|
||||
|
||||
m.AddConnection(conn, remoteID)
|
||||
protoConn := protocol.NewConnection(remoteID, conn, conn, m)
|
||||
m.AddConnection(conn, protoConn)
|
||||
continue nextNode
|
||||
}
|
||||
}
|
||||
@@ -340,7 +373,7 @@ func connect(myID string, addr string, nodeAddrs map[string][]string, m *model.M
|
||||
}
|
||||
|
||||
func updateLocalModel(m *model.Model) {
|
||||
files := m.FilteredWalk(!opts.NoSymlinks)
|
||||
files, _ := m.Walk(!opts.NoSymlinks)
|
||||
m.ReplaceLocal(files)
|
||||
saveIndex(m)
|
||||
}
|
||||
|
||||

model/model.go (159 lines changed)

@@ -31,12 +31,12 @@ type Model struct {
|
||||
sync.RWMutex
|
||||
dir string
|
||||
|
||||
global map[string]File // the latest version of each file as it exists in the cluster
|
||||
local map[string]File // the files we currently have locally on disk
|
||||
remote map[string]map[string]File
|
||||
need map[string]bool // the files we need to update
|
||||
nodes map[string]*protocol.Connection
|
||||
rawConn map[string]io.ReadWriteCloser
|
||||
global map[string]File // the latest version of each file as it exists in the cluster
|
||||
local map[string]File // the files we currently have locally on disk
|
||||
remote map[string]map[string]File
|
||||
need map[string]bool // the files we need to update
|
||||
protoConn map[string]Connection
|
||||
rawConn map[string]io.Closer
|
||||
|
||||
updatedLocal int64 // timestamp of last update to local
|
||||
updateGlobal int64 // timestamp of last update to remote
|
||||
@@ -55,9 +55,14 @@ type Model struct {
|
||||
fileWasSuppressed map[string]int
|
||||
}
|
||||
|
||||
const (
|
||||
FlagDeleted = 1 << 12
|
||||
type Connection interface {
|
||||
ID() string
|
||||
Index([]protocol.FileInfo)
|
||||
Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error)
|
||||
Statistics() protocol.Statistics
|
||||
}
|
||||
|
||||
const (
|
||||
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
|
||||
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
|
||||
|
||||
@@ -65,7 +70,10 @@ const (
|
||||
maxFileHoldTimeS = 600 // Always allow file changes at least this often
|
||||
)
|
||||
|
||||
var ErrNoSuchFile = errors.New("no such file")
|
||||
var (
|
||||
ErrNoSuchFile = errors.New("no such file")
|
||||
ErrInvalid = errors.New("file is invalid")
|
||||
)
|
||||
|
||||
// NewModel creates and starts a new model. The model starts in read-only mode,
|
||||
// where it sends index information to connected peers and responds to requests
|
||||
@@ -77,8 +85,8 @@ func NewModel(dir string) *Model {
|
||||
local: make(map[string]File),
|
||||
remote: make(map[string]map[string]File),
|
||||
need: make(map[string]bool),
|
||||
nodes: make(map[string]*protocol.Connection),
|
||||
rawConn: make(map[string]io.ReadWriteCloser),
|
||||
protoConn: make(map[string]Connection),
|
||||
rawConn: make(map[string]io.Closer),
|
||||
lastIdxBcast: time.Now(),
|
||||
trace: make(map[string]bool),
|
||||
fileLastChanged: make(map[string]time.Time),
|
||||
@@ -140,7 +148,7 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
|
||||
defer m.RUnlock()
|
||||
|
||||
var res = make(map[string]ConnectionInfo)
|
||||
for node, conn := range m.nodes {
|
||||
for node, conn := range m.protoConn {
|
||||
ci := ConnectionInfo{
|
||||
Statistics: conn.Statistics(),
|
||||
}
|
||||
@@ -159,7 +167,7 @@ func (m *Model) GlobalSize() (files, deleted, bytes int) {
|
||||
defer m.RUnlock()
|
||||
|
||||
for _, f := range m.global {
|
||||
if f.Flags&FlagDeleted == 0 {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
files++
|
||||
bytes += f.Size()
|
||||
} else {
|
||||
@@ -176,7 +184,7 @@ func (m *Model) LocalSize() (files, deleted, bytes int) {
|
||||
defer m.RUnlock()
|
||||
|
||||
for _, f := range m.local {
|
||||
if f.Flags&FlagDeleted == 0 {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
files++
|
||||
bytes += f.Size()
|
||||
} else {
|
||||
@@ -193,7 +201,7 @@ func (m *Model) InSyncSize() (files, bytes int) {
|
||||
defer m.RUnlock()
|
||||
|
||||
for n, f := range m.local {
|
||||
if gf, ok := m.global[n]; ok && f.Modified == gf.Modified {
|
||||
if gf, ok := m.global[n]; ok && f.Equals(gf) {
|
||||
files++
|
||||
bytes += f.Size()
|
||||
}
|
||||
@@ -224,10 +232,11 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
|
||||
log.Printf("NET IDX(in): %s: %d files", nodeID, len(fs))
|
||||
}
|
||||
|
||||
m.remote[nodeID] = make(map[string]File)
|
||||
repo := make(map[string]File)
|
||||
for _, f := range fs {
|
||||
m.remote[nodeID][f.Name] = fileFromFileInfo(f)
|
||||
m.indexUpdate(repo, f)
|
||||
}
|
||||
m.remote[nodeID] = repo
|
||||
|
||||
m.recomputeGlobal()
|
||||
m.recomputeNeed()
|
||||
@@ -245,21 +254,35 @@ func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
|
||||
|
||||
repo, ok := m.remote[nodeID]
|
||||
if !ok {
|
||||
log.Printf("WARNING: Index update from node %s that does not have an index", nodeID)
|
||||
return
|
||||
}
|
||||
|
||||
for _, f := range fs {
|
||||
if f.Flags&FlagDeleted != 0 && !m.delete {
|
||||
// Files marked as deleted do not even enter the model
|
||||
continue
|
||||
}
|
||||
repo[f.Name] = fileFromFileInfo(f)
|
||||
m.indexUpdate(repo, f)
|
||||
}
|
||||
|
||||
m.recomputeGlobal()
|
||||
m.recomputeNeed()
|
||||
}
|
||||
|
||||
func (m *Model) indexUpdate(repo map[string]File, f protocol.FileInfo) {
|
||||
if m.trace["idx"] {
|
||||
var flagComment string
|
||||
if f.Flags&protocol.FlagDeleted != 0 {
|
||||
flagComment = " (deleted)"
|
||||
}
|
||||
log.Printf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
|
||||
}
|
||||
|
||||
if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
|
||||
log.Printf("WARNING: IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f)
|
||||
return
|
||||
}
|
||||
|
||||
repo[f.Name] = fileFromFileInfo(f)
|
||||
}
|
||||
|
||||
// Close removes the peer from the model and closes the underlyign connection if possible.
|
||||
// Implements the protocol.Model interface.
|
||||
func (m *Model) Close(node string, err error) {
|
||||
@@ -272,7 +295,7 @@ func (m *Model) Close(node string, err error) {
|
||||
}
|
||||
|
||||
delete(m.remote, node)
|
||||
delete(m.nodes, node)
|
||||
delete(m.protoConn, node)
|
||||
delete(m.rawConn, node)
|
||||
|
||||
m.recomputeGlobal()
|
||||
@@ -284,13 +307,16 @@ func (m *Model) Close(node string, err error) {
|
||||
func (m *Model) Request(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
|
||||
// Verify that the requested file exists in the local and global model.
|
||||
m.RLock()
|
||||
_, localOk := m.local[name]
|
||||
lf, localOk := m.local[name]
|
||||
_, globalOk := m.global[name]
|
||||
m.RUnlock()
|
||||
if !localOk || !globalOk {
|
||||
log.Printf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
|
||||
return nil, ErrNoSuchFile
|
||||
}
|
||||
if lf.Flags&protocol.FlagInvalid != 0 {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
if m.trace["net"] && nodeID != "<local>" {
|
||||
log.Printf("NET REQ(in): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
|
||||
@@ -322,7 +348,7 @@ func (m *Model) ReplaceLocal(fs []File) {
|
||||
|
||||
for _, f := range fs {
|
||||
newLocal[f.Name] = f
|
||||
if ef := m.local[f.Name]; ef.Modified != f.Modified {
|
||||
if ef := m.local[f.Name]; !ef.Equals(f) {
|
||||
updated = true
|
||||
}
|
||||
}
|
||||
@@ -364,7 +390,7 @@ func (m *Model) SeedLocal(fs []protocol.FileInfo) {
|
||||
func (m *Model) ConnectedTo(nodeID string) bool {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
_, ok := m.nodes[nodeID]
|
||||
_, ok := m.protoConn[nodeID]
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -383,12 +409,11 @@ func (m *Model) RepoID() string {
|
||||
// AddConnection adds a new peer connection to the model. An initial index will
|
||||
// be sent to the connected peer, thereafter index updates whenever the local
|
||||
// repository changes.
|
||||
func (m *Model) AddConnection(conn io.ReadWriteCloser, nodeID string) {
|
||||
node := protocol.NewConnection(nodeID, conn, conn, m)
|
||||
|
||||
func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
|
||||
nodeID := protoConn.ID()
|
||||
m.Lock()
|
||||
m.nodes[nodeID] = node
|
||||
m.rawConn[nodeID] = conn
|
||||
m.protoConn[nodeID] = protoConn
|
||||
m.rawConn[nodeID] = rawConn
|
||||
m.Unlock()
|
||||
|
||||
m.RLock()
|
||||
@@ -396,7 +421,7 @@ func (m *Model) AddConnection(conn io.ReadWriteCloser, nodeID string) {
|
||||
m.RUnlock()
|
||||
|
||||
go func() {
|
||||
node.Index(idx)
|
||||
protoConn.Index(idx)
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -430,10 +455,10 @@ func (m *Model) protocolIndex() []protocol.FileInfo {
|
||||
mf := fileInfoFromFile(f)
|
||||
if m.trace["idx"] {
|
||||
var flagComment string
|
||||
if mf.Flags&FlagDeleted != 0 {
|
||||
if mf.Flags&protocol.FlagDeleted != 0 {
|
||||
flagComment = " (deleted)"
|
||||
}
|
||||
log.Printf("IDX: %q m=%d f=%o%s (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, len(mf.Blocks))
|
||||
log.Printf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
|
||||
}
|
||||
index = append(index, mf)
|
||||
}
|
||||
@@ -442,7 +467,7 @@ func (m *Model) protocolIndex() []protocol.FileInfo {
|
||||
|
||||
func (m *Model) requestGlobal(nodeID, name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
|
||||
m.RLock()
|
||||
nc, ok := m.nodes[nodeID]
|
||||
nc, ok := m.protoConn[nodeID]
|
||||
m.RUnlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
|
||||
@@ -466,10 +491,10 @@ func (m *Model) broadcastIndexLoop() {
|
||||
if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
|
||||
m.Lock()
|
||||
var indexWg sync.WaitGroup
|
||||
indexWg.Add(len(m.nodes))
|
||||
indexWg.Add(len(m.protoConn))
|
||||
idx := m.protocolIndex()
|
||||
m.lastIdxBcast = time.Now()
|
||||
for _, node := range m.nodes {
|
||||
for _, node := range m.protoConn {
|
||||
node := node
|
||||
if m.trace["net"] {
|
||||
log.Printf("NET IDX(out/loop): %s: %d files", node.ID, len(idx))
|
||||
@@ -496,10 +521,10 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
|
||||
var updated bool
|
||||
for n, f := range m.local {
|
||||
if _, ok := newLocal[n]; !ok {
|
||||
if gf := m.global[n]; gf.Modified <= f.Modified {
|
||||
if f.Flags&FlagDeleted == 0 {
|
||||
f.Flags = FlagDeleted
|
||||
f.Modified = f.Modified + 1
|
||||
if gf := m.global[n]; !gf.NewerThan(f) {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
f.Flags = protocol.FlagDeleted
|
||||
f.Version++
|
||||
f.Blocks = nil
|
||||
updated = true
|
||||
}
|
||||
@@ -511,7 +536,7 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
|
||||
}
|
||||
|
||||
func (m *Model) updateLocal(f File) {
|
||||
if ef, ok := m.local[f.Name]; !ok || ef.Modified != f.Modified {
|
||||
if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
|
||||
m.local[f.Name] = f
|
||||
m.recomputeGlobal()
|
||||
m.recomputeNeed()
|
||||
@@ -528,10 +553,14 @@ func (m *Model) recomputeGlobal() {
|
||||
newGlobal[n] = f
|
||||
}
|
||||
|
||||
var highestMod int64
|
||||
for _, fs := range m.remote {
|
||||
for n, f := range fs {
|
||||
if cf, ok := newGlobal[n]; !ok || cf.Modified < f.Modified {
|
||||
newGlobal[n] = f
|
||||
for n, nf := range fs {
|
||||
if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
|
||||
newGlobal[n] = nf
|
||||
if nf.Modified > highestMod {
|
||||
highestMod = nf.Modified
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -539,11 +568,11 @@ func (m *Model) recomputeGlobal() {
|
||||
// Figure out if anything actually changed
|
||||
|
||||
var updated bool
|
||||
if len(newGlobal) != len(m.global) {
|
||||
if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
|
||||
updated = true
|
||||
} else {
|
||||
for n, f0 := range newGlobal {
|
||||
if f1, ok := m.global[n]; !ok || f0.Modified != f1.Modified {
|
||||
if f1, ok := m.global[n]; !ok || !f0.Equals(f1) {
|
||||
updated = true
|
||||
break
|
||||
}
|
||||
@@ -559,19 +588,23 @@ func (m *Model) recomputeGlobal() {
|
||||
// Must be called with the write lock held.
|
||||
func (m *Model) recomputeNeed() {
|
||||
m.need = make(map[string]bool)
|
||||
for n, f := range m.global {
|
||||
hf, ok := m.local[n]
|
||||
if !ok || f.Modified > hf.Modified {
|
||||
if f.Flags&FlagDeleted != 0 && !m.delete {
|
||||
for n, gf := range m.global {
|
||||
lf, ok := m.local[n]
|
||||
if !ok || gf.NewerThan(lf) {
|
||||
if gf.Flags&protocol.FlagInvalid != 0 {
|
||||
// Never attempt to sync invalid files
|
||||
continue
|
||||
}
|
||||
if gf.Flags&protocol.FlagDeleted != 0 && !m.delete {
|
||||
// Don't want to delete files, so forget this need
|
||||
continue
|
||||
}
|
||||
if f.Flags&FlagDeleted != 0 && !ok {
|
||||
if gf.Flags&protocol.FlagDeleted != 0 && !ok {
|
||||
// Don't have the file, so don't need to delete it
|
||||
continue
|
||||
}
|
||||
if m.trace["need"] {
|
||||
log.Println("NEED:", ok, hf, f)
|
||||
log.Println("NEED:", ok, lf, gf)
|
||||
}
|
||||
m.need[n] = true
|
||||
}
|
||||
@@ -584,7 +617,7 @@ func (m *Model) whoHas(name string) []string {
|
||||
|
||||
gf := m.global[name]
|
||||
for node, files := range m.remote {
|
||||
if file, ok := files[name]; ok && file.Modified == gf.Modified {
|
||||
if file, ok := files[name]; ok && file.Equals(gf) {
|
||||
remote = append(remote, node)
|
||||
}
|
||||
}
|
||||
@@ -593,36 +626,38 @@ func (m *Model) whoHas(name string) []string {
|
||||
}
|
||||
|
||||
func fileFromFileInfo(f protocol.FileInfo) File {
|
||||
var blocks []Block
|
||||
var blocks = make([]Block, len(f.Blocks))
|
||||
var offset uint64
|
||||
for _, b := range f.Blocks {
|
||||
blocks = append(blocks, Block{
|
||||
for i, b := range f.Blocks {
|
||||
blocks[i] = Block{
|
||||
Offset: offset,
|
||||
Length: b.Length,
|
||||
Hash: b.Hash,
|
||||
})
|
||||
}
|
||||
offset += uint64(b.Length)
|
||||
}
|
||||
return File{
|
||||
Name: f.Name,
|
||||
Flags: f.Flags,
|
||||
Modified: int64(f.Modified),
|
||||
Version: f.Version,
|
||||
Blocks: blocks,
|
||||
}
|
||||
}
|
||||
|
||||
func fileInfoFromFile(f File) protocol.FileInfo {
|
||||
var blocks []protocol.BlockInfo
|
||||
for _, b := range f.Blocks {
|
||||
blocks = append(blocks, protocol.BlockInfo{
|
||||
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
|
||||
for i, b := range f.Blocks {
|
||||
blocks[i] = protocol.BlockInfo{
|
||||
Length: b.Length,
|
||||
Hash: b.Hash,
|
||||
})
|
||||
}
|
||||
}
|
||||
return protocol.FileInfo{
|
||||
Name: f.Name,
|
||||
Flags: f.Flags,
|
||||
Modified: int64(f.Modified),
|
||||
Version: f.Version,
|
||||
Blocks: blocks,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/buffers"
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
)
|
||||
|
||||
func (m *Model) pullFile(name string) error {
|
||||
@@ -171,7 +172,7 @@ func (m *Model) puller() {
|
||||
}
|
||||
|
||||
var err error
|
||||
if f.Flags&FlagDeleted == 0 {
|
||||
if f.Flags&protocol.FlagDeleted == 0 {
|
||||
if m.trace["file"] {
|
||||
log.Printf("FILE: Pull %q", n)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
@@ -39,12 +40,6 @@ var testDataExpected = map[string]File{
|
||||
Modified: 0,
|
||||
Blocks: []Block{{Offset: 0x0, Length: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
|
||||
},
|
||||
"baz/quux": File{
|
||||
Name: "baz/quux",
|
||||
Flags: 0,
|
||||
Modified: 0,
|
||||
Blocks: []Block{{Offset: 0x0, Length: 0x9, Hash: []uint8{0xc1, 0x54, 0xd9, 0x4e, 0x94, 0xba, 0x72, 0x98, 0xa6, 0xad, 0xb0, 0x52, 0x3a, 0xfe, 0x34, 0xd1, 0xb6, 0xa5, 0x81, 0xd6, 0xb8, 0x93, 0xa7, 0x63, 0xd4, 0x5d, 0xdc, 0x5e, 0x20, 0x9d, 0xcb, 0x83}}},
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -228,9 +223,12 @@ func TestDelete(t *testing.T) {
|
||||
if len(m.local["a new file"].Blocks) != 0 {
|
||||
t.Error("Unexpected non-zero blocks for deleted file in local")
|
||||
}
|
||||
if ft := m.local["a new file"].Modified; ft != ot+1 {
|
||||
if ft := m.local["a new file"].Modified; ft != ot {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
|
||||
}
|
||||
if fv := m.local["a new file"].Version; fv != 1 {
|
||||
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
|
||||
}
|
||||
|
||||
if m.global["a new file"].Flags&(1<<12) == 0 {
|
||||
t.Error("Unexpected deleted flag = 0 in global table")
|
||||
@@ -238,8 +236,11 @@ func TestDelete(t *testing.T) {
|
||||
if len(m.global["a new file"].Blocks) != 0 {
|
||||
t.Error("Unexpected non-zero blocks for deleted file in global")
|
||||
}
|
||||
if ft := m.local["a new file"].Modified; ft != ot+1 {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
|
||||
if ft := m.global["a new file"].Modified; ft != ot {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1)
|
||||
}
|
||||
if fv := m.local["a new file"].Version; fv != 1 {
|
||||
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
|
||||
}
|
||||
|
||||
// Another update should change nothing
|
||||
@@ -259,8 +260,11 @@ func TestDelete(t *testing.T) {
|
||||
if len(m.local["a new file"].Blocks) != 0 {
|
||||
t.Error("Unexpected non-zero blocks for deleted file in local")
|
||||
}
|
||||
if ft := m.local["a new file"].Modified; ft != ot+1 {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
|
||||
if ft := m.local["a new file"].Modified; ft != ot {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot)
|
||||
}
|
||||
if fv := m.local["a new file"].Version; fv != 1 {
|
||||
t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
|
||||
}
|
||||
|
||||
if m.global["a new file"].Flags&(1<<12) == 0 {
|
||||
@@ -269,8 +273,11 @@ func TestDelete(t *testing.T) {
|
||||
if len(m.global["a new file"].Blocks) != 0 {
|
||||
t.Error("Unexpected non-zero blocks for deleted file in global")
|
||||
}
|
||||
if ft := m.local["a new file"].Modified; ft != ot+1 {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
|
||||
if ft := m.global["a new file"].Modified; ft != ot {
|
||||
t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot)
|
||||
}
|
||||
if fv := m.local["a new file"].Version; fv != 1 {
|
||||
t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -365,3 +372,142 @@ func TestSuppression(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnoreWithUnknownFlags(t *testing.T) {
|
||||
m := NewModel("testdata")
|
||||
fs, _ := m.Walk(false)
|
||||
m.ReplaceLocal(fs)
|
||||
|
||||
valid := protocol.FileInfo{
|
||||
Name: "valid",
|
||||
Modified: time.Now().Unix(),
|
||||
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
|
||||
Flags: protocol.FlagDeleted | 0755,
|
||||
}
|
||||
|
||||
invalid := protocol.FileInfo{
|
||||
Name: "invalid",
|
||||
Modified: time.Now().Unix(),
|
||||
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
|
||||
Flags: 1<<27 | protocol.FlagDeleted | 0755,
|
||||
}
|
||||
|
||||
m.Index("42", []protocol.FileInfo{valid, invalid})
|
||||
|
||||
if _, ok := m.global[valid.Name]; !ok {
|
||||
t.Error("Model should include", valid)
|
||||
}
|
||||
if _, ok := m.global[invalid.Name]; ok {
|
||||
t.Error("Model not should include", invalid)
|
||||
}
|
||||
}
|
||||
|
||||
func prepareModel(n int, m *Model) []protocol.FileInfo {
|
||||
fs, _ := m.Walk(false)
|
||||
m.ReplaceLocal(fs)
|
||||
|
||||
files := make([]protocol.FileInfo, n)
|
||||
t := time.Now().Unix()
|
||||
for i := 0; i < n; i++ {
|
||||
files[i] = protocol.FileInfo{
|
||||
Name: fmt.Sprintf("file%d", i),
|
||||
Modified: t,
|
||||
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
|
||||
}
|
||||
}
|
||||
|
||||
m.Index("42", files)
|
||||
return files
|
||||
}
|
||||
|
||||
func BenchmarkRecomputeGlobal10k(b *testing.B) {
|
||||
m := NewModel("testdata")
|
||||
prepareModel(10000, m)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.recomputeGlobal()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRecomputeNeed10K(b *testing.B) {
|
||||
m := NewModel("testdata")
|
||||
prepareModel(10000, m)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.recomputeNeed()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIndexUpdate10000(b *testing.B) {
|
||||
m := NewModel("testdata")
|
||||
files := prepareModel(10000, m)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.IndexUpdate("42", files)
|
||||
}
|
||||
}
|
||||
|
||||
type FakeConnection struct {
|
||||
id string
|
||||
requestData []byte
|
||||
}
|
||||
|
||||
func (FakeConnection) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f FakeConnection) ID() string {
|
||||
return string(f.id)
|
||||
}
|
||||
|
||||
func (FakeConnection) Index([]protocol.FileInfo) {}
|
||||
|
||||
func (f FakeConnection) Request(name string, offset uint64, size uint32, hash []byte) ([]byte, error) {
|
||||
return f.requestData, nil
|
||||
}
|
||||
|
||||
func (FakeConnection) Ping() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (FakeConnection) Statistics() protocol.Statistics {
|
||||
return protocol.Statistics{}
|
||||
}
|
||||
|
||||
func BenchmarkRequest(b *testing.B) {
|
||||
m := NewModel("testdata")
|
||||
fs, _ := m.Walk(false)
|
||||
m.ReplaceLocal(fs)
|
||||
|
||||
const n = 1000
|
||||
files := make([]protocol.FileInfo, n)
|
||||
t := time.Now().Unix()
|
||||
for i := 0; i < n; i++ {
|
||||
files[i] = protocol.FileInfo{
|
||||
Name: fmt.Sprintf("file%d", i),
|
||||
Modified: t,
|
||||
Blocks: []protocol.BlockInfo{{100, []byte("some hash bytes")}},
|
||||
}
|
||||
}
|
||||
|
||||
fc := FakeConnection{
|
||||
id: "42",
|
||||
requestData: []byte("some data to return"),
|
||||
}
|
||||
m.AddConnection(fc, fc)
|
||||
m.Index("42", files)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
data, err := m.requestGlobal("42", files[i%n].Name, 0, 32, nil)
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
if data == nil {
|
||||
b.Error("nil data")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||

model/walk.go (101 lines changed)

@@ -10,6 +10,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/calmh/syncthing/protocol"
|
||||
)
|
||||
|
||||
const BlockSize = 128 * 1024
|
||||
@@ -18,6 +20,7 @@ type File struct {
|
||||
Name string
|
||||
Flags uint32
|
||||
Modified int64
|
||||
Version uint32
|
||||
Blocks []Block
|
||||
}
|
||||
|
||||
@@ -28,6 +31,14 @@ func (f File) Size() (bytes int) {
|
||||
return
|
||||
}
|
||||
|
||||
func (f File) Equals(o File) bool {
|
||||
return f.Modified == o.Modified && f.Version == o.Version
|
||||
}
|
||||
|
||||
func (f File) NewerThan(o File) bool {
|
||||
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
|
||||
}
|
||||
|
||||
func isTempName(name string) bool {
|
||||
return strings.HasPrefix(path.Base(name), ".syncthing.")
|
||||
}
|
||||
@@ -38,16 +49,12 @@ func tempName(name string, modified int64) string {
|
||||
return path.Join(tdir, tname)
|
||||
}
|
||||
|
||||
func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFunc {
|
||||
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
|
||||
return func(p string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isTempName(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
rn, err := filepath.Rel(m.dir, p)
|
||||
if err != nil {
|
||||
return nil
|
||||
@@ -64,6 +71,36 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
|
||||
}
|
||||
}
|
||||
ign[pn] = patterns
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
|
||||
return func(p string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isTempName(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
rn, err := filepath.Rel(m.dir, p)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, sn := path.Split(rn); sn == ".stignore" {
|
||||
// We never sync the .stignore files
|
||||
return nil
|
||||
}
|
||||
|
||||
if ignoreFile(ign, rn) {
|
||||
if m.trace["file"] {
|
||||
log.Println("FILE: IGNORE:", rn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -79,7 +116,10 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
|
||||
m.RUnlock()
|
||||
|
||||
if ok && hf.Modified == modified {
|
||||
// No change
|
||||
if nf := uint32(info.Mode()); nf != hf.Flags {
|
||||
hf.Flags = nf
|
||||
hf.Version++
|
||||
}
|
||||
*res = append(*res, hf)
|
||||
} else {
|
||||
m.Lock()
|
||||
@@ -89,7 +129,8 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
|
||||
}
|
||||
|
||||
if ok {
|
||||
// Files that are ignored will be suppressed but don't actually exist in the local model
|
||||
hf.Flags = protocol.FlagInvalid
|
||||
hf.Version++
|
||||
*res = append(*res, hf)
|
||||
}
|
||||
m.Unlock()
|
||||
@@ -134,8 +175,11 @@ func (m *Model) genWalker(res *[]File, ign map[string][]string) filepath.WalkFun
|
||||
// file system. Files are blockwise hashed.
|
||||
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
|
||||
ignore = make(map[string][]string)
|
||||
fn := m.genWalker(&files, ignore)
|
||||
filepath.Walk(m.dir, fn)
|
||||
|
||||
hashFiles := m.walkAndHashFiles(&files, ignore)
|
||||
|
||||
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
|
||||
filepath.Walk(m.dir, hashFiles)
|
||||
|
||||
if followSymlinks {
|
||||
d, err := os.Open(m.dir)
|
||||
@@ -151,7 +195,9 @@ func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]str
|
||||
|
||||
for _, fi := range fis {
|
||||
if fi.Mode()&os.ModeSymlink != 0 {
|
||||
filepath.Walk(path.Join(m.dir, fi.Name())+"/", fn)
|
||||
dir := path.Join(m.dir, fi.Name()) + "/"
|
||||
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
|
||||
filepath.Walk(dir, hashFiles)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -159,14 +205,6 @@ func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]str
|
||||
return
|
||||
}
|
||||
|
||||
// Walk returns the list of files found in the local repository by scanning the
|
||||
// file system. Files are blockwise hashed. Patterns marked in .stignore files
|
||||
// are removed from the results.
|
||||
func (m *Model) FilteredWalk(followSymlinks bool) []File {
|
||||
var files, ignored = m.Walk(followSymlinks)
|
||||
return ignoreFilter(ignored, files)
|
||||
}
|
||||
|
||||
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -185,19 +223,24 @@ func (m *Model) cleanTempFiles() {
|
||||
}
|
||||
|
||||
func ignoreFilter(patterns map[string][]string, files []File) (filtered []File) {
|
||||
nextFile:
|
||||
for _, f := range files {
|
||||
first, last := path.Split(f.Name)
|
||||
for prefix, pats := range patterns {
|
||||
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
|
||||
for _, pattern := range pats {
|
||||
if match, _ := path.Match(pattern, last); match {
|
||||
continue nextFile
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ignoreFile(patterns, f.Name) {
|
||||
filtered = append(filtered, f)
|
||||
}
|
||||
filtered = append(filtered, f)
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
func ignoreFile(patterns map[string][]string, file string) bool {
|
||||
first, last := path.Split(file)
|
||||
for prefix, pats := range patterns {
|
||||
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
|
||||
for _, pattern := range pats {
|
||||
if match, _ := path.Match(pattern, last); match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@ var testdata = []struct {
|
||||
hash string
|
||||
}{
|
||||
{"bar", 10, "2f72cc11a6fcd0271ecef8c61056ee1eb1243be3805bf9a9df98f92f7636b05c"},
|
||||
{"baz/quux", 9, "c154d94e94ba7298a6adb0523afe34d1b6a581d6b893a763d45ddc5e209dcb83"},
|
||||
{"foo", 7, "aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f"},
|
||||
}
|
||||
|
||||
@@ -50,21 +49,6 @@ func TestWalk(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilteredWalk(t *testing.T) {
|
||||
m := NewModel("testdata")
|
||||
files := m.FilteredWalk(false)
|
||||
|
||||
if len(files) != 2 {
|
||||
t.Fatalf("Incorrect number of walked filtered files %d != 2", len(files))
|
||||
}
|
||||
if files[0].Name != "bar" {
|
||||
t.Error("Incorrect first file", files[0])
|
||||
}
|
||||
if files[1].Name != "foo" {
|
||||
t.Error("Incorrect second file", files[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestIgnore(t *testing.T) {
|
||||
var patterns = map[string][]string{
|
||||
"": {"t2"},
|
||||
|
||||
@@ -62,11 +62,10 @@ reserved bits must be set to zero.
All data following the message header is in XDR (RFC 1014) encoding.
The actual data types in use by BEP, in XDR naming convention, are:

- unsigned int -- unsigned 32 bit integer
- hyper -- signed 64 bit integer
- unsigned hyper -- signed 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string
- (unsigned) int -- (unsigned) 32 bit integer
- (unsigned) hyper -- (unsigned) 64 bit integer
- opaque<> -- variable length opaque data
- string<> -- variable length string

The encoding of opaque<> and string<> are identical, the distinction is
solely in interpretation. Opaque data should not be interpreted as such,
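
Since string<> and opaque<> share the same encoding, a minimal sketch of that wire format may help; this follows plain RFC 1014 rules and is not code from this change set: a four-byte big-endian length, the data bytes, then zero padding up to a multiple of four bytes.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// xdrOpaque encodes a byte slice the way XDR encodes opaque<> and string<>:
// a 4-byte big-endian length, the bytes themselves, then zero padding so the
// total payload length is a multiple of four.
func xdrOpaque(data []byte) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(len(data)))
	buf.Write(data)
	if pad := (4 - len(data)%4) % 4; pad > 0 {
		buf.Write(make([]byte, pad))
	}
	return buf.Bytes()
}

func main() {
	// "bep/1.0" is 7 bytes, so one zero byte of padding follows it.
	fmt.Printf("% x\n", xdrOpaque([]byte("bep/1.0")))
}
```
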
@@ -92,6 +91,7 @@ message.
string Name<>;
unsigned int Flags;
hyper Modified;
unsigned int Version;
BlockInfo Blocks<>;
}

@@ -102,15 +102,19 @@ message.

The file name is the part relative to the repository root. The
modification time is expressed as the number of seconds since the Unix
Epoch. The hash algorithm is implied by the hash length. Currently, the
hash must be 32 bytes long and computed by SHA256.
Epoch. The version field is a counter that increments each time the file
changes but resets to zero each time the modification is updated. This
is used to signal changes to the file (or file metadata) while the
modification time remains unchanged. The hash algorithm is implied by
the hash length. Currently, the hash must be 32 bytes long and computed
by SHA256.
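
The (Modified, Version) pair described here is exactly what the Equals and NewerThan helpers added to model/walk.go earlier in this change set compare on. A small illustrative sketch, with hypothetical values:

```go
package main

import "fmt"

// A record is unchanged only when both the modification time and the version
// counter match; it is newer when it sorts higher on that pair.
type record struct {
	Modified int64
	Version  uint32
}

func newerThan(a, b record) bool {
	return a.Modified > b.Modified || (a.Modified == b.Modified && a.Version > b.Version)
}

func main() {
	old := record{Modified: 1390000000, Version: 0}
	// A metadata-only change (e.g. chmod) keeps Modified but bumps Version.
	chmodded := record{Modified: 1390000000, Version: 1}
	fmt.Println(newerThan(chmodded, old)) // true: picked as the newer record
}
```
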
The flags field is made up of the following single bit flags:

0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |D| Unix Perm. & Mode |
| Reserved |I|D| Unix Perm. & Mode |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

- The lower 12 bits hold the common Unix permission and mode bits.
@@ -118,9 +122,13 @@ The flags field is made up of the following single bit flags:
- Bit 19 ("D") is set when the file has been deleted. The block list
shall contain zero blocks and the modification time indicates the
time of deletion or, if deletion time is not reliably determinable,
one second past the last know modification time.
the last known modification time and a higher version number.

- Bit 0 through 18 are reserved for future use and shall be set to
- Bit 18 ("I") is set when the file is invalid and unavailable for
synchronization. A peer may set this bit to indicate that it can
temporarily not serve data for the file.

- Bit 0 through 17 are reserved for future use and shall be set to
zero.
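
In this network-order diagram, bit 19 ("D") and bit 18 ("I") correspond to the FlagDeleted (1 << 12) and FlagInvalid (1 << 13) constants added to protocol.go later in this change set. A short sketch of picking a Flags value apart:

```go
package main

import "fmt"

// Flag values as defined in protocol.go in this change set.
const (
	FlagDeleted = 1 << 12 // bit 19 ("D") in the diagram above
	FlagInvalid = 1 << 13 // bit 18 ("I")
)

func describe(flags uint32) string {
	perm := flags & 0xfff // lower 12 bits: Unix permission and mode bits
	switch {
	case flags&FlagInvalid != 0:
		return fmt.Sprintf("invalid, perm %o", perm)
	case flags&FlagDeleted != 0:
		return fmt.Sprintf("deleted, perm %o", perm)
	default:
		return fmt.Sprintf("regular, perm %o", perm)
	}
}

func main() {
	fmt.Println(describe(0755))
	fmt.Println(describe(FlagDeleted | 0644))
	fmt.Println(describe(FlagInvalid | 0644))
}
```
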
### Request (Type = 2)
|
||||
|
||||
@@ -39,6 +39,7 @@ func (w *marshalWriter) writeIndex(idx []FileInfo) {
|
||||
w.writeString(f.Name)
|
||||
w.writeUint32(f.Flags)
|
||||
w.writeUint64(uint64(f.Modified))
|
||||
w.writeUint32(f.Version)
|
||||
w.writeUint32(uint32(len(f.Blocks)))
|
||||
for _, b := range f.Blocks {
|
||||
w.writeUint32(b.Length)
|
||||
@@ -77,6 +78,7 @@ func (r *marshalReader) readIndex() []FileInfo {
|
||||
files[i].Name = r.readString()
|
||||
files[i].Flags = r.readUint32()
|
||||
files[i].Modified = int64(r.readUint64())
|
||||
files[i].Version = r.readUint32()
|
||||
nblocks := r.readUint32()
|
||||
blocks := make([]BlockInfo, nblocks)
|
||||
for j := range blocks {
|
||||
|
||||
@@ -12,8 +12,9 @@ func TestIndex(t *testing.T) {
|
||||
idx := []FileInfo{
|
||||
{
|
||||
"Foo",
|
||||
0755,
|
||||
FlagInvalid & FlagDeleted & 0755,
|
||||
1234567890,
|
||||
142,
|
||||
[]BlockInfo{
|
||||
{12345678, []byte("hash hash hash")},
|
||||
{23456781, []byte("ash hash hashh")},
|
||||
@@ -23,6 +24,7 @@ func TestIndex(t *testing.T) {
|
||||
"Quux/Quux",
|
||||
0644,
|
||||
2345678901,
|
||||
232323232,
|
||||
[]BlockInfo{
|
||||
{45678123, []byte("4321 hash hash hash")},
|
||||
{56781234, []byte("3214 ash hash hashh")},
|
||||
@@ -81,6 +83,7 @@ func BenchmarkWriteIndex(b *testing.B) {
|
||||
"Foo",
|
||||
0777,
|
||||
1234567890,
|
||||
424242,
|
||||
[]BlockInfo{
|
||||
{12345678, []byte("hash hash hash")},
|
||||
{23456781, []byte("ash hash hashh")},
|
||||
@@ -90,6 +93,7 @@ func BenchmarkWriteIndex(b *testing.B) {
|
||||
"Quux/Quux",
|
||||
0644,
|
||||
2345678901,
|
||||
323232,
|
||||
[]BlockInfo{
|
||||
{45678123, []byte("4321 hash hash hash")},
|
||||
{56781234, []byte("3214 ash hash hashh")},
|
||||
|
||||
@@ -20,10 +20,16 @@ const (
|
||||
messageTypeIndexUpdate = 6
|
||||
)
|
||||
|
||||
const (
|
||||
FlagDeleted = 1 << 12
|
||||
FlagInvalid = 1 << 13
|
||||
)
|
||||
|
||||
type FileInfo struct {
|
||||
Name string
|
||||
Flags uint32
|
||||
Modified int64
|
||||
Version uint32
|
||||
Blocks []BlockInfo
|
||||
}
|
||||
|
||||
@@ -46,7 +52,7 @@ type Model interface {
 type Connection struct {
     sync.RWMutex

-    ID       string
+    id       string
     receiver Model
     reader   io.Reader
     mreader  *marshalReader

@@ -55,7 +61,7 @@ type Connection struct {
     closed    bool
     awaiting  map[int]chan asyncResult
     nextId    int
-    indexSent map[string]int64
+    indexSent map[string][2]int64

     hasSentIndex  bool
     hasRecvdIndex bool

@@ -83,13 +89,13 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
     }

     c := Connection{
+        id:       nodeID,
         receiver: receiver,
         reader:   flrd,
         mreader:  &marshalReader{r: flrd},
         writer:   flwr,
         mwriter:  &marshalWriter{w: flwr},
         awaiting: make(map[int]chan asyncResult),
-        ID:       nodeID,
     }

     go c.readerLoop()

@@ -98,6 +104,10 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
     return &c
 }

+func (c *Connection) ID() string {
+    return c.id
+}
+
 // Index writes the list of file information to the connected peer node
 func (c *Connection) Index(idx []FileInfo) {
     c.Lock()

@@ -106,18 +116,18 @@ func (c *Connection) Index(idx []FileInfo) {
         // This is the first time we send an index.
         msgType = messageTypeIndex

-        c.indexSent = make(map[string]int64)
+        c.indexSent = make(map[string][2]int64)
         for _, f := range idx {
-            c.indexSent[f.Name] = f.Modified
+            c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
         }
     } else {
         // We have sent one full index. Only send updates now.
         msgType = messageTypeIndexUpdate
         var diff []FileInfo
         for _, f := range idx {
-            if modified, ok := c.indexSent[f.Name]; !ok || f.Modified != modified {
+            if vs, ok := c.indexSent[f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
                 diff = append(diff, f)
-                c.indexSent[f.Name] = f.Modified
+                c.indexSent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
             }
         }
         idx = diff

@@ -131,10 +141,10 @@ func (c *Connection) Index(idx []FileInfo) {
     c.Unlock()

     if err != nil {
-        c.Close(err)
+        c.close(err)
         return
     } else if c.mwriter.err != nil {
-        c.Close(c.mwriter.err)
+        c.close(c.mwriter.err)
         return
     }
 }
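
The change above keys the differential index on the pair (modification time, version) instead of the modification time alone. A minimal standalone sketch of that bookkeeping; the type and function names are illustrative, not the actual Connection internals:

    package main

    import "fmt"

    type fileMeta struct {
        Name     string
        Modified int64
        Version  uint32
    }

    // changedSince returns the entries whose (Modified, Version) pair differs
    // from what was last sent and records the new pair -- the same idea as the
    // indexSent map[string][2]int64 in the hunk above.
    func changedSince(sent map[string][2]int64, idx []fileMeta) []fileMeta {
        var diff []fileMeta
        for _, f := range idx {
            if vs, ok := sent[f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
                diff = append(diff, f)
                sent[f.Name] = [2]int64{f.Modified, int64(f.Version)}
            }
        }
        return diff
    }

    func main() {
        sent := make(map[string][2]int64)
        idx := []fileMeta{{"foo", 100, 1}, {"bar", 200, 1}}

        fmt.Println(len(changedSince(sent, idx))) // 2: nothing has been announced yet
        idx[1].Version++                          // bump one file's version only
        fmt.Println(len(changedSince(sent, idx))) // 1: just "bar" needs re-announcing
    }

A file is re-announced when either its modification time or its version counter moves, which is what the deleted-file wording in the spec change above relies on.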
@@ -152,13 +162,13 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
     c.mwriter.writeRequest(request{name, offset, size, hash})
     if c.mwriter.err != nil {
         c.Unlock()
-        c.Close(c.mwriter.err)
+        c.close(c.mwriter.err)
         return nil, c.mwriter.err
     }
     err := c.flush()
     if err != nil {
         c.Unlock()
-        c.Close(err)
+        c.close(err)
         return nil, err
     }
     c.nextId = (c.nextId + 1) & 0xfff

@@ -171,7 +181,7 @@ func (c *Connection) Request(name string, offset uint64, size uint32, hash []byt
     return res.val, res.err
 }

-func (c *Connection) Ping() bool {
+func (c *Connection) ping() bool {
     c.Lock()
     if c.closed {
         c.Unlock()

@@ -183,11 +193,11 @@ func (c *Connection) Ping() bool {
     err := c.flush()
     if err != nil {
         c.Unlock()
-        c.Close(err)
+        c.close(err)
         return false
     } else if c.mwriter.err != nil {
         c.Unlock()
-        c.Close(c.mwriter.err)
+        c.close(c.mwriter.err)
         return false
     }
     c.nextId = (c.nextId + 1) & 0xfff

@@ -197,9 +207,6 @@ func (c *Connection) Ping() bool {
     return ok && res.err == nil
 }

-func (c *Connection) Stop() {
-}
-
 type flusher interface {
     Flush() error
 }

@@ -211,7 +218,7 @@ func (c *Connection) flush() error {
     return nil
 }

-func (c *Connection) Close(err error) {
+func (c *Connection) close(err error) {
     c.Lock()
     if c.closed {
         c.Unlock()

@@ -224,7 +231,7 @@ func (c *Connection) Close(err error) {
     c.awaiting = nil
     c.Unlock()

-    c.receiver.Close(c.ID, err)
+    c.receiver.Close(c.id, err)
 }

 func (c *Connection) isClosed() bool {

@@ -238,11 +245,11 @@ loop:
     for {
         hdr := c.mreader.readHeader()
         if c.mreader.err != nil {
-            c.Close(c.mreader.err)
+            c.close(c.mreader.err)
             break loop
         }
         if hdr.version != 0 {
-            c.Close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
+            c.close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
             break loop
         }

@@ -250,10 +257,10 @@ loop:
         case messageTypeIndex:
             files := c.mreader.readIndex()
             if c.mreader.err != nil {
-                c.Close(c.mreader.err)
+                c.close(c.mreader.err)
                 break loop
             } else {
-                c.receiver.Index(c.ID, files)
+                c.receiver.Index(c.id, files)
             }
             c.Lock()
             c.hasRecvdIndex = true

@@ -262,16 +269,16 @@ loop:
         case messageTypeIndexUpdate:
             files := c.mreader.readIndex()
             if c.mreader.err != nil {
-                c.Close(c.mreader.err)
+                c.close(c.mreader.err)
                 break loop
             } else {
-                c.receiver.IndexUpdate(c.ID, files)
+                c.receiver.IndexUpdate(c.id, files)
             }

         case messageTypeRequest:
             req := c.mreader.readRequest()
             if c.mreader.err != nil {
-                c.Close(c.mreader.err)
+                c.close(c.mreader.err)
                 break loop
             }
             go c.processRequest(hdr.msgID, req)

@@ -280,7 +287,7 @@ loop:
             data := c.mreader.readResponse()

             if c.mreader.err != nil {
-                c.Close(c.mreader.err)
+                c.close(c.mreader.err)
                 break loop
             } else {
                 c.Lock()

@@ -300,10 +307,10 @@ loop:
             err := c.flush()
             c.Unlock()
             if err != nil {
-                c.Close(err)
+                c.close(err)
                 break loop
             } else if c.mwriter.err != nil {
-                c.Close(c.mwriter.err)
+                c.close(c.mwriter.err)
                 break loop
             }

@@ -322,14 +329,14 @@ loop:
             }

         default:
-            c.Close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
+            c.close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
             break loop
         }
     }
 }

 func (c *Connection) processRequest(msgID int, req request) {
-    data, _ := c.receiver.Request(c.ID, req.name, req.offset, req.size, req.hash)
+    data, _ := c.receiver.Request(c.id, req.name, req.offset, req.size, req.hash)

     c.Lock()
     c.mwriter.writeUint32(encodeHeader(header{0, msgID, messageTypeResponse}))

@@ -339,9 +346,9 @@ func (c *Connection) processRequest(msgID int, req request) {

     buffers.Put(data)
     if err != nil {
-        c.Close(err)
+        c.close(err)
     } else if c.mwriter.err != nil {
-        c.Close(c.mwriter.err)
+        c.close(c.mwriter.err)
     }
 }

@@ -356,15 +363,15 @@ func (c *Connection) pingerLoop() {

         if ready {
             go func() {
-                rc <- c.Ping()
+                rc <- c.ping()
             }()
             select {
             case ok := <-rc:
                 if !ok {
-                    c.Close(fmt.Errorf("Ping failure"))
+                    c.close(fmt.Errorf("Ping failure"))
                 }
             case <-time.After(pingTimeout):
-                c.Close(fmt.Errorf("Ping timeout"))
+                c.close(fmt.Errorf("Ping timeout"))
             }
         }
     }

@@ -46,10 +46,10 @@ func TestPing(t *testing.T) {
     c0 := NewConnection("c0", ar, bw, nil)
     c1 := NewConnection("c1", br, aw, nil)

-    if ok := c0.Ping(); !ok {
+    if ok := c0.ping(); !ok {
         t.Error("c0 ping failed")
     }
-    if ok := c1.Ping(); !ok {
+    if ok := c1.ping(); !ok {
         t.Error("c1 ping failed")
     }
 }

@@ -70,7 +70,7 @@ func TestPingErr(t *testing.T) {
             c0 := NewConnection("c0", ar, ebw, m0)
             NewConnection("c1", br, eaw, m1)

-            res := c0.Ping()
+            res := c0.ping()
             if (i < 4 || j < 4) && res {
                 t.Errorf("Unexpected ping success; i=%d, j=%d", i, j)
             } else if (i >= 8 && j >= 8) && !res {

@@ -190,7 +190,7 @@ func TestClose(t *testing.T) {
     c0 := NewConnection("c0", ar, bw, m0)
     NewConnection("c1", br, aw, m1)

-    c0.Close(nil)
+    c0.close(nil)

     ok := c0.isClosed()
     if !ok {

@@ -199,7 +199,7 @@ func TestClose(t *testing.T) {

     // None of these should panic, some should return an error

-    ok = c0.Ping()
+    ok = c0.ping()
     if ok {
         t.Error("Ping should not return true")
     }

17
tls.go

@@ -3,7 +3,7 @@ package main
 import (
     "crypto/rand"
     "crypto/rsa"
-    "crypto/sha1"
+    "crypto/sha256"
     "crypto/tls"
     "crypto/x509"
     "crypto/x509/pkix"

@@ -12,11 +12,12 @@ import (
     "math/big"
     "os"
     "path"
+    "strings"
     "time"
 )

 const (
-    tlsRSABits = 2048
+    tlsRSABits = 3072
     tlsName    = "syncthing"
 )

@@ -25,13 +26,15 @@ func loadCert(dir string) (tls.Certificate, error) {
 }

 func certId(bs []byte) string {
-    hf := sha1.New()
+    hf := sha256.New()
     hf.Write(bs)
     id := hf.Sum(nil)
-    return base32.StdEncoding.EncodeToString(id)
+    return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
 }

 func newCertificate(dir string) {
     infoln("Generating RSA certificate and key...")

     priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
     fatalErr(err)

@@ -47,7 +50,7 @@ func newCertificate(dir string) {
         NotAfter:  notAfter,

         KeyUsage:    x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-        ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+        ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
         BasicConstraintsValid: true,
     }

@@ -58,11 +61,11 @@ func newCertificate(dir string) {
     fatalErr(err)
     pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
     certOut.Close()
-    okln("Created TLS certificate file")
+    okln("Created RSA certificate file")

     keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
     fatalErr(err)
     pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
     keyOut.Close()
-    okln("Created TLS key file")
+    okln("Created RSA key file")
 }

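The certId change above swaps SHA-1 for SHA-256 and strips the "=" padding from the base32 output. For illustration, the same derivation as a standalone sketch; the certID name and the example input are hypothetical, and a real caller would pass the certificate bytes loaded by loadCert:

    package main

    import (
        "crypto/sha256"
        "encoding/base32"
        "fmt"
        "strings"
    )

    // certID mirrors the updated certId above: SHA-256 over the certificate
    // bytes, base32 encoded, with the trailing '=' padding trimmed off.
    func certID(der []byte) string {
        sum := sha256.Sum256(der)
        return strings.Trim(base32.StdEncoding.EncodeToString(sum[:]), "=")
    }

    func main() {
        fmt.Println(certID([]byte("example certificate bytes")))
    }

Trimming the padding keeps the resulting identifier shorter and easier to copy around, and nothing is lost since the decoded length is fixed.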