mirror of https://github.com/syncthing/syncthing.git
synced 2026-01-09 14:29:22 -05:00

Compare commits

17 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 3ed783983f | |
| | 837f3a68ab | |
| | 59e45c5c68 | |
| | 94761d0472 | |
| | a91eb701bf | |
| | 1a1f118f1a | |
| | b115fca8a9 | |
| | dfd239ac06 | |
| | 2ae218d069 | |
| | 1401d2ee9b | |
| | f39e105101 | |
| | 482795bab0 | |
| | 10e8861f14 | |
| | ecc6476308 | |
| | 28e347002a | |
| | b3d19bd5cc | |
| | 647165ab89 | |
File diff suppressed because one or more lines are too long
11 build.sh
@@ -11,6 +11,8 @@ host=${host%%.*}
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host"

build() {
go vet ./... || exit 1

if command -v godep >/dev/null ; then
godep=godep
else

@@ -20,7 +22,6 @@ build() {
godep=
fi
${godep} go build $* -ldflags "$ldflags" ./cmd/syncthing
${godep} go build -ldflags "$ldflags" ./cmd/stcli
}

assets() {

@@ -62,7 +63,7 @@ zipDist() {
}

deps() {
godep save ./cmd/syncthing ./cmd/assets ./cmd/stcli ./discover/cmd/discosrv
godep save ./cmd/syncthing ./cmd/assets ./discover/cmd/discosrv
}

case "$1" in

@@ -96,7 +97,7 @@ case "$1" in
test || exit 1
assets

for os in darwin-amd64 linux-386 linux-amd64 freebsd-amd64 windows-amd64 ; do
for os in darwin-amd64 linux-386 linux-amd64 freebsd-amd64 windows-amd64 windows-386 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}

@@ -126,6 +127,10 @@ case "$1" in
build
tarDist "syncthing-linux-armv6-$version"

export GOARM=5
build
tarDist "syncthing-linux-armv5-$version"

;;

upload)
@@ -1,72 +0,0 @@
package main

import (
"fmt"
"log"
"os"
)

var logger *log.Logger

func init() {
log.SetOutput(os.Stderr)
logger = log.New(os.Stderr, "", log.Flags())
}

func debugln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "DEBUG: "+s)
}

func debugf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "DEBUG: "+s)
}

func infoln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "INFO: "+s)
}

func infof(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "INFO: "+s)
}

func okln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "OK: "+s)
}

func okf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "OK: "+s)
}

func warnln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "WARNING: "+s)
}

func warnf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "WARNING: "+s)
}

func fatalln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "FATAL: "+s)
os.Exit(3)
}

func fatalf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "FATAL: "+s)
os.Exit(3)
}

func fatalErr(err error) {
if err != nil {
fatalf(err.Error())
}
}
@@ -1,24 +1,11 @@
package main

import (
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base32"
"encoding/pem"
"math/big"
"os"
"path/filepath"
"strings"
"time"
)

const (
tlsRSABits = 3072
tlsName = "syncthing"
)

func loadCert(dir string) (tls.Certificate, error) {

@@ -31,41 +18,3 @@ func certID(bs []byte) string {
id := hf.Sum(nil)
return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
}

func newCertificate(dir string) {
infoln("Generating RSA certificate and key...")

priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
fatalErr(err)

notBefore := time.Now()
notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)

template := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(0),
Subject: pkix.Name{
CommonName: tlsName,
},
NotBefore: notBefore,
NotAfter: notAfter,

KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
}

derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
fatalErr(err)

certOut, err := os.Create(filepath.Join(dir, "cert.pem"))
fatalErr(err)
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
okln("Created RSA certificate file")

keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
fatalErr(err)
pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
keyOut.Close()
okln("Created RSA key file")
}
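An aside on the certificate code above: newCertificate writes cert.pem and key.pem with pem.Encode, and loadCert is expected to read them back. A minimal, hedged sketch of how such a generated pair can be sanity-checked with the standard library (the file names are taken from the diff; everything else is illustrative, not syncthing's code):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Load the PEM pair written by a generator like newCertificate above.
	// Assumes cert.pem and key.pem exist in the current directory.
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
	if err != nil {
		log.Fatalf("certificate/key pair does not load: %v", err)
	}
	fmt.Printf("loaded certificate chain with %d DER block(s)\n", len(cert.Certificate))
}
```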
@@ -285,3 +285,13 @@ func ensureNodePresent(nodes []NodeConfiguration, myID string) []NodeConfigurati
return nodes
}

func invalidateRepo(repoID string, err error) {
for i := range cfg.Repositories {
repo := &cfg.Repositories[i]
if repo.ID == repoID {
repo.Invalid = err.Error()
return
}
}
}
@@ -14,7 +14,6 @@ import (
"time"

"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/syncthing/scanner"
"github.com/codegangsta/martini"
)

@@ -166,23 +165,6 @@ func restPostReset(req *http.Request) {
go restart()
}

type guiFile scanner.File

func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
Name string
Size int64
Modified int64
Flags uint32
}
return json.Marshal(t{
Name: f.Name,
Size: scanner.File(f).Size,
Modified: f.Modified,
Flags: f.Flags,
})
}

var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
@@ -190,6 +190,14 @@ func main() {
},
}

port, err := getFreePort("127.0.0.1", 8080)
fatalErr(err)
cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

port, err = getFreePort("", 22000)
fatalErr(err)
cfg.Options.ListenAddress = []string{fmt.Sprintf(":%d", port)}

saveConfig()
infof("Edit %s to taste or use the GUI\n", cfgFile)
}

@@ -662,3 +670,35 @@ func getHomeDir() string {
return home
}

// getFreePort returns a free TCP port for listening on. The ports given are
// tried in succession and the first to succeed is returned. If none succeed,
// a random high port is returned.
func getFreePort(host string, ports ...int) (int, error) {
for _, port := range ports {
c, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))
if err == nil {
c.Close()
return port, nil
}
}

c, err := net.Listen("tcp", host+":0")
if err != nil {
return 0, err
}
addr := c.Addr().String()
c.Close()

_, portstr, err := net.SplitHostPort(addr)
if err != nil {
return 0, err
}

port, err := strconv.Atoi(portstr)
if err != nil {
return 0, err
}

return port, nil
}
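The getFreePort comment above describes the probing order: each preferred port is tried in turn, and a kernel-assigned high port is used as a fallback. A self-contained sketch of the same idea (a standalone illustration, not syncthing's actual code; the names are made up):

```go
package main

import (
	"fmt"
	"log"
	"net"
	"strconv"
)

// freePort tries the preferred ports in order and falls back to a
// kernel-assigned high port, mirroring the getFreePort logic above.
func freePort(host string, preferred ...int) (int, error) {
	for _, p := range preferred {
		l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, p))
		if err == nil {
			l.Close()
			return p, nil
		}
	}
	// None of the preferred ports was free; let the kernel pick one.
	l, err := net.Listen("tcp", host+":0")
	if err != nil {
		return 0, err
	}
	defer l.Close()
	_, portStr, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(portStr)
}

func main() {
	p, err := freePort("127.0.0.1", 8080, 8081)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("listening port candidate:", p)
}
```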
@@ -323,13 +323,15 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
}

lf := r.Get(cid.LocalID, name)
if offset > lf.Size {
warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
return nil, ErrNoSuchFile
if lf.Suppressed || lf.Flags&protocol.FlagDeleted != 0 {
return nil, ErrInvalid
}

if lf.Suppressed {
return nil, ErrInvalid
if offset > lf.Size {
if debugNet {
dlog.Printf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
}
return nil, ErrNoSuchFile
}

if debugNet && nodeID != "<local>" {

@@ -559,7 +561,7 @@ func (m *Model) ScanRepos() {
}
}

func (m *Model) ScanRepo(repo string) {
func (m *Model) ScanRepo(repo string) error {
sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
m.rmut.RLock()
w := &scanner.Walker{

@@ -572,9 +574,13 @@ func (m *Model) ScanRepo(repo string) {
}
m.rmut.RUnlock()
m.setState(repo, RepoScanning)
fs, _ := w.Walk()
fs, _, err := w.Walk()
if err != nil {
return err
}
m.ReplaceLocal(repo, fs)
m.setState(repo, RepoIdle)
return nil
}

func (m *Model) SaveIndexes(dir string) {
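The Model.Request change above reorders the incoming-request checks: suppressed or deleted files are rejected as invalid before the offset is compared against the file size. A standalone sketch of that ordering (hypothetical names; errInvalid and errNoSuchFile stand in for the model's errors):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errNoSuchFile = errors.New("no such file")
	errInvalid    = errors.New("file is invalid")
)

// validateRequest mirrors the order of checks in Model.Request above:
// reject suppressed or deleted files first, then out-of-range offsets.
func validateRequest(size int64, suppressed, deleted bool, offset int64) error {
	if suppressed || deleted {
		return errInvalid
	}
	if offset > size {
		return errNoSuchFile
	}
	return nil
}

func main() {
	fmt.Println(validateRequest(1024, false, false, 2048)) // no such file
	fmt.Println(validateRequest(1024, true, false, 0))     // file is invalid
	fmt.Println(validateRequest(1024, false, false, 0))    // <nil>
}
```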
@@ -173,7 +173,11 @@ func (p *puller) run() {
if debugPull {
dlog.Printf("%q: time for rescan", p.repo)
}
p.model.ScanRepo(p.repo)
err := p.model.ScanRepo(p.repo)
if err != nil {
invalidateRepo(p.repo, err)
return
}

default:
}

@@ -190,7 +194,11 @@ func (p *puller) runRO() {
if debugPull {
dlog.Printf("%q: time for rescan", p.repo)
}
p.model.ScanRepo(p.repo)
err := p.model.ScanRepo(p.repo)
if err != nil {
invalidateRepo(p.repo, err)
return
}
}
}
@@ -10,32 +10,6 @@ import (
"github.com/calmh/syncthing/scanner"
)

func MetricPrefix(n int64) string {
if n > 1e9 {
return fmt.Sprintf("%.02f G", float64(n)/1e9)
}
if n > 1e6 {
return fmt.Sprintf("%.02f M", float64(n)/1e6)
}
if n > 1e3 {
return fmt.Sprintf("%.01f k", float64(n)/1e3)
}
return fmt.Sprintf("%d ", n)
}

func BinaryPrefix(n int64) string {
if n > 1<<30 {
return fmt.Sprintf("%.02f Gi", float64(n)/(1<<30))
}
if n > 1<<20 {
return fmt.Sprintf("%.02f Mi", float64(n)/(1<<20))
}
if n > 1<<10 {
return fmt.Sprintf("%.01f Ki", float64(n)/(1<<10))
}
return fmt.Sprintf("%d ", n)
}

func Rename(from, to string) error {
if runtime.GOOS == "windows" {
err := os.Remove(to)
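For context, BinaryPrefix above formats byte counts with Ki/Mi/Gi prefixes using power-of-two thresholds. A standalone sketch of the same formatting idea (illustrative only, not the package's exported API):

```go
package main

import "fmt"

// binaryPrefix formats n with a Ki/Mi/Gi prefix, following the
// thresholds used by BinaryPrefix in the removed util code above.
func binaryPrefix(n int64) string {
	switch {
	case n > 1<<30:
		return fmt.Sprintf("%.02f Gi", float64(n)/(1<<30))
	case n > 1<<20:
		return fmt.Sprintf("%.02f Mi", float64(n)/(1<<20))
	case n > 1<<10:
		return fmt.Sprintf("%.01f Ki", float64(n)/(1<<10))
	default:
		return fmt.Sprintf("%d ", n)
	}
}

func main() {
	fmt.Println(binaryPrefix(1536) + "B")  // 1.5 KiB
	fmt.Println(binaryPrefix(5<<20) + "B") // 5.00 MiB
}
```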
1 gui/angular.min.js (vendored)

@@ -200,4 +200,3 @@ isolateScope:Ea.isolateScope,controller:Ea.controller,injector:Ea.injector,inher
$$csp:Tb});Ta=Uc(Z);try{Ta("ngLocale")}catch(c){Ta("ngLocale",[]).provider("$locale",rd)}Ta("ng",["ngLocale"],["$provide",function(a){a.provider({$$sanitizeUri:Bd});a.provider("$compile",ic).directive({a:Wd,input:Lc,textarea:Lc,form:Xd,script:De,select:Ge,style:Ie,option:He,ngBind:he,ngBindHtml:je,ngBindTemplate:ie,ngClass:ke,ngClassEven:me,ngClassOdd:le,ngCloak:ne,ngController:oe,ngForm:Yd,ngHide:xe,ngIf:pe,ngInclude:qe,ngInit:se,ngNonBindable:te,ngPluralize:ue,ngRepeat:ve,ngShow:we,ngStyle:ye,ngSwitch:ze,
ngSwitchWhen:Ae,ngSwitchDefault:Be,ngOptions:Fe,ngTransclude:Ce,ngModel:ce,ngList:ee,ngChange:de,required:Mc,ngRequired:Mc,ngValue:ge}).directive({ngInclude:re}).directive(Nb).directive(Nc);a.provider({$anchorScroll:cd,$animate:Td,$browser:ed,$cacheFactory:fd,$controller:id,$document:jd,$exceptionHandler:kd,$filter:Ac,$interpolate:pd,$interval:qd,$http:ld,$httpBackend:nd,$location:td,$log:ud,$parse:xd,$rootScope:Ad,$q:yd,$sce:Ed,$sceDelegate:Dd,$sniffer:Fd,$templateCache:gd,$timeout:Gd,$window:Hd})}])})(Na);
A(Q).ready(function(){Sc(Q,Yb)})})(window,document);!angular.$$csp()&&angular.element(document).find("head").prepend('<style type="text/css">@charset "UTF-8";[ng\\:cloak],[ng-cloak],[data-ng-cloak],[x-ng-cloak],.ng-cloak,.x-ng-cloak,.ng-hide{display:none !important;}ng\\:form{display:block;}</style>');
//# sourceMappingURL=angular.min.js.map
52 gui/app.js

@@ -44,10 +44,12 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {

function getSucceeded() {
if (!getOK) {
$scope.init();
$('#networkError').modal('hide');
getOK = true;
}
if (restarting) {
$scope.init();
$('#restarting').modal('hide');
restarting = false;
}

@@ -75,6 +77,13 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
return a.NodeID > b.NodeID;
}

function repoCompare(a, b) {
if (a.Directory < b.Directory) {
return -1;
}
return a.Directory > b.Directory;
}

$scope.refresh = function () {
$http.get(urlbase + '/system').success(function (data) {
getSucceeded();

@@ -142,7 +151,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}

if ($scope.model[repo].invalid !== '') {
return 'text-warning';
return 'text-danger';
}

var state = '' + $scope.model[repo].state;

@@ -421,32 +430,35 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};

$http.get(urlbase + '/version').success(function (data) {
$scope.version = data;
});
$scope.init = function() {
$http.get(urlbase + '/version').success(function (data) {
$scope.version = data;
});

$http.get(urlbase + '/system').success(function (data) {
$scope.system = data;
$scope.myID = data.myID;
});
$http.get(urlbase + '/system').success(function (data) {
$scope.system = data;
$scope.myID = data.myID;
});

$http.get(urlbase + '/config').success(function (data) {
$scope.config = data;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
$http.get(urlbase + '/config').success(function (data) {
$scope.config = data;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');

var nodes = $scope.config.Nodes;
nodes.sort(nodeCompare);
$scope.nodes = nodes;
$scope.nodes = $scope.config.Nodes;
$scope.nodes.sort(nodeCompare);

$scope.repos = $scope.config.Repositories;
$scope.repos = $scope.config.Repositories;
$scope.repos.sort(repoCompare);

$scope.refresh();
});
$scope.refresh();
});

$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});
};

$scope.init();
setInterval($scope.refresh, 10000);
});
@@ -134,7 +134,7 @@
<ul class="list-unstyled" ng-repeat="repo in repos">
<li>
<span class="text-monospace">{{repo.Directory}}</span>
<span ng-if="repo.Invalid" class="label label-danger">Invalid: {{repo.Invalid}}</span>
<span ng-if="model[repo.ID].invalid" class="label label-danger">{{model[repo.ID].invalid}}</span>
<ul class="list-no-bullet">
<li>
<div class="li-column" title="Repository ID">

@@ -415,9 +415,10 @@
<div class="form-group">
<div class="checkbox">
<label>
<input type="checkbox" ng-model="currentRepo.ReadOnly"> Read Only
<input type="checkbox" ng-model="currentRepo.ReadOnly"> Repository Master
</label>
</div>
<p class="help-block">Files are protected from changes made on other nodes, but changes made on <em>this</em> node will be sent to the rest of the cluster.</p>
</div>
<div class="form-group">
<label for="nodes">Nodes</label>
1 gui/jquery-2.0.3.min.js (vendored)
File diff suppressed because one or more lines are too long
@@ -9,6 +9,9 @@
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
<address>127.0.0.1:22004</address>
</node>
</repository>
<repository id="s12" directory="s12-1">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
23 integration/h4/cert.pem Normal file

@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID3jCCAkigAwIBAgIBADALBgkqhkiG9w0BAQswFDESMBAGA1UEAxMJc3luY3Ro
aW5nMB4XDTE0MDUxMDAwNTM0N1oXDTQ5MTIzMTIzNTk1OVowFDESMBAGA1UEAxMJ
c3luY3RoaW5nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA9MRyBtAr
Sjt29azNoCWxx5xZF3RodBcQu+wv5sRR8lWozrr4brfUJLslcQHowqaAprOU1NP+
BH12P5CSymsUrwAmCwSQ54CimXrNi5RiNMl7dtInJksk4Kp6nJgfyR7TqeQgqxtv
+skVWdJY7ptxqpVuDfkf1JnNr68dbANw8hEJpPaGm3qOt81YvSg37R75HiOCzv+h
FcSjKpPyFMvPARMCOHuZS0fYRJtI5nwmR0mWtKfnH/2204YNiQUne/8h2fgtkpxy
OjxKOs2KJxbmpV6Uur/YyGyinb5+Aa0df3KCBuZmE+i/AsZcTsk0fgefe+bshWG/
hzrNfV0wsX3TYjYOSBJ04+f/uQW00G1GGSxPwTsShGqVuwfJkTqkjAXX5wcH+PgJ
ewG/dyMzKklMg19Y65WkhpWa/19o2KSZNw6TO8YM1arwT0STcMc+4fdrVB09lX6q
NJA8UL8hUX+jbKBzatDY64h1d9E8PE0ODHYgYFO2Ko7e2GnWCQeijGmnAgMBAAGj
PzA9MA4GA1UdDwEB/wQEAwIAoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
AwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggGBANFiHcATP5Lm11o65wbh
sKk7yteTapRohMoLNdW44YNyM8ZkELnrdNY8pe3CWSGy3spBH01+4jbUT+gSltQr
KTLVxSZ7f91696Og5ag4BQCeFY6ghKD/G9+PlBSj6yb3Y98NZsx8huLfylH+XuJw
2gP5Nqov4uXaKgYylx2gdaeCb2M+wM/br1DO2HCPCmgbZE5g8RM5JxzojGn/41Le
IbCd39zdI6NKj9c7T1Bxmt20uzca4nRgXVVzJymedEoF+//sBRk6PQzqgjgn/r3S
h9vrqo5j8ly/+ojFjBaVY7gq2XHM6/q0LTjeKkv2MUQw+vEEZX65GpBOgBZ8U0Wb
/NMUUhhDjGE/0G6TCJgq/HdkjmsNaWjO5sWjhnwXNImYXBdH4OenhXIrHcLhcnxN
2n5sPkDc6n0LVVV7VAjBPXcTmu2uOSK02yqNZLLWJygp1Wl6lbiqLS3bJgYrUv2m
YkRaR+IqVPw5EPs/QlH0qLBeCyIasaSWUVZeitVwRmqIUA==
-----END CERTIFICATE-----
36 integration/h4/config.xml Normal file

@@ -0,0 +1,36 @@
<configuration version="2">
<repository id="unique" directory="s4" ro="false">
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1"></node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2"></node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3"></node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4"></node>
</repository>
<node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
<address>127.0.0.1:22001</address>
</node>
<node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
<address>127.0.0.1:22002</address>
</node>
<node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
<address>127.0.0.1:22003</address>
</node>
<node id="EJHMPAQOGCVORISB4IS3SYYVJXTKJGLTU66DIQPGJ5D2GXGQ3OWQ" name="s4">
<address>dynamic</address>
</node>
<gui enabled="true">
<address>127.0.0.1:8084</address>
</gui>
<options>
<listenAddress>:22004</listenAddress>
<globalAnnounceServer>announce.syncthing.net:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>true</localAnnounceEnabled>
<parallelRequests>16</parallelRequests>
<maxSendKbps>0</maxSendKbps>
<rescanIntervalS>60</rescanIntervalS>
<reconnectionIntervalS>10</reconnectionIntervalS>
<maxChangeKbps>1000</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
</options>
</configuration>
39 integration/h4/key.pem Normal file

@@ -0,0 +1,39 @@
-----BEGIN RSA PRIVATE KEY-----
MIIG5AIBAAKCAYEA9MRyBtArSjt29azNoCWxx5xZF3RodBcQu+wv5sRR8lWozrr4
brfUJLslcQHowqaAprOU1NP+BH12P5CSymsUrwAmCwSQ54CimXrNi5RiNMl7dtIn
Jksk4Kp6nJgfyR7TqeQgqxtv+skVWdJY7ptxqpVuDfkf1JnNr68dbANw8hEJpPaG
m3qOt81YvSg37R75HiOCzv+hFcSjKpPyFMvPARMCOHuZS0fYRJtI5nwmR0mWtKfn
H/2204YNiQUne/8h2fgtkpxyOjxKOs2KJxbmpV6Uur/YyGyinb5+Aa0df3KCBuZm
E+i/AsZcTsk0fgefe+bshWG/hzrNfV0wsX3TYjYOSBJ04+f/uQW00G1GGSxPwTsS
hGqVuwfJkTqkjAXX5wcH+PgJewG/dyMzKklMg19Y65WkhpWa/19o2KSZNw6TO8YM
1arwT0STcMc+4fdrVB09lX6qNJA8UL8hUX+jbKBzatDY64h1d9E8PE0ODHYgYFO2
Ko7e2GnWCQeijGmnAgMBAAECggGBAIjKaLdqC2d3CCqQonJH3q0hsaCsC9wlL9L2
UmbzfKCkQq0WTNUDo2nLtUcMvBpclzWS0zCGMUYtH7Kyh3bclTigKqKpsJnQiA6i
VNEW4jOCDp//HqYGBNwSKmftlIX/1mbx+VfnA5PyYR5LsivXb5TX4iOpAKL+Obdf
dF/zJGIEJ5GrvNqTicMq3dcI7Qh18N9pFSe+MTZLKK0Y9Yetx0hgaTNL0AYEZtcg
uYMmCvZ4J+Namo6EanKYTmQvHzvq/tZVMvud9Gcr6uKKtVBcgex9S/R7IicaKg78
oDTgH0nDrpI55pZCX8vuVGk8nVTXXLTsMR1XojOpiYjS6ucfTkPEw3fOW/YRhHg5
93TrdDiWkqSWube5LNUF87q65t/aw/y2EH2aTNqcPD5OQ+EZRS8OGYPqOrJ4Ycbp
j6CMSE+LX2IDMQyJ+9J0vPHtFsAviBKQkPoQ1L6mvhJuw6ksy34NQGykNDHz7nQK
SeqvCJ6XCtaWNkq+00lC3UFaGsjuUQKBwQD8+y370co5G7G5GDLbLE3i+pguUN7L
5YfDj5qqsM9hOJNqeKAHrKFP2ii0F9WxGw/ruY0k8k7zUt6LepgwkCI5BYfckRKJ
g8YsNTizjqPLRGtiqL9Garjo+xPxFGj+TkTg9fYD4xTWFa1I15zzCu7Ye7xObeEH
LRtcm3R4fU54JDrKtKDccoQmTEAzsxRdNXi9ifc7qgjGBH9W02guuGPY4ltT1aZR
bcO5vpi44Fnl2h6d7N6iwCtFJ0CaT1pAZ4UCgcEA97Asf5DTDWKByZBhk+VvuT1b
6nMYjqKxDNMmCaomCmk8Mif0w9SEJmAg0b/gbs/H6T78a+9WjbN5q9xHcDU91uax
TdCenTq7H981AjgUG7OA7XwYn+AKy+hGSnsTJglMJzJm6TGt+Sq0oO9EahBRDlsP
PiQRot2gyQfubwcl3rhdErRwaCM92BUyPkC2fy2OppAeZOOxxuzxrvHflDOuDGCZ
KPCmy6U9HV0JOAO2FSNJeZdNLBixXa1Pk8TgbLY7AoHBAPG7lhn9Qg3Fz9H9NINH
13jfWdFQB0SwJEWTEAiwgMj2ha6Eau5KX63s2V4VNGVSZakqmZtHSneppOuEjq5A
2+K+zS7PFPaACzos9OxmjU7rJu2UL4m66sv9NvXzOcxev+RyQs0+DKfw+K8VEG0Q
8l+8BJiw2AjCalXYWbfUjMmyXNdbOCbN6kaqL+L26KuUL7Z1gd/qPw3wODmgMvoJ
yabxzLDUA2PlzdPMMyTdhCllfkILmEXN+MrQkiOhVa0a/QKBwGZjAhH9ePD4fnQm
5d8wIb3uGlfRGh6kLBIEGp42IqF9HPASykBFUhdW91odOhY0eAv4CHpJpnrO7QXY
+gLtT1HNbQ+gpGCUTZQAPbZcHhvRWQNSoA8+mtftfVj+hUzc3Qj68cWFzsfIGoDI
R3ycoBUSGTvzxwKPIQ7Y43wr9UCa74Zy5mB16POw12MadxYda/F4c8f6w5taiRFr
VKO7tT/Skp101U4rURcZRV1NU3BrdMz5eWI4FuGFafbIlIj7zwKBwHCt3VQt+JmZ
OhCJR+8Q+jT0JvnMu1zi4CcMRiT8FbNdZDY/3B0wG4ySTNrEikFzIjihF4zIp2nv
nD3qKQs+THl51GA8AnP9bNk7hknD7rXUuScndccTW58+PGrjqfwJp/1MEeOJQpoX
0JML1w+dIKHzsKN0X6UL7Gyq8m+0SJKmQQguan3d3M8CMpnW0srgqOfJ+q1+bz8b
6FuJeijoaN8+zyKkN+9R91Erw5pk+7vJRzEpDtkhprEE5tLNDKrXJw==
-----END RSA PRIVATE KEY-----
@@ -14,7 +14,7 @@ go build json.go

start() {
echo "Starting..."
for i in 1 2 3 ; do
for i in 1 2 3 4 ; do
STPROFILER=":909$i" syncthing -home "h$i" &
done
}
@@ -9,7 +9,6 @@ import (
"sync"
"time"

"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/xdr"
)

@@ -77,13 +76,13 @@ type rawConnection struct {
xw *xdr.Writer
wmut sync.Mutex

close chan error
closed chan struct{}

awaiting map[int]chan asyncResult
nextID int
indexSent map[string]map[string][2]int64
awaiting []chan asyncResult
imut sync.Mutex

nextID chan int
outbox chan []encodable
closed chan struct{}
}

type asyncResult struct {

@@ -92,7 +91,7 @@ type asyncResult struct {
}

const (
pingTimeout = 2 * time.Minute
pingTimeout = 4 * time.Minute
pingIdleTime = 5 * time.Minute
)

@@ -117,15 +116,17 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
cw: cw,
wb: wb,
xw: xdr.NewWriter(wb),
close: make(chan error),
closed: make(chan struct{}),
awaiting: make(map[int]chan asyncResult),
awaiting: make([]chan asyncResult, 0x1000),
indexSent: make(map[string]map[string][2]int64),
outbox: make(chan []encodable),
nextID: make(chan int),
closed: make(chan struct{}),
}

go c.closer()
go c.readerLoop()
go c.writerLoop()
go c.pingerLoop()
go c.idGenerator()

return wireFormatConnection{&c}
}

@@ -136,10 +137,6 @@ func (c *rawConnection) ID() string {

// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) {
if c.isClosed() {
return
}

c.imut.Lock()
var msgType int
if c.indexSent[repo] == nil {

@@ -162,48 +159,32 @@ func (c *rawConnection) Index(repo string, idx []FileInfo) {
}
idx = diff
}

id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
c.imut.Unlock()

c.wmut.Lock()
header{0, id, msgType}.encodeXDR(c.xw)
IndexMessage{repo, idx}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()

if err != nil {
c.close <- err
return
}
c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
}

// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
if c.isClosed() {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return nil, ErrClosed
}

c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
rc := make(chan asyncResult)
if _, ok := c.awaiting[id]; ok {
if ch := c.awaiting[id]; ch != nil {
panic("id taken")
}
rc := make(chan asyncResult)
c.awaiting[id] = rc
c.imut.Unlock()

c.wmut.Lock()
header{0, id, messageTypeRequest}.encodeXDR(c.xw)
RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()

if err != nil {
c.close <- err
return nil, err
ok := c.send(header{0, id, messageTypeRequest},
RequestMessage{repo, name, uint64(offset), uint32(size)})
if !ok {
return nil, ErrClosed
}

res, ok := <-rc

@@ -215,45 +196,24 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int

// ClusterConfig send the cluster configuration message to the peer and returns any error
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
if c.isClosed() {
return
}

c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
c.imut.Unlock()

c.wmut.Lock()
header{0, id, messageTypeClusterConfig}.encodeXDR(c.xw)
config.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()

if err != nil {
c.close <- err
}
c.send(header{0, -1, messageTypeClusterConfig}, config)
}

func (c *rawConnection) ping() bool {
if c.isClosed() {
var id int
select {
case id = <-c.nextID:
case <-c.closed:
return false
}

c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
rc := make(chan asyncResult, 1)
c.imut.Lock()
c.awaiting[id] = rc
c.imut.Unlock()

c.wmut.Lock()
header{0, id, messageTypePing}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()

if err != nil {
c.close <- err
ok := c.send(header{0, id, messageTypePing})
if !ok {
return false
}

@@ -261,6 +221,196 @@ func (c *rawConnection) ping() bool {
return ok && res.err == nil
}

func (c *rawConnection) readerLoop() (err error) {
defer func() {
c.close(err)
}()

for {
select {
case <-c.closed:
return ErrClosed
default:
}

var hdr header
hdr.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
return err
}
if hdr.version != 0 {
return fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version)
}

switch hdr.msgType {
case messageTypeIndex:
if err := c.handleIndex(); err != nil {
return err
}

case messageTypeIndexUpdate:
if err := c.handleIndexUpdate(); err != nil {
return err
}

case messageTypeRequest:
if err := c.handleRequest(hdr); err != nil {
return err
}

case messageTypeResponse:
if err := c.handleResponse(hdr); err != nil {
return err
}

case messageTypePing:
c.send(header{0, hdr.msgID, messageTypePong})

case messageTypePong:
c.handlePong(hdr)

case messageTypeClusterConfig:
if err := c.handleClusterConfig(); err != nil {
return err
}

default:
return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
}
}
}

func (c *rawConnection) handleIndex() error {
var im IndexMessage
im.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
return err
} else {

// We run this (and the corresponding one for update, below)
// in a separate goroutine to avoid blocking the read loop.
// There is otherwise a potential deadlock where both sides
// has the model locked because it's sending a large index
// update and can't receive the large index update from the
// other side.

go c.receiver.Index(c.id, im.Repository, im.Files)
}
return nil
}

func (c *rawConnection) handleIndexUpdate() error {
var im IndexMessage
im.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
return err
} else {
go c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
}
return nil
}

func (c *rawConnection) handleRequest(hdr header) error {
var req RequestMessage
req.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
return err
}
go c.processRequest(hdr.msgID, req)
return nil
}

func (c *rawConnection) handleResponse(hdr header) error {
data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size

if err := c.xr.Error(); err != nil {
return err
}

go func(hdr header, err error) {
c.imut.Lock()
rc := c.awaiting[hdr.msgID]
c.awaiting[hdr.msgID] = nil
c.imut.Unlock()

if rc != nil {
rc <- asyncResult{data, err}
close(rc)
}
}(hdr, c.xr.Error())

return nil
}

func (c *rawConnection) handlePong(hdr header) {
c.imut.Lock()
if rc := c.awaiting[hdr.msgID]; rc != nil {
go func() {
rc <- asyncResult{}
close(rc)
}()

c.awaiting[hdr.msgID] = nil
}
c.imut.Unlock()
}

func (c *rawConnection) handleClusterConfig() error {
var cm ClusterConfigMessage
cm.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
return err
} else {
go c.receiver.ClusterConfig(c.id, cm)
}
return nil
}

type encodable interface {
encodeXDR(*xdr.Writer) (int, error)
}
type encodableBytes []byte

func (e encodableBytes) encodeXDR(xw *xdr.Writer) (int, error) {
return xw.WriteBytes(e)
}

func (c *rawConnection) send(h header, es ...encodable) bool {
if h.msgID < 0 {
select {
case id := <-c.nextID:
h.msgID = id
case <-c.closed:
return false
}
}
msg := append([]encodable{h}, es...)

select {
case c.outbox <- msg:
return true
case <-c.closed:
return false
}
}

func (c *rawConnection) writerLoop() {
var err error
for es := range c.outbox {
c.wmut.Lock()
for _, e := range es {
e.encodeXDR(c.xw)
}

if err = c.flush(); err != nil {
c.wmut.Unlock()
c.close(err)
return
}
c.wmut.Unlock()
}
}

type flusher interface {
Flush() error
}

@@ -281,155 +431,41 @@ func (c *rawConnection) flush() error {
return nil
}

func (c *rawConnection) closer() {
err := <-c.close
func (c *rawConnection) close(err error) {
c.imut.Lock()
c.wmut.Lock()
defer c.imut.Unlock()
defer c.wmut.Unlock()

close(c.closed)
for _, ch := range c.awaiting {
close(ch)
}
c.awaiting = nil
c.writer.Close()
c.reader.Close()

c.receiver.Close(c.id, err)
}

func (c *rawConnection) isClosed() bool {
select {
case <-c.closed:
return true
return
default:
return false
close(c.closed)

for i, ch := range c.awaiting {
if ch != nil {
close(ch)
c.awaiting[i] = nil
}
}

c.writer.Close()
c.reader.Close()

c.receiver.Close(c.id, err)
}
}

func (c *rawConnection) readerLoop() {
loop:
for !c.isClosed() {
var hdr header
hdr.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
func (c *rawConnection) idGenerator() {
nextID := 0
for {
nextID = (nextID + 1) & 0xfff
select {
case c.nextID <- nextID:
case <-c.closed:
return
}
if hdr.version != 0 {
c.close <- fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version)
break loop
}

switch hdr.msgType {
case messageTypeIndex:
var im IndexMessage
im.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {

// We run this (and the corresponding one for update, below)
// in a separate goroutine to avoid blocking the read loop.
// There is otherwise a potential deadlock where both sides
// has the model locked because it's sending a large index
// update and can't receive the large index update from the
// other side.

go c.receiver.Index(c.id, im.Repository, im.Files)
}

case messageTypeIndexUpdate:
var im IndexMessage
im.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {
go c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
}

case messageTypeRequest:
var req RequestMessage
req.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
}
go c.processRequest(hdr.msgID, req)

case messageTypeResponse:
data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size

if err := c.xr.Error(); err != nil {
c.close <- err
break loop
}

go func(hdr header, err error) {
c.imut.Lock()
rc, ok := c.awaiting[hdr.msgID]
delete(c.awaiting, hdr.msgID)
c.imut.Unlock()

if ok {
rc <- asyncResult{data, err}
close(rc)
}
}(hdr, c.xr.Error())

case messageTypePing:
c.wmut.Lock()
header{0, hdr.msgID, messageTypePong}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()
if err != nil {
c.close <- err
break loop
}

case messageTypePong:
c.imut.Lock()
rc, ok := c.awaiting[hdr.msgID]

if ok {
go func() {
rc <- asyncResult{}
close(rc)
}()

delete(c.awaiting, hdr.msgID)
}
c.imut.Unlock()

case messageTypeClusterConfig:
var cm ClusterConfigMessage
cm.decodeXDR(c.xr)
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {
go c.receiver.ClusterConfig(c.id, cm)
}

default:
c.close <- fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
break loop
}
}
}

func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

c.wmut.Lock()
header{0, msgID, messageTypeResponse}.encodeXDR(c.xw)
c.xw.WriteBytes(data)
err := c.flush()
c.wmut.Unlock()

buffers.Put(data)

if err != nil {
c.close <- err
}
}

@@ -445,17 +481,27 @@ func (c *rawConnection) pingerLoop() {
select {
case ok := <-rc:
if !ok {
c.close <- fmt.Errorf("ping failure")
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(pingTimeout):
c.close <- fmt.Errorf("ping timeout")
c.close(fmt.Errorf("ping timeout"))
case <-c.closed:
return
}

case <-c.closed:
return
}
}
}

func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

c.send(header{0, msgID, messageTypeResponse},
encodableBytes(data))
}

type Statistics struct {
At time.Time
InBytesTotal int
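The protocol refactor above replaces the mutex-guarded nextID counter with an idGenerator goroutine that hands out IDs over the nextID channel, and routes all writes through the outbox channel consumed by writerLoop. A minimal standalone sketch of the channel-based ID allocation pattern (a simplified illustration without the connection state, not the package's code):

```go
package main

import "fmt"

// idGenerator hands out message IDs over a channel until done is closed,
// mirroring the nextID/idGenerator pattern in the diff above.
func idGenerator(ids chan<- int, done <-chan struct{}) {
	next := 0
	for {
		next = (next + 1) & 0xfff // wrap at 4096, like the 0xfff mask above
		select {
		case ids <- next:
		case <-done:
			return
		}
	}
}

func main() {
	ids := make(chan int)
	done := make(chan struct{})
	go idGenerator(ids, done)

	for i := 0; i < 3; i++ {
		fmt.Println("allocated id:", <-ids)
	}
	close(done)
}
```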
@@ -5,7 +5,6 @@ import (
"io"
"testing"
"testing/quick"
"time"
)

func TestHeaderFunctions(t *testing.T) {

@@ -173,17 +172,9 @@ func TestClose(t *testing.T) {
c0 := NewConnection("c0", ar, bw, m0).(wireFormatConnection).next.(*rawConnection)
NewConnection("c1", br, aw, m1)

c0.close <- nil
c0.close(nil)

select {
case <-c0.closed:
case <-time.After(1 * time.Second):
t.Fatal("Did not close within a second")
}

if !c0.isClosed() {
t.Fatal("Connection should be closed")
}
<-c0.closed
if !m0.isClosed() {
t.Fatal("Connection should be closed")
}
@@ -2,6 +2,7 @@ package scanner

import (
"bytes"
"errors"
"io/ioutil"
"log"
"os"

@@ -51,12 +52,18 @@ type CurrentFiler interface {

// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (files []File, ignore map[string][]string) {
func (w *Walker) Walk() (files []File, ignore map[string][]string, err error) {
w.lazyInit()

if debug {
dlog.Println("Walk", w.Dir, w.BlockSize, w.IgnoreFile)
}

err = checkDir(w.Dir)
if err != nil {
return
}

t0 := time.Now()

ignore = make(map[string][]string)

@@ -70,6 +77,8 @@ func (w *Walker) Walk() (files []File, ignore map[string][]string) {
d := t1.Sub(t0).Seconds()
dlog.Printf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d)
}

err = checkDir(w.Dir)
return
}

@@ -272,3 +281,12 @@ func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
}
return false
}

func checkDir(dir string) error {
if info, err := os.Stat(dir); err != nil {
return err
} else if !info.IsDir() {
return errors.New(dir + ": not a directory")
}
return nil
}
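The scanner change above makes Walk fail fast when the root is missing or is not a directory, via the new checkDir helper. A minimal standalone sketch of the same stat-and-verify pattern in a caller (hypothetical paths; this is not the package's test code):

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"os"
)

// checkDir mirrors the validation added to the scanner above: the path
// must exist and must be a directory.
func checkDir(dir string) error {
	info, err := os.Stat(dir)
	if err != nil {
		return err
	}
	if !info.IsDir() {
		return errors.New(dir + ": not a directory")
	}
	return nil
}

func main() {
	// Hypothetical paths: "." exists, "no-such-dir" does not.
	for _, d := range []string{".", "no-such-dir"} {
		if err := checkDir(d); err != nil {
			log.Printf("skipping %s: %v", d, err)
			continue
		}
		fmt.Println("would scan", d)
	}
}
```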
@@ -27,7 +27,11 @@ func TestWalk(t *testing.T) {
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
files, ignores := w.Walk()
files, ignores, err := w.Walk()

if err != nil {
t.Fatal(err)
}

if l1, l2 := len(files), len(testdata); l1 != l2 {
t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)

@@ -54,6 +58,30 @@ func TestWalk(t *testing.T) {
}
}

func TestWalkError(t *testing.T) {
w := Walker{
Dir: "testdata-missing",
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
_, _, err := w.Walk()

if err == nil {
t.Error("no error from missing directory")
}

w = Walker{
Dir: "testdata/bar",
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
_, _, err = w.Walk()

if err == nil {
t.Error("no error from non-directory")
}
}

func TestIgnore(t *testing.T) {
var patterns = map[string][]string{
"": {"t2"},