Compare commits

...

38 Commits

Author SHA1 Message Date
Jakob Borg
b3c2ffc96a Test for previous commit and never ignore entire repo directory 2014-03-09 12:36:26 +01:00
Jakob Borg
b5f652a815 Do not descend into ignored directories (fixes #86) 2014-03-09 12:21:07 +01:00
Jakob Borg
9ec7de643e Refactor profiler startup / logging 2014-03-09 09:18:28 +01:00
Jakob Borg
2553ba0463 Discover & main tracing 2014-03-09 09:15:36 +01:00
Jakob Borg
52ee7d5724 Discovery tracing 2014-03-09 08:58:03 +01:00
Jakob Borg
d4ef6a6285 Document env vars, start profiler based on STPROFILER 2014-03-09 08:48:29 +01:00
Jakob Borg
56b7d3c28d Don't start browser on restart 2014-03-09 08:35:53 +01:00
Jakob Borg
ae94b726a7 Don't expose a -delay parameter 2014-03-09 08:35:38 +01:00
Jakob Borg
a88e4db1ee Option to not start browser (fixes #84) 2014-03-08 23:19:33 +01:00
Jakob Borg
0ebd4a6ba1 Fix relative path open bug 2014-03-08 23:18:50 +01:00
Jakob Borg
1448cfe66a Refactor out file scanner into separate package 2014-03-08 23:02:01 +01:00
Jakob Borg
d6c9afd07f Fix handling of default values in config (fixes #83) 2014-03-04 22:29:48 +01:00
Jakob Borg
799f55e7ae Add basic config tests 2014-03-04 22:17:39 +01:00
Jakob Borg
04a3db132f Merge pull request #81 from filoozom/patch-1
Fix isTempName to work on Windows (fixes #80)
2014-03-04 21:56:23 +01:00
Philippe Schommers
d06204959e Fix isTempName to work on Windows (fixes #80)
```path.Base()``` is for slash-separated paths, whereas Windows uses "\" to separate paths. Just convert the \ to / and it works.
2014-03-04 18:48:03 +01:00
Jakob Borg
2d0600de38 Merge pull request #78 from filoozom/patch-1
Update config.go to handle default slices correctly (fixes #76)
2014-03-04 15:09:47 +01:00
filoozom
6a1c055288 Delete comment in config.go 2014-03-04 15:02:26 +01:00
filoozom
b9ec30ebdb Update config.go 2014-03-04 11:29:51 +01:00
filoozom
428164f395 Update config.go to handle default slices correctly 2014-03-04 11:25:10 +01:00
Jakob Borg
ba59e0d3f0 Use undirected broadcast and WriteTo (fixes #75) 2014-03-03 18:19:32 +01:00
Jakob Borg
5d8f0f835e Merge branch 'filoozom-patch-1'
* filoozom-patch-1:
  Fix divided by zero when the sync folder is empty (tot = 0)
  Delete cfgFile before renaming it on Windows
  Set the right config and home dir for each OS
  Update getHomeDir() to use "os/user"
2014-03-03 14:06:15 +01:00
filoozom
b4a1aadd1b Fix divided by zero when the sync folder is empty (tot = 0) 2014-03-03 08:47:52 +01:00
filoozom
8f41d90ab1 Delete cfgFile before renaming it on Windows 2014-03-03 08:46:20 +01:00
Jakob Borg
9743386166 Re-add inadvertently ignored files 2014-03-02 23:58:24 +01:00
Jakob Borg
0afcb5b7e7 Clean up build.sh 2014-03-02 23:55:08 +01:00
filoozom
043dea760f Set the right config and home dir for each OS
Use %AppData%\syncthing for the config files and %USERPROFILE%\Sync as sync folder for Windows.
2014-03-02 23:49:51 +01:00
Jakob Borg
0618e2b9b4 Don't include timestamp in auto generated files 2014-03-02 23:15:56 +01:00
Jakob Borg
3c171d281c Move cmd files into subdir 2014-03-02 23:13:04 +01:00
Jakob Borg
c217b7cd22 Change default announce server to announce.syncthing.net 2014-03-02 17:29:35 +01:00
filoozom
23593c3d20 Update getHomeDir() to use "os/user"
os.Getenv("HOME") doesn't work properly on Windows (and maybe other systems?) and the package "os/user" gives us a convenient way to find the home directory for every OS.
2014-03-02 16:07:12 +01:00
Jakob Borg
192117dc11 Merge branch 'v0.6'
* v0.6:
  Open GUI on startup
2014-03-02 13:08:05 +01:00
Jakob Borg
24b8f9211a Open GUI on startup 2014-03-02 12:52:32 +01:00
Jakob Borg
51788d6f0e Add some support packages 2014-03-01 11:11:37 +01:00
Jakob Borg
ea0bed2238 drone.io badge 2014-02-24 14:06:22 +01:00
Jakob Borg
e2fe57c440 deadcode 2014-02-24 13:34:24 +01:00
Jakob Borg
434a0ccf2a golint 2014-02-24 13:29:30 +01:00
Jakob Borg
e7bf3ac108 go vet 2014-02-24 13:24:03 +01:00
Jakob Borg
c5bdaebf2b Remove spurious debug print 2014-02-23 15:08:15 +01:00
56 changed files with 13266 additions and 12428 deletions

3
.gitignore vendored
View File

@@ -1,3 +1,4 @@
syncthing
syncthing.exe
*.tar.gz
dist
*.zip

View File

@@ -1,4 +1,4 @@
syncthing
syncthing [![Build Status](https://drone.io/github.com/calmh/syncthing/status.png)](https://drone.io/github.com/calmh/syncthing/latest)
=========
This is the `syncthing` project. The following are the project goals:

View File

@@ -18,7 +18,7 @@ cd gui
for f in $(find . -type f) ; do
f="${f#./}"
echo "gr, _ = gzip.NewReader(bytes.NewBuffer([]byte{"
gzip -c $f | od -vt x1 | sed 's/^[0-9a-f]*//' | sed 's/\([0-9a-f][0-9a-f]\)/0x\1,/g'
gzip -n -c $f | od -vt x1 | sed 's/^[0-9a-f]*//' | sed 's/\([0-9a-f][0-9a-f]\)/0x\1,/g'
echo "}))"
echo "data, _ = ioutil.ReadAll(gr)"
echo "Assets[\"$f\"] = data"

View File

File diff suppressed because it is too large Load Diff

118
build.sh
View File

@@ -2,48 +2,90 @@
export COPYFILE_DISABLE=true
distFiles=(README.md LICENSE) # apart from the binary itself
version=$(git describe --always)
buildDir=dist
if [[ $fast != yes ]] ; then
build() {
go build -ldflags "-w -X main.Version $version" ./cmd/syncthing
}
prepare() {
./assets.sh | gofmt > auto/gui.files.go
go get -d
}
test() {
go test ./...
fi
}
if [[ -z $1 ]] ; then
go build -ldflags "-X main.Version $version"
elif [[ $1 == "tar" ]] ; then
go build -ldflags "-X main.Version $version" \
&& mkdir syncthing-dist \
&& cp syncthing README.md LICENSE syncthing-dist \
&& tar zcvf syncthing-dist.tar.gz syncthing-dist \
&& rm -rf syncthing-dist
elif [[ $1 == "all" ]] ; then
rm -rf "$buildDir"
mkdir -p "$buildDir" || exit 1
tarDist() {
name="$1"
mkdir -p "$name"
cp syncthing "${distFiles[@]}" "$name"
tar zcvf "$name.tar.gz" "$name"
rm -rf "$name"
}
export GOARM=7
for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 windows-amd64 ; do
echo "$os"
export name="syncthing-$os"
export GOOS=${os%-*}
export GOARCH=${os#*-}
go build -ldflags "-X main.Version $version"
mkdir -p "$name"
cp README.md LICENSE "$name"
case $GOOS in
windows)
cp syncthing.exe "$buildDir/$name.exe"
mv syncthing.exe "$name"
zip -qr "$buildDir/$name.zip" "$name"
;;
*)
cp syncthing "$buildDir/$name"
mv syncthing "$name"
tar zcf "$buildDir/$name.tar.gz" "$name"
;;
esac
rm -r "$name"
done
fi
zipDist() {
name="$1"
mkdir -p "$name"
cp syncthing.exe "${distFiles[@]}" "$name"
zip -r "$name.zip" "$name"
rm -rf "$name"
}
case "$1" in
"")
build
;;
tar)
rm -f *.tar.gz *.zip
prepare
test || exit 1
build
eval $(go env)
name="syncthing-$GOOS-$GOARCH-$version"
tarDist "$name"
;;
all)
rm -f *.tar.gz *.zip
prepare
test || exit 1
export GOARM=7
for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 windows-amd64 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}
build
name="syncthing-$os-$version"
case $GOOS in
windows)
zipDist "$name"
rm -f syncthing.exe
;;
*)
tarDist "$name"
rm -f syncthing
;;
esac
done
;;
upload)
tag=$(git describe)
shopt -s nullglob
for f in *gz *zip ; do
relup calmh/syncthing "$tag" "$f"
done
;;
*)
echo "Unknown build parameter $1"
;;
esac

42
cid/cid.go Normal file
View File

@@ -0,0 +1,42 @@
package cid
type Map struct {
toCid map[string]int
toName []string
}
func NewMap() *Map {
return &Map{
toCid: make(map[string]int),
}
}
func (m *Map) Get(name string) int {
cid, ok := m.toCid[name]
if ok {
return cid
}
// Find a free slot to get a new ID
for i, n := range m.toName {
if n == "" {
m.toName[i] = name
m.toCid[name] = i
return i
}
}
// Add it to the end since we didn't find a free slot
m.toName = append(m.toName, name)
cid = len(m.toName) - 1
m.toCid[name] = cid
return cid
}
func (m *Map) Clear(name string) {
cid, ok := m.toCid[name]
if ok {
m.toName[cid] = ""
delete(m.toCid, name)
}
}

1
cmd/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
!syncthing

View File

@@ -36,7 +36,7 @@ type OptionsConfiguration struct {
FollowSymlinks bool `xml:"followSymlinks" default:"true" ini:"follow-symlinks"`
GUIEnabled bool `xml:"guiEnabled" default:"true" ini:"gui-enabled"`
GUIAddress string `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"`
GlobalAnnServer string `xml:"globalAnnounceServer" default:"syncthing.nym.se:22025" ini:"global-announce-server"`
GlobalAnnServer string `xml:"globalAnnounceServer" default:"announce.syncthing.net:22025" ini:"global-announce-server"`
GlobalAnnEnabled bool `xml:"globalAnnounceEnabled" default:"true" ini:"global-announce-enabled"`
LocalAnnEnabled bool `xml:"localAnnounceEnabled" default:"true" ini:"local-announce-enabled"`
ParallelRequests int `xml:"parallelRequests" default:"16" ini:"parallel-requests"`
@@ -44,6 +44,7 @@ type OptionsConfiguration struct {
RescanIntervalS int `xml:"rescanIntervalS" default:"60" ini:"rescan-interval"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60" ini:"reconnection-interval"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"1000" ini:"max-change-bw"`
StartBrowser bool `xml:"startBrowser" default:"true"`
}
func setDefaults(data interface{}) error {
@@ -60,11 +61,6 @@ func setDefaults(data interface{}) error {
case string:
f.SetString(v)
case []string:
rv := reflect.MakeSlice(reflect.TypeOf([]string{}), 1, 1)
rv.Index(0).SetString(v)
f.Set(rv)
case int:
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
@@ -75,6 +71,11 @@ func setDefaults(data interface{}) error {
case bool:
f.SetBool(v == "true")
case []string:
// We don't do anything with string slices here. Any default
// we set will be appended to by the XML decoder, so we fill
// those after decoding.
default:
panic(f.Type())
}
@@ -83,6 +84,30 @@ func setDefaults(data interface{}) error {
return nil
}
// fillNilSlices sets default value on slices that are still nil.
func fillNilSlices(data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
tag := t.Field(i).Tag
v := tag.Get("default")
if len(v) > 0 {
switch f.Interface().(type) {
case []string:
if f.IsNil() {
rv := reflect.MakeSlice(reflect.TypeOf([]string{}), 1, 1)
rv.Index(0).SetString(v)
f.Set(rv)
}
}
}
}
return nil
}
func readConfigINI(m map[string]string, data interface{}) error {
s := reflect.ValueOf(data).Elem()
t := s.Type()
@@ -154,6 +179,8 @@ func readConfigXML(rd io.Reader) (Configuration, error) {
err = xml.NewDecoder(rd).Decode(&cfg)
}
fillNilSlices(&cfg.Options)
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
return cfg, err
}

View File

@@ -0,0 +1,116 @@
package main
import (
"bytes"
"io"
"reflect"
"testing"
)
func TestDefaultValues(t *testing.T) {
expected := OptionsConfiguration{
ListenAddress: []string{":22000"},
ReadOnly: false,
AllowDelete: true,
FollowSymlinks: true,
GUIEnabled: true,
GUIAddress: "127.0.0.1:8080",
GlobalAnnServer: "announce.syncthing.net:22025",
GlobalAnnEnabled: true,
LocalAnnEnabled: true,
ParallelRequests: 16,
MaxSendKbps: 0,
RescanIntervalS: 60,
ReconnectIntervalS: 60,
MaxChangeKbps: 1000,
StartBrowser: true,
}
cfg, err := readConfigXML(bytes.NewReader(nil))
if err != io.EOF {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Options, expected) {
t.Errorf("Default config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
}
}
func TestNoListenAddress(t *testing.T) {
data := []byte(`<configuration version="1">
<repository directory="~/Sync">
<node id="..." name="...">
<address>dynamic</address>
</node>
</repository>
<options>
<listenAddress></listenAddress>
</options>
</configuration>
`)
cfg, err := readConfigXML(bytes.NewReader(data))
if err != nil {
t.Error(err)
}
expected := []string{""}
if !reflect.DeepEqual(cfg.Options.ListenAddress, expected) {
t.Errorf("Unexpected ListenAddress %#v", cfg.Options.ListenAddress)
}
}
func TestOverriddenValues(t *testing.T) {
data := []byte(`<configuration version="1">
<repository directory="~/Sync">
<node id="..." name="...">
<address>dynamic</address>
</node>
</repository>
<options>
<listenAddress>:23000</listenAddress>
<readOnly>true</readOnly>
<allowDelete>false</allowDelete>
<followSymlinks>false</followSymlinks>
<guiEnabled>false</guiEnabled>
<guiAddress>125.2.2.2:8080</guiAddress>
<globalAnnounceServer>syncthing.nym.se:22025</globalAnnounceServer>
<globalAnnounceEnabled>false</globalAnnounceEnabled>
<localAnnounceEnabled>false</localAnnounceEnabled>
<parallelRequests>32</parallelRequests>
<maxSendKbps>1234</maxSendKbps>
<rescanIntervalS>600</rescanIntervalS>
<reconnectionIntervalS>6000</reconnectionIntervalS>
<maxChangeKbps>2345</maxChangeKbps>
<startBrowser>false</startBrowser>
</options>
</configuration>
`)
expected := OptionsConfiguration{
ListenAddress: []string{":23000"},
ReadOnly: true,
AllowDelete: false,
FollowSymlinks: false,
GUIEnabled: false,
GUIAddress: "125.2.2.2:8080",
GlobalAnnServer: "syncthing.nym.se:22025",
GlobalAnnEnabled: false,
LocalAnnEnabled: false,
ParallelRequests: 32,
MaxSendKbps: 1234,
RescanIntervalS: 600,
ReconnectIntervalS: 6000,
MaxChangeKbps: 2345,
StartBrowser: false,
}
cfg, err := readConfigXML(bytes.NewReader(data))
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Options, expected) {
t.Errorf("Overridden config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
}
}

15
cmd/syncthing/debug.go Normal file
View File

@@ -0,0 +1,15 @@
package main
import (
"log"
"os"
"strings"
)
var (
dlog = log.New(os.Stderr, "main: ", log.Lmicroseconds|log.Lshortfile)
debugNet = strings.Contains(os.Getenv("STTRACE"), "net")
debugIdx = strings.Contains(os.Getenv("STTRACE"), "idx")
debugNeed = strings.Contains(os.Getenv("STTRACE"), "need")
debugPull = strings.Contains(os.Getenv("STTRACE"), "pull")
)

View File

@@ -4,13 +4,13 @@ import (
"bytes"
"errors"
"fmt"
"log"
"os"
"path"
"sync"
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/scanner"
)
type fileMonitor struct {
@@ -18,18 +18,18 @@ type fileMonitor struct {
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
global scanner.File
localBlocks []scanner.Block
copyError error
writeError error
}
func (m *fileMonitor) FileBegins(cc <-chan content) error {
if m.model.trace["file"] {
log.Printf("FILE: FileBegins: " + m.name)
if debugPull {
dlog.Println("file begins:", m.name)
}
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
@@ -109,13 +109,13 @@ func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writ
}
func (m *fileMonitor) FileDone() error {
if m.model.trace["file"] {
log.Printf("FILE: FileDone: " + m.name)
if debugPull {
dlog.Println("file done:", m.name)
}
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
defer os.Remove(tmp)
if m.copyError != nil {
@@ -149,14 +149,14 @@ func (m *fileMonitor) FileDone() error {
return nil
}
func hashCheck(name string, correct []Block) error {
func hashCheck(name string, correct []scanner.Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
current, err := scanner.Blocks(rf, BlockSize)
if err != nil {
return err
}

View File

@@ -5,6 +5,8 @@ import (
"sort"
"sync"
"time"
"github.com/calmh/syncthing/scanner"
)
type Monitor interface {
@@ -23,7 +25,7 @@ type FileQueue struct {
type queuedFile struct {
name string
blocks []Block
blocks []scanner.Block
activeBlocks []bool
given int
remaining int
@@ -54,7 +56,7 @@ func (l queuedFileList) Less(a, b int) bool {
type queuedBlock struct {
name string
block Block
block scanner.Block
index int
}
@@ -65,7 +67,7 @@ func NewFileQueue() *FileQueue {
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
func (q *FileQueue) Add(name string, blocks []scanner.Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()

View File

@@ -5,6 +5,8 @@ import (
"sync"
"sync/atomic"
"testing"
"github.com/calmh/syncthing/scanner"
)
func TestFileQueueAdd(t *testing.T) {
@@ -17,8 +19,8 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
@@ -28,12 +30,12 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
@@ -56,12 +58,12 @@ func TestFileQueueGet(t *testing.T) {
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
q.Add("bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
@@ -70,7 +72,7 @@ func TestFileQueueGet(t *testing.T) {
expected := queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@@ -89,7 +91,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
@@ -109,7 +111,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@@ -150,7 +152,7 @@ func TestFileQueueDone(t *testing.T) {
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
@@ -181,19 +183,19 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
q.Add("a-foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
q.Add("b-bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@@ -209,7 +211,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@@ -225,7 +227,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
@@ -246,9 +248,9 @@ func TestFileQueueThreadHandling(t *testing.T) {
const n = 100
var total int
var blocks []Block
var blocks []scanner.Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
blocks = append(blocks, scanner.Block{Offset: int64(i), Size: 1})
total += i
}
@@ -272,7 +274,7 @@ func TestFileQueueThreadHandling(t *testing.T) {
close(start)
wg.Wait()
if int(gotTot) != total {
t.Error("Total mismatch; %d != %d", gotTot, total)
t.Errorf("Total mismatch; %d != %d", gotTot, total)
}
}
@@ -283,13 +285,13 @@ func TestDeleteAt(t *testing.T) {
q.files = queuedFileList{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
q.deleteAt(i)
if l := len(q.files); l != 3 {
t.Fatal("deleteAt(%d) failed; %d != 3", i, l)
t.Fatalf("deleteAt(%d) failed; %d != 3", i, l)
}
}
q.files = queuedFileList{{name: "a"}}
q.deleteAt(0)
if l := len(q.files); l != 0 {
t.Fatal("deleteAt(only) failed; %d != 0", l)
t.Fatalf("deleteAt(only) failed; %d != 0", l)
}
}

View File

@@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/calmh/syncthing/scanner"
"github.com/codegangsta/martini"
)
@@ -107,7 +108,7 @@ func restPostRestart(req *http.Request) {
restart()
}
type guiFile File
type guiFile scanner.File
func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
@@ -116,7 +117,7 @@ func (f guiFile) MarshalJSON() ([]byte, error) {
}
return json.Marshal(t{
Name: f.Name,
Size: File(f).Size,
Size: scanner.File(f).Size,
})
}

View File

View File

View File

@@ -13,16 +13,6 @@ func init() {
logger = log.New(os.Stderr, "", log.Flags())
}
func debugln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "DEBUG: "+s)
}
func debugf(format string, vals ...interface{}) {
s := fmt.Sprintf(format, vals...)
logger.Output(2, "DEBUG: "+s)
}
func infoln(vals ...interface{}) {
s := fmt.Sprintln(vals...)
logger.Output(2, "INFO: "+s)

View File

@@ -21,37 +21,51 @@ import (
"github.com/calmh/ini"
"github.com/calmh/syncthing/discover"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
const BlockSize = 128 * 1024
var cfg Configuration
var Version string = "unknown-dev"
var Version = "unknown-dev"
var (
myID string
config ini.Config
myID string
)
var (
showVersion bool
confDir string
trace string
profiler string
verbose bool
startupDelay int
showVersion bool
confDir string
verbose bool
)
const (
usage = "syncthing [options]"
extraUsage = `The following environment variables can be set to facilitate debugging:
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access.
STTRACE A comma separated string of facilities to trace. The valid
facility strings:
- "scanner" (the file change scanner)
- "discover" (the node discovery package)
- "net" (connecting and disconnecting, sent/received messages)
- "idx" (index sending and receiving)
- "need" (file need calculations)
- "pull" (file pull activity)`
)
func main() {
flag.StringVar(&confDir, "home", "~/.syncthing", "Set configuration directory")
flag.StringVar(&trace, "debug.trace", "", "(connect,net,idx,file,pull)")
flag.StringVar(&profiler, "debug.profiler", "", "(addr)")
flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.BoolVar(&verbose, "v", false, "Be more verbose")
flag.IntVar(&startupDelay, "delay", 0, "Startup delay (s)")
flag.Usage = usageFor(flag.CommandLine, "syncthing [options]")
flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
flag.Parse()
if startupDelay > 0 {
time.Sleep(time.Duration(startupDelay) * time.Second)
if len(os.Getenv("STRESTART")) > 0 {
// Give the parent process time to exit and release sockets etc.
time.Sleep(1 * time.Second)
}
if showVersion {
@@ -67,10 +81,6 @@ func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
}
if len(trace) > 0 {
log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
logger.SetFlags(log.Lshortfile | log.Ldate | log.Ltime | log.Lmicroseconds)
}
confDir = expandTilde(confDir)
// Ensure that our home directory exists and that we have a certificate and key.
@@ -83,7 +93,7 @@ func main() {
fatalErr(err)
}
myID = string(certId(cert.Certificate[0]))
myID = string(certID(cert.Certificate[0]))
log.SetPrefix("[" + myID[0:5] + "] ")
logger.SetPrefix("[" + myID[0:5] + "] ")
@@ -139,7 +149,7 @@ func main() {
cfg, err = readConfigXML(nil)
cfg.Repositories = []RepositoryConfiguration{
{
Directory: "~/Sync",
Directory: path.Join(getHomeDir(), "Sync"),
Nodes: []NodeConfiguration{
{NodeID: myID, Addresses: []string{"dynamic"}},
},
@@ -155,11 +165,12 @@ func main() {
var dir = expandTilde(cfg.Repositories[0].Directory)
if len(profiler) > 0 {
if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
go func() {
dlog.Println("Starting profiler on", profiler)
err := http.ListenAndServe(profiler, nil)
if err != nil {
warnln(err)
dlog.Fatal(err)
}
}()
}
@@ -179,25 +190,34 @@ func main() {
ensureDir(dir, -1)
m := NewModel(dir, cfg.Options.MaxChangeKbps*1000)
for _, t := range strings.Split(trace, ",") {
m.Trace(t)
}
if cfg.Options.MaxSendKbps > 0 {
m.LimitRate(cfg.Options.MaxSendKbps)
}
// GUI
if cfg.Options.GUIEnabled && cfg.Options.GUIAddress != "" {
host, port, err := net.SplitHostPort(cfg.Options.GUIAddress)
addr, err := net.ResolveTCPAddr("tcp", cfg.Options.GUIAddress)
if err != nil {
warnf("Cannot start GUI on %q: %v", cfg.Options.GUIAddress, err)
} else {
if len(host) > 0 {
infof("Starting web GUI on http://%s", cfg.Options.GUIAddress)
} else {
infof("Starting web GUI on port %s", port)
var hostOpen, hostShow string
switch {
case addr.IP == nil:
hostOpen = "localhost"
hostShow = "0.0.0.0"
case addr.IP.IsUnspecified():
hostOpen = "localhost"
hostShow = addr.IP.String()
default:
hostOpen = addr.IP.String()
hostShow = hostOpen
}
infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
startGUI(cfg.Options.GUIAddress, m)
if cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
openURL(fmt.Sprintf("http://%s:%d", hostOpen, addr.Port))
}
}
}
@@ -208,7 +228,17 @@ func main() {
infoln("Populating repository index")
}
loadIndex(m)
updateLocalModel(m)
sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
w := &scanner.Walker{
Dir: m.dir,
IgnoreFile: ".stignore",
FollowSymlinks: cfg.Options.FollowSymlinks,
BlockSize: BlockSize,
Suppressor: sup,
TempNamer: defTempNamer,
}
updateLocalModel(m, w)
connOpts := map[string]string{
"clientId": "syncthing",
@@ -254,7 +284,7 @@ func main() {
for {
time.Sleep(td)
if m.LocalAge() > (td / 2).Seconds() {
updateLocalModel(m)
updateLocalModel(m, w)
}
}
}()
@@ -269,24 +299,17 @@ func main() {
func restart() {
infoln("Restarting")
args := os.Args
doAppend := true
for _, arg := range args {
if arg == "-delay" {
doAppend = false
break
}
}
if doAppend {
args = append(args, "-delay", "2")
env := os.Environ()
if len(os.Getenv("STRESTART")) == 0 {
env = append(env, "STRESTART=1")
}
pgm, err := exec.LookPath(os.Args[0])
if err != nil {
warnln(err)
return
}
proc, err := os.StartProcess(pgm, args, &os.ProcAttr{
Env: os.Environ(),
proc, err := os.StartProcess(pgm, os.Args, &os.ProcAttr{
Env: env,
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
})
if err != nil {
@@ -319,6 +342,13 @@ func saveConfigLoop(cfgFile string) {
continue
}
if runtime.GOOS == "windows" {
err := os.Remove(cfgFile)
if err != nil && !os.IsNotExist(err) {
warnln(err)
}
}
err = os.Rename(cfgFile+".tmp", cfgFile)
if err != nil {
warnln(err)
@@ -362,8 +392,8 @@ func printStatsLoop(m *Model) {
}
func listen(myID string, addr string, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
if strings.Contains(trace, "connect") {
debugln("NET: Listening on", addr)
if debugNet {
dlog.Println("listening on", addr)
}
l, err := tls.Listen("tcp", addr, tlsCfg)
fatalErr(err)
@@ -376,8 +406,8 @@ listen:
continue
}
if strings.Contains(trace, "connect") {
debugln("NET: Connect from", conn.RemoteAddr())
if debugNet {
dlog.Println("connect from", conn.RemoteAddr())
}
tc := conn.(*tls.Conn)
@@ -388,7 +418,7 @@ listen:
continue
}
remoteID := certId(tc.ConnectionState().PeerCertificates[0].Raw)
remoteID := certID(tc.ConnectionState().PeerCertificates[0].Raw)
if remoteID == myID {
warnf("Connect from myself (%s) - should not happen", remoteID)
@@ -458,18 +488,18 @@ func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Confi
}
}
if strings.Contains(trace, "connect") {
debugln("NET: Dial", nodeCfg.NodeID, addr)
if debugNet {
dlog.Println("dial", nodeCfg.NodeID, addr)
}
conn, err := tls.Dial("tcp", addr, tlsCfg)
if err != nil {
if strings.Contains(trace, "connect") {
debugln("NET:", err)
if debugNet {
dlog.Println(err)
}
continue
}
remoteID := certId(conn.ConnectionState().PeerCertificates[0].Raw)
remoteID := certID(conn.ConnectionState().PeerCertificates[0].Raw)
if remoteID != nodeCfg.NodeID {
warnln("Unexpected nodeID", remoteID, "!=", nodeCfg.NodeID)
conn.Close()
@@ -486,8 +516,8 @@ func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Confi
}
}
func updateLocalModel(m *Model) {
files, _ := m.Walk(cfg.Options.FollowSymlinks)
func updateLocalModel(m *Model, w *scanner.Walker) {
files, _ := w.Walk()
m.ReplaceLocal(files)
saveIndex(m)
}
@@ -502,7 +532,10 @@ func saveIndex(m *Model) {
gzw := gzip.NewWriter(idxf)
protocol.IndexMessage{"local", m.ProtocolIndex()}.EncodeXDR(gzw)
protocol.IndexMessage{
Repository: "local",
Files: m.ProtocolIndex(),
}.EncodeXDR(gzw)
gzw.Close()
idxf.Close()
os.Rename(fullName+".tmp", fullName)
@@ -542,16 +575,38 @@ func ensureDir(dir string, mode int) {
}
func expandTilde(p string) string {
if runtime.GOOS == "windows" {
return p
}
if strings.HasPrefix(p, "~/") {
return strings.Replace(p, "~", getHomeDir(), 1)
return strings.Replace(p, "~", getUnixHomeDir(), 1)
}
return p
}
func getHomeDir() string {
func getUnixHomeDir() string {
home := os.Getenv("HOME")
if home == "" {
fatalln("No home directory?")
}
return home
}
func getHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return getUnixHomeDir()
}
func getDefaultConfDir() string {
if runtime.GOOS == "windows" {
return path.Join(os.Getenv("AppData"), "syncthing")
}
return expandTilde("~/.syncthing")
}

View File

@@ -13,16 +13,17 @@ import (
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
type Model struct {
dir string
global map[string]File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
global map[string]scanner.File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]scanner.File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]scanner.File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
@@ -31,7 +32,7 @@ type Model struct {
// Queue for files to fetch. fq can call back into the model, so we must ensure
// to hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
dq chan scanner.File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
@@ -43,8 +44,6 @@ type Model struct {
delete bool
initmut sync.Mutex // protects rwRunning and delete
trace map[string]bool
sup suppressor
parallelRequests int
@@ -64,9 +63,6 @@ type Connection interface {
const (
idxBcastHoldtime = 15 * time.Second // Wait at least this long after the last index modification
idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
minFileHoldTimeS = 60 // Never allow file changes more often than this
maxFileHoldTimeS = 600 // Always allow file changes at least this often
)
var (
@@ -80,16 +76,15 @@ var (
func NewModel(dir string, maxChangeBw int) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
global: make(map[string]scanner.File),
local: make(map[string]scanner.File),
remote: make(map[string]map[string]scanner.File),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
sup: suppressor{threshold: int64(maxChangeBw)},
fq: NewFileQueue(),
dq: make(chan File),
dq: make(chan scanner.File),
}
go m.broadcastIndexLoop()
@@ -111,11 +106,6 @@ func (m *Model) LimitRate(kbps int) {
}()
}
// Trace enables trace logging of the given facility. This is a debugging function; grep for m.trace.
func (m *Model) Trace(t string) {
m.trace[t] = true
}
// StartRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
@@ -131,7 +121,6 @@ func (m *Model) StartRW(del bool, threads int) {
m.delete = del
m.parallelRequests = threads
go m.cleanTempFiles()
if del {
go m.deleteLoop()
}
@@ -194,7 +183,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
}
}
ci.Completion = int(100 * have / tot)
ci.Completion = 100
if tot != 0 {
ci.Completion = int(100 * have / tot)
}
res[node] = ci
}
@@ -260,7 +252,7 @@ func (m *Model) InSyncSize() (files, bytes int64) {
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int64) {
func (m *Model) NeedFiles() (files []scanner.File, bytes int64) {
qf := m.fq.QueuedFiles()
m.gmut.RLock()
@@ -278,7 +270,7 @@ func (m *Model) NeedFiles() (files []File, bytes int64) {
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@@ -286,11 +278,11 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
if debugNet {
dlog.Printf("IDX(in): %s: %d files", nodeID, len(fs))
}
repo := make(map[string]File)
repo := make(map[string]scanner.File)
for _, f := range files {
m.indexUpdate(repo, f)
}
@@ -306,7 +298,7 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@@ -314,8 +306,8 @@ func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.imut.Lock()
defer m.imut.Unlock()
if m.trace["net"] {
debugf("NET IDXUP(in): %s: %d files", nodeID, len(files))
if debugNet {
dlog.Printf("IDXUP(in): %s: %d files", nodeID, len(files))
}
m.rmut.Lock()
@@ -335,13 +327,13 @@ func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.recomputeNeedForFiles(files)
}
func (m *Model) indexUpdate(repo map[string]File, f File) {
if m.trace["idx"] {
func (m *Model) indexUpdate(repo map[string]scanner.File, f scanner.File) {
if debugIdx {
var flagComment string
if f.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
debugf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
dlog.Printf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
}
if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
@@ -355,8 +347,8 @@ func (m *Model) indexUpdate(repo map[string]File, f File) {
// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node string, err error) {
if m.trace["net"] {
debugf("NET: %s: %v", node, err)
if debugNet {
dlog.Printf("%s: %v", node, err)
}
if err == protocol.ErrClusterHash {
warnf("Connection to %s closed due to mismatched cluster hash. Ensure that the configured cluster members are identical on both nodes.", node)
@@ -405,8 +397,8 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
return nil, ErrInvalid
}
if m.trace["net"] && nodeID != "<local>" {
debugf("NET REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
if debugNet && nodeID != "<local>" {
dlog.Printf("REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
}
fn := path.Join(m.dir, name)
fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
@@ -431,9 +423,9 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
func (m *Model) ReplaceLocal(fs []scanner.File) {
var updated bool
var newLocal = make(map[string]File)
var newLocal = make(map[string]scanner.File)
m.lmut.RLock()
for _, f := range fs {
@@ -474,7 +466,7 @@ func (m *Model) ReplaceLocal(fs []File) {
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.lmut.Lock()
m.local = make(map[string]File)
m.local = make(map[string]scanner.File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
@@ -509,6 +501,9 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
go func() {
idx := m.ProtocolIndex()
if debugNet {
dlog.Printf("IDX(out/initial): %s: %d files", nodeID, len(idx))
}
protoConn.Index("default", idx)
}()
@@ -522,14 +517,14 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
for i := 0; i < m.parallelRequests; i++ {
i := i
go func() {
if m.trace["pull"] {
debugln("PULL: Starting", nodeID, i)
if debugPull {
dlog.Println("starting puller:", nodeID, i)
}
for {
m.pmut.RLock()
if _, ok := m.protoConn[nodeID]; !ok {
if m.trace["pull"] {
debugln("PULL: Exiting", nodeID, i)
if debugPull {
dlog.Println("stopping puller:", nodeID, i)
}
m.pmut.RUnlock()
return
@@ -538,8 +533,8 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
qb, ok := m.fq.Get(nodeID)
if ok {
if m.trace["pull"] {
debugln("PULL: Request", nodeID, i, qb.name, qb.block.Offset)
if debugPull {
dlog.Println("request: out", nodeID, i, qb.name, qb.block.Offset)
}
data, _ := protoConn.Request("default", qb.name, qb.block.Offset, int(qb.block.Size))
m.fq.Done(qb.name, qb.block.Offset, data)
@@ -560,12 +555,12 @@ func (m *Model) ProtocolIndex() []protocol.FileInfo {
for _, f := range m.local {
mf := fileInfoFromFile(f)
if m.trace["idx"] {
if debugIdx {
var flagComment string
if mf.Flags&protocol.FlagDeleted != 0 {
flagComment = " (deleted)"
}
debugf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
dlog.Printf("IDX(out): %q m=%d f=%o%s v=%d (%d blocks)", mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
}
index = append(index, mf)
}
@@ -583,8 +578,8 @@ func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash
return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
}
if m.trace["net"] {
debugf("NET REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
if debugNet {
dlog.Printf("REQ(out): %s: %q o=%d s=%d h=%x", nodeID, name, offset, size, hash)
}
return nc.Request("default", name, offset, size)
@@ -611,8 +606,8 @@ func (m *Model) broadcastIndexLoop() {
m.pmut.RLock()
for _, node := range m.protoConn {
node := node
if m.trace["net"] {
debugf("NET IDX(out/loop): %s: %d files", node.ID(), len(idx))
if debugNet {
dlog.Printf("IDX(out/loop): %s: %d files", node.ID(), len(idx))
}
go func() {
node.Index("default", idx)
@@ -628,7 +623,7 @@ func (m *Model) broadcastIndexLoop() {
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
func (m *Model) markDeletedLocals(newLocal map[string]scanner.File) bool {
// For every file in the existing local table, check if they are also
// present in the new local table. If they are not, check that we already
// had the newest version available according to the global table and if so
@@ -658,7 +653,7 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
return updated
}
func (m *Model) updateLocal(f File) {
func (m *Model) updateLocal(f scanner.File) {
var updated bool
m.lmut.Lock()
@@ -685,7 +680,7 @@ func (m *Model) updateLocal(f File) {
/*
XXX: Not done, needs elegant handling of availability
func (m *Model) recomputeGlobalFor(files []File) bool {
func (m *Model) recomputeGlobalFor(files []scanner.File) bool {
m.gmut.Lock()
defer m.gmut.Unlock()
@@ -702,7 +697,7 @@ func (m *Model) recomputeGlobalFor(files []File) bool {
*/
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
var newGlobal = make(map[string]scanner.File)
m.lmut.RLock()
for n, f := range m.local {
@@ -761,12 +756,12 @@ func (m *Model) recomputeGlobal() {
type addOrder struct {
n string
remote []Block
remote []scanner.Block
fm *fileMonitor
}
func (m *Model) recomputeNeedForGlobal() {
var toDelete []File
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@@ -785,8 +780,8 @@ func (m *Model) recomputeNeedForGlobal() {
}
}
func (m *Model) recomputeNeedForFiles(files []File) {
var toDelete []File
func (m *Model) recomputeNeedForFiles(files []scanner.File) {
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@@ -805,7 +800,7 @@ func (m *Model) recomputeNeedForFiles(files []File) {
}
}
func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
func (m *Model) recomputeNeedForFile(gf scanner.File, toAdd []addOrder, toDelete []scanner.File) ([]addOrder, []scanner.File) {
m.lmut.RLock()
lf, ok := m.local[gf.Name]
m.lmut.RUnlock()
@@ -823,14 +818,14 @@ func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File)
// Don't have the file, so don't need to delete it
return toAdd, toDelete
}
if m.trace["need"] {
debugf("NEED: lf:%v gf:%v", lf, gf)
if debugNeed {
dlog.Printf("need: lf:%v gf:%v", lf, gf)
}
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: gf.Name,
path: path.Clean(path.Join(m.dir, gf.Name)),
@@ -865,8 +860,8 @@ func (m *Model) WhoHas(name string) []string {
func (m *Model) deleteLoop() {
for file := range m.dq {
if m.trace["file"] {
debugln("FILE: Delete", file.Name)
if debugPull {
dlog.Println("delete", file.Name)
}
path := path.Clean(path.Join(m.dir, file.Name))
err := os.Remove(path)
@@ -878,18 +873,18 @@ func (m *Model) deleteLoop() {
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks = make([]Block, len(f.Blocks))
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
var blocks = make([]scanner.Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
blocks[i] = scanner.Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return File{
return scanner.File{
Name: f.Name,
Size: offset,
Flags: f.Flags,
@@ -899,7 +894,7 @@ func fileFromFileInfo(f protocol.FileInfo) File {
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
func fileInfoFromFile(f scanner.File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
func TestNewModel(t *testing.T) {
@@ -27,27 +28,27 @@ func TestNewModel(t *testing.T) {
}
}
var testDataExpected = map[string]File{
"foo": File{
var testDataExpected = map[string]scanner.File{
"foo": scanner.File{
Name: "foo",
Flags: 0,
Modified: 0,
Size: 7,
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"empty": File{
"empty": scanner.File{
Name: "empty",
Flags: 0,
Modified: 0,
Size: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
},
"bar": File{
"bar": scanner.File{
Name: "bar",
Flags: 0,
Modified: 0,
Size: 10,
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
@@ -63,7 +64,8 @@ func init() {
func TestUpdateLocal(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if fs, _ := m.NeedFiles(); len(fs) > 0 {
@@ -105,7 +107,8 @@ func TestUpdateLocal(t *testing.T) {
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@@ -122,7 +125,8 @@ func TestRemoteUpdateExisting(t *testing.T) {
func TestRemoteAddNew(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@@ -139,7 +143,8 @@ func TestRemoteAddNew(t *testing.T) {
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
oldTimeStamp := int64(1234)
@@ -157,7 +162,8 @@ func TestRemoteUpdateOld(t *testing.T) {
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
foo := protocol.FileInfo{
@@ -190,7 +196,8 @@ func TestRemoteIndexUpdate(t *testing.T) {
func TestDelete(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@@ -201,10 +208,10 @@ func TestDelete(t *testing.T) {
}
ot := time.Now().Unix()
newFile := File{
newFile := scanner.File{
Name: "a new file",
Modified: ot,
Blocks: []Block{{0, 100, []byte("some hash bytes")}},
Blocks: []scanner.Block{{0, 100, []byte("some hash bytes")}},
}
m.updateLocal(newFile)
@@ -292,7 +299,8 @@ func TestDelete(t *testing.T) {
func TestForgetNode(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@@ -345,7 +353,8 @@ func TestForgetNode(t *testing.T) {
func TestRequest(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
bs, err := m.Request("some node", "default", "foo", 0, 6)
@@ -367,7 +376,8 @@ func TestRequest(t *testing.T) {
func TestIgnoreWithUnknownFlags(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
valid := protocol.FileInfo{
@@ -410,7 +420,8 @@ func genFiles(n int) []protocol.FileInfo {
func BenchmarkIndex10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
@@ -422,7 +433,8 @@ func BenchmarkIndex10000(b *testing.B) {
func BenchmarkIndex00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(100)
@@ -434,7 +446,8 @@ func BenchmarkIndex00100(b *testing.B) {
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -447,7 +460,8 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -461,7 +475,8 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@@ -506,7 +521,8 @@ func (FakeConnection) Statistics() protocol.Statistics {
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
const n = 1000

34
cmd/syncthing/openurl.go Normal file
View File

@@ -0,0 +1,34 @@
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os/exec"
"runtime"
)
// openURL asks the operating system to open url with its default
// handler (typically the web browser). The launcher command differs
// per platform; any error from running it is returned as-is.
func openURL(url string) error {
	switch runtime.GOOS {
	case "windows":
		return exec.Command("cmd.exe", "/C", "start "+url).Run()
	case "darwin":
		return exec.Command("open", url).Run()
	default:
		return exec.Command("xdg-open", url).Run()
	}
}

View File

@@ -1,12 +1,13 @@
package main
import (
"os"
"sync"
"time"
)
const (
MAX_CHANGE_HISTORY = 4
MaxChangeHistory = 4
)
type change struct {
@@ -45,12 +46,17 @@ func (h changeHistory) bandwidth(t time.Time) int64 {
func (h *changeHistory) append(size int64, t time.Time) {
c := change{size, t}
if len(h.changes) == MAX_CHANGE_HISTORY {
h.changes = h.changes[1:MAX_CHANGE_HISTORY]
if len(h.changes) == MaxChangeHistory {
h.changes = h.changes[1:MaxChangeHistory]
}
h.changes = append(h.changes, c)
}
func (s *suppressor) Suppress(name string, fi os.FileInfo) bool {
sup, _ := s.suppress(name, fi.Size(), time.Now())
return sup
}
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()

View File

@@ -21,7 +21,7 @@ func TestSuppressor(t *testing.T) {
// bw is 10000 / 10 = 1000
t1 = t0.Add(10 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
t.Error("Incorrect bw %d", bw)
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000, t1)
if sup {
@@ -34,7 +34,7 @@ func TestSuppressor(t *testing.T) {
// bw is (10000 + 10000) / 11 = 1818
t1 = t0.Add(11 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
t.Error("Incorrect bw %d", bw)
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 100500, t1)
if sup {
@@ -47,7 +47,7 @@ func TestSuppressor(t *testing.T) {
// bw is (10000 + 10000 + 100500) / 12 = 10041
t1 = t0.Add(12 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
t.Error("Incorrect bw %d", bw)
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
if !sup {
@@ -60,7 +60,7 @@ func TestSuppressor(t *testing.T) {
// bw is (10000 + 10000 + 100500) / 15 = 8033
t1 = t0.Add(15 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
t.Error("Incorrect bw %d", bw)
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1)
if sup {
@@ -84,29 +84,29 @@ func TestHistory(t *testing.T) {
t.Errorf("Incorrect first record size %d", s)
}
for i := 1; i < MAX_CHANGE_HISTORY; i++ {
for i := 1; i < MaxChangeHistory; i++ {
h.append(int64(40+i), t0.Add(time.Duration(i)*time.Second))
}
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
if l := len(h.changes); l != MaxChangeHistory {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 40+MAX_CHANGE_HISTORY-1 {
if s := h.changes[MaxChangeHistory-1].size; s != 40+MaxChangeHistory-1 {
t.Errorf("Incorrect last record size %d", s)
}
h.append(999, t0.Add(time.Duration(999)*time.Second))
if l := len(h.changes); l != MAX_CHANGE_HISTORY {
if l := len(h.changes); l != MaxChangeHistory {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 41 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MAX_CHANGE_HISTORY-1].size; s != 999 {
if s := h.changes[MaxChangeHistory-1].size; s != 999 {
t.Errorf("Incorrect last record size %d", s)
}

28
cmd/syncthing/tempname.go Normal file
View File

@@ -0,0 +1,28 @@
package main
import (
"fmt"
"path"
"path/filepath"
"runtime"
"strings"
)
// tempNamer derives and recognizes the dotfile names used for
// temporary copies of files being transferred.
type tempNamer struct {
	prefix string
}

// defTempNamer is the namer used throughout, marking temporary files
// with a ".syncthing" prefix.
var defTempNamer = tempNamer{".syncthing"}

// IsTemporary reports whether name denotes a temporary file created
// by this namer. On Windows the separators are normalized to slashes
// first, since path.Base only understands slash-separated paths.
func (t tempNamer) IsTemporary(name string) bool {
	if runtime.GOOS == "windows" {
		name = filepath.ToSlash(name)
	}
	base := path.Base(name)
	return strings.HasPrefix(base, t.prefix)
}

// TempName returns the temporary file name for name: the same
// directory, with the base name prefixed by the temp prefix and a dot.
func (t tempNamer) TempName(name string) string {
	dir := path.Dir(name)
	base := fmt.Sprintf("%s.%s", t.prefix, path.Base(name))
	return path.Join(dir, base)
}

View File

View File

View File

View File

View File

View File

@@ -25,7 +25,7 @@ func loadCert(dir string) (tls.Certificate, error) {
return tls.LoadX509KeyPair(path.Join(dir, "cert.pem"), path.Join(dir, "key.pem"))
}
func certId(bs []byte) string {
func certID(bs []byte) string {
hf := sha256.New()
hf.Write(bs)
id := hf.Sum(nil)

View File

@@ -22,18 +22,14 @@ func optionTable(w io.Writer, rows [][]string) {
tw.Flush()
}
func usageFor(fs *flag.FlagSet, usage string) func() {
func usageFor(fs *flag.FlagSet, usage string, extra string) func() {
return func() {
var b bytes.Buffer
b.WriteString("Usage:\n " + usage + "\n")
var options [][]string
fs.VisitAll(func(f *flag.Flag) {
var dash = "-"
if len(f.Name) > 1 {
dash = "--"
}
var opt = " " + dash + f.Name
var opt = " -" + f.Name
if f.DefValue != "false" {
opt += "=" + f.DefValue
@@ -48,5 +44,9 @@ func usageFor(fs *flag.FlagSet, usage string) func() {
}
fmt.Println(b.String())
if len(extra) > 0 {
fmt.Println(extra)
}
}
}

View File

12
discover/debug.go Normal file
View File

@@ -0,0 +1,12 @@
package discover
import (
"log"
"os"
"strings"
)
var (
dlog = log.New(os.Stderr, "discover: ", log.Lmicroseconds|log.Lshortfile)
debug = strings.Contains(os.Getenv("STTRACE"), "discover")
)

View File

@@ -15,7 +15,6 @@ import (
const (
AnnouncementPort = 21025
Debug = false
)
type Discoverer struct {
@@ -34,7 +33,7 @@ type Discoverer struct {
}
var (
ErrIncorrectMagic = errors.New("Incorrect magic number")
ErrIncorrectMagic = errors.New("incorrect magic number")
)
// We tolerate a certain amount of errors because we might be running on
@@ -80,24 +79,75 @@ func (d *Discoverer) sendAnnouncements() {
var errCounter = 0
var err error
remote := &net.UDPAddr{
IP: net.IP{255, 255, 255, 255},
Port: AnnouncementPort,
}
for errCounter < maxErrors {
for _, ipStr := range allBroadcasts() {
var addrStr = ipStr + ":21025"
intfs, err := net.Interfaces()
if err != nil {
log.Printf("discover/listInterfaces: %v; no local announcements", err)
return
}
remote, err := net.ResolveUDPAddr("udp4", addrStr)
if err != nil {
log.Printf("discover/external: %v; no external announcements", err)
return
}
for _, intf := range intfs {
if intf.Flags&(net.FlagBroadcast|net.FlagLoopback) == net.FlagBroadcast {
addrs, err := intf.Addrs()
if err != nil {
log.Println("discover/listAddrs: warning:", err)
errCounter++
continue
}
if Debug {
fmt.Println("send announcement -> ", remote)
}
_, _, err = d.conn.WriteMsgUDP(buf, nil, remote)
if err != nil {
log.Println("discover/write: warning:", err)
errCounter++
} else {
var srcAddr string
for _, addr := range addrs {
if strings.Contains(addr.String(), ".") {
// Found an IPv4 address
parts := strings.Split(addr.String(), "/")
srcAddr = parts[0]
break
}
}
if len(srcAddr) == 0 {
if debug {
dlog.Println("no source address found on interface", intf.Name)
}
continue
}
iaddr, err := net.ResolveUDPAddr("udp4", srcAddr+":0")
if err != nil {
log.Println("discover/resolve: warning:", err)
errCounter++
continue
}
conn, err := net.ListenUDP("udp4", iaddr)
if err != nil {
log.Println("discover/listen: warning:", err)
errCounter++
continue
}
if debug {
dlog.Println("send announcement from", conn.LocalAddr(), "to", remote, "on", intf.Name)
}
_, err = conn.WriteTo(buf, remote)
if err != nil {
// Some interfaces don't seem to support broadcast even though the flags claims they do, i.e. vmnet
conn.Close()
if debug {
log.Println(err)
}
errCounter++
continue
}
conn.Close()
errCounter = 0
}
}
@@ -122,10 +172,10 @@ func (d *Discoverer) sendExtAnnouncements() {
var errCounter = 0
for errCounter < maxErrors {
if Debug {
fmt.Println("send announcement -> ", remote)
if debug {
dlog.Println("send announcement -> ", remote)
}
_, _, err = d.conn.WriteMsgUDP(buf, nil, remote)
_, err = d.conn.WriteTo(buf, remote)
if err != nil {
log.Println("discover/write: warning:", err)
errCounter++
@@ -134,7 +184,7 @@ func (d *Discoverer) sendExtAnnouncements() {
}
time.Sleep(d.ExtBroadcastIntv)
}
log.Println("discover/write: %v: stopping due to too many errors:", remote, err)
log.Printf("discover/write: %v: stopping due to too many errors: %v", remote, err)
}
func (d *Discoverer) recvAnnouncements() {
@@ -149,8 +199,8 @@ func (d *Discoverer) recvAnnouncements() {
continue
}
if Debug {
log.Printf("read announcement:\n%s", hex.Dump(buf[:n]))
if debug {
dlog.Printf("read announcement:\n%s", hex.Dump(buf[:n]))
}
var pkt AnnounceV2
@@ -161,8 +211,8 @@ func (d *Discoverer) recvAnnouncements() {
continue
}
if Debug {
log.Printf("read announcement: %#v", pkt)
if debug {
dlog.Printf("parsed announcement: %#v", pkt)
}
errCounter = 0
@@ -178,13 +228,12 @@ func (d *Discoverer) recvAnnouncements() {
}
addrs = append(addrs, nodeAddr)
}
if Debug {
log.Printf("register: %#v", addrs)
if debug {
dlog.Printf("register: %#v", addrs)
}
d.registryLock.Lock()
_, seen := d.registry[pkt.NodeID]
if !seen {
fmt.Println("new node seen, forced announce")
select {
case d.forcedBroadcastTick <- time.Now():
}
@@ -237,8 +286,8 @@ func (d *Discoverer) externalLookup(node string) []string {
return nil
}
if Debug {
log.Printf("read external:\n%s", hex.Dump(buf[:n]))
if debug {
dlog.Printf("read external:\n%s", hex.Dump(buf[:n]))
}
var pkt AnnounceV2
@@ -248,8 +297,8 @@ func (d *Discoverer) externalLookup(node string) []string {
return nil
}
if Debug {
log.Printf("read external: %#v", pkt)
if debug {
dlog.Printf("parsed external: %#v", pkt)
}
var addrs []string
@@ -290,38 +339,3 @@ func ipStr(ip []byte) string {
}
return strings.Join(ss, s)
}
func allBroadcasts() []string {
var bcasts = make(map[string]bool)
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil
}
for _, addr := range addrs {
switch {
case strings.HasPrefix(addr.String(), "127."):
// Ignore v4 localhost
case strings.Contains(addr.String(), ":"):
// Ignore all v6, because we need link local multicast there which I haven't implemented
default:
if in, ok := addr.(*net.IPNet); ok {
il := len(in.IP) - 1
ml := len(in.Mask) - 1
for i := range in.Mask {
in.IP[il-i] = in.IP[il-i] | ^in.Mask[ml-i]
}
parts := strings.Split(in.String(), "/")
bcasts[parts[0]] = true
}
}
}
var l []string
for ip := range bcasts {
l = append(l, ip)
}
return l
}

173
files/set.go Normal file
View File

@@ -0,0 +1,173 @@
package fileset
import "sync"
// File is one version of one file as tracked by the set. It is
// identified by its Key; Data carries opaque caller-supplied payload.
type File struct {
	Key      Key
	Modified int64
	Flags    uint32
	Data     interface{}
}

// Key identifies a specific version of a named file.
type Key struct {
	Name    string
	Version uint32
}

// fileRecord reference-counts a File across the connections that
// announce it, so the record can be dropped once nobody has it.
type fileRecord struct {
	Usage int
	File  File
}

// bitset holds one availability bit per connection ID (0-63).
type bitset uint64

// newerThan reports whether key a has a strictly higher version than b.
func (a Key) newerThan(b Key) bool {
	return a.Version > b.Version
}
// Set tracks the files known locally (connection ID 0) and per remote
// connection (IDs 1-63), together with the derived global view: the
// newest known version of each file and which connections have it.
type Set struct {
	mutex              sync.RWMutex
	files              map[Key]fileRecord
	remoteKey          [64]map[string]Key
	globalAvailability map[string]bitset
	globalKey          map[string]Key
}

// NewSet returns an empty, ready-to-use Set.
func NewSet() *Set {
	var m = Set{
		files:              make(map[Key]fileRecord),
		globalAvailability: make(map[string]bitset),
		globalKey:          make(map[string]Key),
	}
	// Pre-create every per-connection key map. Without this,
	// AddLocal/AddRemote called before the corresponding Set* method
	// would write to a nil map in unlockedAddRemote and panic.
	for i := range m.remoteKey {
		m.remoteKey[i] = make(map[string]Key)
	}
	return &m
}
// AddLocal merges fs into the local file set (connection ID 0).
func (m *Set) AddLocal(fs []File) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.unlockedAddRemote(0, fs)
}
// SetLocal replaces the local file set (connection ID 0) with fs.
func (m *Set) SetLocal(fs []File) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.unlockedSetRemote(0, fs)
}
// AddRemote merges fs into the file set for remote connection cid.
// It panics if cid is outside the remote range 1-63; ID 0 is
// reserved for the local set.
func (m *Set) AddRemote(cid uint, fs []File) {
	if cid < 1 || cid > 63 {
		panic("Connection ID must be in the range 1 - 63 inclusive")
	}
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.unlockedAddRemote(cid, fs)
}
// SetRemote replaces the file set for remote connection cid with fs.
// It panics if cid is outside the remote range 1-63; ID 0 is
// reserved for the local set.
func (m *Set) SetRemote(cid uint, fs []File) {
	if cid < 1 || cid > 63 {
		panic("Connection ID must be in the range 1 - 63 inclusive")
	}
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.unlockedSetRemote(cid, fs)
}
// unlockedAddRemote merges fs into connection cid's file set and
// updates the global view accordingly. The caller must hold m.mutex.
func (m *Set) unlockedAddRemote(cid uint, fs []File) {
	remFiles := m.remoteKey[cid]
	if remFiles == nil {
		// Guard against a Set whose per-connection map has not been
		// created yet; writing to a nil map would panic.
		remFiles = make(map[string]Key)
		m.remoteKey[cid] = remFiles
	}
	for _, f := range fs {
		n := f.Key.Name

		if ck, ok := remFiles[n]; ok && ck == f.Key {
			// The remote already has exactly this file, skip it
			continue
		}

		remFiles[n] = f.Key

		// Keep the file record, creating it with usage one or
		// incrementing the usage of an existing record.
		if br, ok := m.files[f.Key]; !ok {
			m.files[f.Key] = fileRecord{
				Usage: 1,
				File:  f,
			}
		} else {
			br.Usage++
			m.files[f.Key] = br
		}

		// Update the global view: either mark this connection as an
		// additional holder of the current global version, or promote
		// f.Key to global if it is newer.
		// NOTE(review): a Key with Version 0 never wins newerThan
		// against the zero Key, so version-0 files never become
		// global — confirm versions are always >= 1.
		gk, ok := m.globalKey[n]
		switch {
		case ok && f.Key == gk:
			av := m.globalAvailability[n]
			av |= 1 << cid
			m.globalAvailability[n] = av
		case f.Key.newerThan(gk):
			m.globalKey[n] = f.Key
			m.globalAvailability[n] = 1 << cid
		}
	}
}
// unlockedSetRemote replaces connection cid's file set with fs and
// rebuilds the global view from what remains. The caller must hold
// m.mutex.
func (m *Set) unlockedSetRemote(cid uint, fs []File) {
	// Decrement usage for all files belonging to this remote, and remove
	// those that are no longer referenced by any connection.
	for _, fk := range m.remoteKey[cid] {
		br, ok := m.files[fk]
		switch {
		case ok && br.Usage == 1:
			delete(m.files, fk)
		case ok && br.Usage > 1:
			br.Usage--
			m.files[fk] = br
		}
	}

	// Clear this connection's existing key map.
	m.remoteKey[cid] = make(map[string]Key)

	// Recalculate the global view from the remaining connections:
	// for each known name, find the newest version and the set of
	// connections that hold it.
	for n := range m.globalKey {
		var nk Key    // newest key seen so far for this name
		var na bitset // availability bits of connections holding nk
		for i, rem := range m.remoteKey {
			if rk, ok := rem[n]; ok {
				switch {
				case rk == nk:
					na |= 1 << uint(i)
				case rk.newerThan(nk):
					nk = rk
					na = 1 << uint(i)
				}
			}
		}
		if na != 0 {
			// Someone had the file
			m.globalKey[n] = nk
			m.globalAvailability[n] = na
		} else {
			// No one had the file; drop it from the global view.
			delete(m.globalKey, n)
			delete(m.globalAvailability, n)
		}
	}

	// Merge the new file set for this connection into the mix.
	m.unlockedAddRemote(cid, fs)
}
// Need returns the files that connection cid either lacks entirely or holds
// only in an older version than the current global one. The result order is
// unspecified (it follows map iteration order).
func (m *Set) Need(cid uint) []File {
	var fs []File
	// Purely read-only access; take the read lock so concurrent readers do
	// not serialize on the RWMutex write lock.
	m.mutex.RLock()
	for name, gk := range m.globalKey {
		// A missing entry yields the zero Key, which any real key is newer than.
		if gk.newerThan(m.remoteKey[cid][name]) {
			fs = append(fs, m.files[gk].File)
		}
	}
	m.mutex.RUnlock()
	return fs
}

207
files/set_test.go Normal file
View File

@@ -0,0 +1,207 @@
package fileset
import (
"fmt"
"reflect"
"testing"
)
// TestGlobalSet verifies that after setting a local and a remote file list,
// the global view holds the newest version of every name and the file
// records are correctly reference-counted.
func TestGlobalSet(t *testing.T) {
	s := NewSet()

	localFiles := []File{
		{Key{"a", 1000}, 0, 0, nil},
		{Key{"b", 1000}, 0, 0, nil},
		{Key{"c", 1000}, 0, 0, nil},
		{Key{"d", 1000}, 0, 0, nil},
	}
	remoteFiles := []File{
		{Key{"a", 1000}, 0, 0, nil},
		{Key{"b", 1001}, 0, 0, nil},
		{Key{"c", 1002}, 0, 0, nil},
		{Key{"e", 1000}, 0, 0, nil},
	}

	s.SetLocal(localFiles)
	s.SetRemote(1, remoteFiles)

	wantGlobal := map[string]Key{
		"a": localFiles[0].Key,
		"b": remoteFiles[1].Key,
		"c": remoteFiles[2].Key,
		"d": localFiles[3].Key,
		"e": remoteFiles[3].Key,
	}
	if !reflect.DeepEqual(s.globalKey, wantGlobal) {
		t.Errorf("Global incorrect;\n%v !=\n%v", s.globalKey, wantGlobal)
	}

	// 4 local + 3 remote-only versions (b@1001, c@1002, e@1000); "a" is shared.
	if lb := len(s.files); lb != 7 {
		t.Errorf("Num files incorrect %d != 7\n%v", lb, s.files)
	}
}
// BenchmarkSetLocal10k measures replacing a 10k-entry local file list while
// a 10k-entry remote list is present.
func BenchmarkSetLocal10k(b *testing.B) {
	m := NewSet()

	// Note: the format argument i was previously missing, so every file got
	// the identical malformed name "file%!d(MISSING)" and the benchmark
	// exercised a one-entry set.
	local := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	remote := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}
	m.SetRemote(1, remote)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetLocal(local)
	}
}
// BenchmarkSetLocal10 measures replacing a 10-entry local file list while a
// 10k-entry remote list is present.
func BenchmarkSetLocal10(b *testing.B) {
	m := NewSet()

	// Fixed: fmt.Sprintf was called without the i argument, collapsing all
	// names into one malformed string.
	local := make([]File, 0, 10)
	for i := 0; i < 10; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	remote := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}
	m.SetRemote(1, remote)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.SetLocal(local)
	}
}
// BenchmarkAddLocal10k measures merging a 10k-entry local file list whose
// versions are bumped before every iteration (version bumping excluded from
// the timed region).
func BenchmarkAddLocal10k(b *testing.B) {
	m := NewSet()

	// Fixed: fmt.Sprintf was called without the i argument, collapsing all
	// names into one malformed string.
	local := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	remote := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}
	m.SetRemote(1, remote)
	m.SetLocal(local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		for j := range local {
			local[j].Key.Version++
		}
		b.StartTimer()
		m.AddLocal(local)
	}
}
// BenchmarkAddLocal10 measures merging a 10-entry local file list whose
// versions are bumped each iteration, with a 10k-entry remote list present.
func BenchmarkAddLocal10(b *testing.B) {
	m := NewSet()

	// Fixed: fmt.Sprintf was called without the i argument, collapsing all
	// names into one malformed string.
	local := make([]File, 0, 10)
	for i := 0; i < 10; i++ {
		local = append(local, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}

	remote := make([]File, 0, 10000)
	for i := 0; i < 10000; i++ {
		remote = append(remote, File{Key{fmt.Sprintf("file%d", i), 1000}, 0, 0, nil})
	}
	m.SetRemote(1, remote)
	m.SetLocal(local)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := range local {
			local[j].Key.Version++
		}
		m.AddLocal(local)
	}
}
// TestGlobalReset verifies that clearing a remote's file list restores the
// global view and the file records to the purely local state.
func TestGlobalReset(t *testing.T) {
	s := NewSet()

	localFiles := []File{
		{Key{"a", 1000}, 0, 0, nil},
		{Key{"b", 1000}, 0, 0, nil},
		{Key{"c", 1000}, 0, 0, nil},
		{Key{"d", 1000}, 0, 0, nil},
	}
	remoteFiles := []File{
		{Key{"a", 1000}, 0, 0, nil},
		{Key{"b", 1001}, 0, 0, nil},
		{Key{"c", 1002}, 0, 0, nil},
		{Key{"e", 1000}, 0, 0, nil},
	}

	s.SetLocal(localFiles)
	s.SetRemote(1, remoteFiles)
	s.SetRemote(1, nil)

	wantGlobal := map[string]Key{
		"a": localFiles[0].Key,
		"b": localFiles[1].Key,
		"c": localFiles[2].Key,
		"d": localFiles[3].Key,
	}
	if !reflect.DeepEqual(s.globalKey, wantGlobal) {
		t.Errorf("Global incorrect;\n%v !=\n%v", s.globalKey, wantGlobal)
	}
	if lb := len(s.files); lb != 4 {
		t.Errorf("Num files incorrect %d != 4\n%v", lb, s.files)
	}
}
// TestNeed verifies that Need reports exactly the files for which the global
// view is newer than what the given connection holds.
func TestNeed(t *testing.T) {
	m := NewSet()

	local := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1000}, 0, 0, nil},
		File{Key{"c", 1000}, 0, 0, nil},
		File{Key{"d", 1000}, 0, 0, nil},
	}
	remote := []File{
		File{Key{"a", 1000}, 0, 0, nil},
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}
	shouldNeed := []File{
		File{Key{"b", 1001}, 0, 0, nil},
		File{Key{"c", 1002}, 0, 0, nil},
		File{Key{"e", 1000}, 0, 0, nil},
	}

	m.SetLocal(local)
	m.SetRemote(1, remote)

	need := m.Need(0)

	// Need ranges over a map, so its order is unspecified; comparing the
	// slices directly with DeepEqual made this test flaky. Compare as sets
	// keyed by file name instead.
	needMap := make(map[string]File, len(need))
	for _, f := range need {
		needMap[f.Key.Name] = f
	}
	wantMap := make(map[string]File, len(shouldNeed))
	for _, f := range shouldNeed {
		wantMap[f.Key.Name] = f
	}
	if len(need) != len(shouldNeed) || !reflect.DeepEqual(needMap, wantMap) {
		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
	}
}

View File

@@ -32,6 +32,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
{id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true},
{id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
{id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},
{id: 'StartBrowser', descr: 'Start Browser', type: 'bool'},
];
function modelGetSucceeded() {
@@ -317,7 +318,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}
return errors;
};
$scope.clearErrors = function () {
$scope.seenError = $scope.errors[$scope.errors.length - 1].Time;
};

View File

@@ -46,7 +46,6 @@ func (e *ErrPipe) Write(data []byte) (int, error) {
e.PipeWriter.CloseWithError(e.err)
e.closed = true
return n, e.err
} else {
return e.PipeWriter.Write(data)
}
return e.PipeWriter.Write(data)
}

View File

@@ -31,7 +31,8 @@ const (
)
var (
ErrClusterHash = fmt.Errorf("Configuration error: mismatched cluster hash")
ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
ErrClosed = errors.New("connection closed")
)
type Model interface {
@@ -56,7 +57,7 @@ type Connection struct {
xw *xdr.Writer
closed bool
awaiting map[int]chan asyncResult
nextId int
nextID int
indexSent map[string]map[string][2]int64
peerOptions map[string]string
myOptions map[string]string
@@ -68,8 +69,6 @@ type Connection struct {
statisticsLock sync.Mutex
}
var ErrClosed = errors.New("Connection closed")
type asyncResult struct {
val []byte
err error
@@ -105,7 +104,7 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
c.myOptions = options
go func() {
c.Lock()
header{0, c.nextId, messageTypeOptions}.encodeXDR(c.xw)
header{0, c.nextID, messageTypeOptions}.encodeXDR(c.xw)
var om OptionsMessage
for k, v := range options {
om.Options = append(om.Options, Option{k, v})
@@ -118,7 +117,7 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
if err != nil {
log.Println("Warning: Write error during initial handshake:", err)
}
c.nextId++
c.nextID++
c.Unlock()
}()
}
@@ -155,12 +154,12 @@ func (c *Connection) Index(repo string, idx []FileInfo) {
idx = diff
}
header{0, c.nextId, msgType}.encodeXDR(c.xw)
header{0, c.nextID, msgType}.encodeXDR(c.xw)
_, err := IndexMessage{repo, idx}.encodeXDR(c.xw)
if err == nil {
err = c.flush()
}
c.nextId = (c.nextId + 1) & 0xfff
c.nextID = (c.nextID + 1) & 0xfff
c.hasSentIndex = true
c.Unlock()
@@ -178,8 +177,8 @@ func (c *Connection) Request(repo string, name string, offset int64, size int) (
return nil, ErrClosed
}
rc := make(chan asyncResult)
c.awaiting[c.nextId] = rc
header{0, c.nextId, messageTypeRequest}.encodeXDR(c.xw)
c.awaiting[c.nextID] = rc
header{0, c.nextID, messageTypeRequest}.encodeXDR(c.xw)
_, err := RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
if err == nil {
err = c.flush()
@@ -189,7 +188,7 @@ func (c *Connection) Request(repo string, name string, offset int64, size int) (
c.close(err)
return nil, err
}
c.nextId = (c.nextId + 1) & 0xfff
c.nextID = (c.nextID + 1) & 0xfff
c.Unlock()
res, ok := <-rc
@@ -206,8 +205,8 @@ func (c *Connection) ping() bool {
return false
}
rc := make(chan asyncResult, 1)
c.awaiting[c.nextId] = rc
header{0, c.nextId, messageTypePing}.encodeXDR(c.xw)
c.awaiting[c.nextID] = rc
header{0, c.nextID, messageTypePing}.encodeXDR(c.xw)
err := c.flush()
if err != nil {
c.Unlock()
@@ -218,7 +217,7 @@ func (c *Connection) ping() bool {
c.close(c.xw.Error())
return false
}
c.nextId = (c.nextId + 1) & 0xfff
c.nextID = (c.nextID + 1) & 0xfff
c.Unlock()
res, ok := <-rc
@@ -268,7 +267,7 @@ loop:
break loop
}
if hdr.version != 0 {
c.close(fmt.Errorf("Protocol error: %s: unknown message version %#x", c.ID, hdr.version))
c.close(fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version))
break loop
}
@@ -371,7 +370,7 @@ loop:
}
default:
c.close(fmt.Errorf("Protocol error: %s: unknown message type %#x", c.ID, hdr.msgType))
c.close(fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType))
break loop
}
}
@@ -410,10 +409,10 @@ func (c *Connection) pingerLoop() {
select {
case ok := <-rc:
if !ok {
c.close(fmt.Errorf("Ping failure"))
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(pingTimeout):
c.close(fmt.Errorf("Ping timeout"))
c.close(fmt.Errorf("ping timeout"))
}
}
}

View File

@@ -38,7 +38,7 @@ func TestPing(t *testing.T) {
}
func TestPingErr(t *testing.T) {
e := errors.New("Something broke")
e := errors.New("something broke")
for i := 0; i < 12; i++ {
for j := 0; j < 12; j++ {
@@ -64,7 +64,7 @@ func TestPingErr(t *testing.T) {
}
func TestRequestResponseErr(t *testing.T) {
e := errors.New("Something broke")
e := errors.New("something broke")
var pass bool
for i := 0; i < 48; i++ {
@@ -99,16 +99,16 @@ func TestRequestResponseErr(t *testing.T) {
t.Errorf("Incorrect response data %q", string(d))
}
if m0.repo != "default" {
t.Error("Incorrect repo %q", m0.repo)
t.Errorf("Incorrect repo %q", m0.repo)
}
if m0.name != "tn" {
t.Error("Incorrect name %q", m0.name)
t.Errorf("Incorrect name %q", m0.name)
}
if m0.offset != 1234 {
t.Error("Incorrect offset %d", m0.offset)
t.Errorf("Incorrect offset %d", m0.offset)
}
if m0.size != 5678 {
t.Error("Incorrect size %d", m0.size)
t.Errorf("Incorrect size %d", m0.size)
}
t.Logf("Pass at %d+%d bytes", i, j)
pass = true

View File

@@ -1,4 +1,4 @@
package main
package scanner
import (
"bytes"
@@ -17,7 +17,7 @@ func Blocks(r io.Reader, blocksize int) ([]Block, error) {
var blocks []Block
var offset int64
for {
lr := &io.LimitedReader{r, int64(blocksize)}
lr := &io.LimitedReader{R: r, N: int64(blocksize)}
hf := sha256.New()
n, err := io.Copy(hf, lr)
if err != nil {

View File

@@ -1,4 +1,4 @@
package main
package scanner
import (
"bytes"

12
scanner/debug.go Normal file
View File

@@ -0,0 +1,12 @@
package scanner
import (
"log"
"os"
"strings"
)
var (
	// dlog is the logger used for scanner package debug/trace output.
	dlog = log.New(os.Stderr, "scanner: ", log.Lmicroseconds|log.Lshortfile)
	// debug enables trace output when the STTRACE environment variable
	// contains the string "scanner".
	debug = strings.Contains(os.Getenv("STTRACE"), "scanner")
)

25
scanner/file.go Normal file
View File

@@ -0,0 +1,25 @@
package scanner
import "fmt"
// File describes a single file in the repository: its metadata and the
// hashes of its contents, split into blocks.
type File struct {
	Name     string
	Flags    uint32
	Modified int64
	Version  uint32
	Size     int64
	Blocks   []Block
}

// String returns a compact, human-readable summary of the file.
func (f File) String() string {
	return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
		f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}

// Equals reports whether f and o carry the same modification time and version.
func (f File) Equals(o File) bool {
	if f.Modified != o.Modified {
		return false
	}
	return f.Version == o.Version
}

// NewerThan reports whether f represents a more recent state than o,
// comparing modification time first and version as a tie breaker.
func (f File) NewerThan(o File) bool {
	if f.Modified != o.Modified {
		return f.Modified > o.Modified
	}
	return f.Version > o.Version
}

0
scanner/testdata/.foo/bar vendored Normal file
View File

2
scanner/testdata/.stignore vendored Normal file
View File

@@ -0,0 +1,2 @@
.*
quux

1
scanner/testdata/bar vendored Normal file
View File

@@ -0,0 +1 @@
foobarbaz

1
scanner/testdata/baz/quux vendored Normal file
View File

@@ -0,0 +1 @@
baazquux

0
scanner/testdata/empty vendored Normal file
View File

1
scanner/testdata/foo vendored Normal file
View File

@@ -0,0 +1 @@
foobar

268
scanner/walk.go Normal file
View File

@@ -0,0 +1,268 @@
package scanner
import (
"bytes"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
// A Walker scans a directory tree for files, hashing their contents and
// keeping per-file state between scans so unchanged files are not rehashed.
type Walker struct {
	// Dir is the base directory for the walk
	Dir string
	// If FollowSymlinks is true, symbolic links directly under Dir will be followed.
	// Symbolic links at deeper levels are never followed regardless of this flag.
	FollowSymlinks bool
	// BlockSize controls the size of the block used when hashing.
	BlockSize int
	// If IgnoreFile is not empty, it is the name used for the file that holds ignore patterns.
	IgnoreFile string
	// If TempNamer is not nil, it is used to ignore temporary files when walking.
	TempNamer TempNamer
	// If Suppressor is not nil, it is queried for suppression of modified files.
	Suppressor Suppressor

	previous map[string]File // file name -> last seen file state
	suppressed map[string]bool // file name -> suppression status
}
// A TempNamer maps between real file names and the temporary names used
// while a file is being transferred.
type TempNamer interface {
	// TempName returns a temporary name for the file referred to by path.
	TempName(path string) string
	// IsTemporary returns true if path refers to the name of a temporary file.
	IsTemporary(path string) bool
}
// A Suppressor decides whether a change to a file should be temporarily
// ignored, for example because the file changes too frequently.
type Suppressor interface {
	// Suppress returns true if the update to the named file should be ignored.
	Suppress(name string, fi os.FileInfo) bool
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed. The returned ignore map holds the
// patterns loaded from any ignore files, keyed by directory relative to Dir.
func (w *Walker) Walk() (files []File, ignore map[string][]string) {
	w.lazyInit()

	if debug {
		dlog.Println("Walk", w.Dir, w.FollowSymlinks, w.BlockSize, w.IgnoreFile)
	}

	t0 := time.Now()

	ignore = make(map[string][]string)
	hashFiles := w.walkAndHashFiles(&files, ignore)

	// Two passes: collect all ignore patterns first, then hash, so every
	// pattern is known before any file is considered.
	filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, ignore))
	filepath.Walk(w.Dir, hashFiles)

	if w.FollowSymlinks {
		// Only symlinks directly under Dir are followed (see the field doc).
		d, err := os.Open(w.Dir)
		if err != nil {
			return
		}
		defer d.Close()

		fis, err := d.Readdir(-1)
		if err != nil {
			return
		}

		for _, info := range fis {
			if info.Mode()&os.ModeSymlink != 0 {
				// Trailing slash makes filepath.Walk traverse through the link.
				dir := path.Join(w.Dir, info.Name()) + "/"
				filepath.Walk(dir, w.loadIgnoreFiles(dir, ignore))
				filepath.Walk(dir, hashFiles)
			}
		}
	}

	if debug {
		t1 := time.Now()
		d := t1.Sub(t0).Seconds()
		dlog.Printf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d)
	}

	return
}
// CleanTempFiles removes all files that match the temporary filename pattern,
// as recognized by the walker's TempNamer, anywhere under Dir.
func (w *Walker) CleanTempFiles() {
	filepath.Walk(w.Dir, w.cleanTempFile)
}
// lazyInit allocates the walker's internal state maps on first use so that
// a zero-valued Walker is directly usable.
func (w *Walker) lazyInit() {
	if w.previous != nil {
		return
	}
	w.previous = make(map[string]File)
	w.suppressed = make(map[string]bool)
}
// loadIgnoreFiles returns a filepath.WalkFunc that reads every IgnoreFile
// found under dir and records its non-empty lines as patterns in ign, keyed
// by the containing directory relative to dir (slashes trimmed).
func (w *Walker) loadIgnoreFiles(dir string, ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return nil
		}

		rel, rerr := filepath.Rel(dir, p)
		if rerr != nil {
			return nil
		}

		dirPart, base := path.Split(rel)
		if base != w.IgnoreFile {
			return nil
		}

		key := strings.Trim(dirPart, "/")
		bs, _ := ioutil.ReadFile(p)

		var patterns []string
		for _, line := range bytes.Split(bs, []byte("\n")) {
			if len(line) > 0 {
				patterns = append(patterns, string(line))
			}
		}
		ign[key] = patterns
		return nil
	}
}
// walkAndHashFiles returns a filepath.WalkFunc that appends a File entry to
// res for every regular file encountered, blockwise hashing new or changed
// files and reusing the cached entry for unchanged ones. Ignore patterns in
// ign, the walker's own ignore files, and temporary files are skipped.
func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
	return func(p string, info os.FileInfo, err error) error {
		if err != nil {
			if debug {
				dlog.Println("error:", p, info, err)
			}
			return nil
		}

		rn, err := filepath.Rel(w.Dir, p)
		if err != nil {
			if debug {
				dlog.Println("rel error:", p, err)
			}
			return nil
		}

		// Skip our own in-flight temporary files.
		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
			if debug {
				dlog.Println("temporary:", rn)
			}
			return nil
		}

		// The ignore files themselves are never synced.
		if _, sn := path.Split(rn); sn == w.IgnoreFile {
			if debug {
				dlog.Println("ignorefile:", rn)
			}
			return nil
		}

		// Never treat the repository root (".") as ignored; for an ignored
		// directory, skip its entire subtree.
		if rn != "." && w.ignoreFile(ign, rn) {
			if debug {
				dlog.Println("ignored:", rn)
			}
			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}

		// Only regular files (no directories, symlinks, devices, ...) get hashed.
		if info.Mode()&os.ModeType == 0 {
			modified := info.ModTime().Unix()
			pf := w.previous[rn]
			if pf.Modified == modified {
				// Same mtime as last scan: reuse the cached entry and block
				// list, bumping the version only if the mode bits changed.
				if nf := uint32(info.Mode()); nf != pf.Flags {
					if debug {
						dlog.Println("new flags:", rn)
					}
					pf.Flags = nf
					pf.Version++
					w.previous[rn] = pf
				} else if debug {
					dlog.Println("unchanged:", rn)
				}
				*res = append(*res, pf)
				return nil
			}

			if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) {
				// Rapidly-changing file: publish the previously seen state
				// flagged invalid instead of rehashing it.
				if debug {
					dlog.Println("suppressed:", rn)
				}
				if !w.suppressed[rn] {
					w.suppressed[rn] = true
					log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p)
				}
				f := pf
				f.Flags = protocol.FlagInvalid
				f.Blocks = nil
				*res = append(*res, f)
				return nil
			} else if w.suppressed[rn] {
				log.Printf("INFO: Changes to %q are no longer suppressed.", p)
				delete(w.suppressed, rn)
			}

			fd, err := os.Open(p)
			if err != nil {
				if debug {
					dlog.Println("open:", p, err)
				}
				return nil
			}
			// Note: defer in a WalkFunc closes at the end of this call, not
			// the whole walk, so descriptors are released per file.
			defer fd.Close()

			t0 := time.Now()
			blocks, err := Blocks(fd, w.BlockSize)
			if err != nil {
				if debug {
					dlog.Println("hash error:", rn, err)
				}
				return nil
			}
			if debug {
				t1 := time.Now()
				dlog.Println("hashed:", rn, ";", len(blocks), "blocks;", info.Size(), "bytes;", int(float64(info.Size())/1024/t1.Sub(t0).Seconds()), "KB/s")
			}

			f := File{
				Name:     rn,
				Size:     info.Size(),
				Flags:    uint32(info.Mode()),
				Modified: modified,
				Blocks:   blocks,
			}
			w.previous[rn] = f
			*res = append(*res, f)
		}
		return nil
	}
}
// cleanTempFile is a filepath.WalkFunc that removes path if it is a regular
// file with a temporary name. Used by CleanTempFiles.
func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	// TempNamer is documented as optional and walkAndHashFiles nil-checks
	// it; do the same here instead of panicking when it is unset.
	if w.TempNamer != nil && info.Mode()&os.ModeType == 0 && w.TempNamer.IsTemporary(path) {
		os.Remove(path)
	}
	return nil
}
// ignoreFile reports whether file matches any ignore pattern whose directory
// prefix applies to the file's location.
func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
	dir, name := path.Split(file)
	for prefix, pats := range patterns {
		// A pattern set applies if it is global (empty prefix), defined in
		// exactly this directory, or in an ancestor directory.
		applies := len(prefix) == 0 || prefix == dir || strings.HasPrefix(dir, prefix+"/")
		if !applies {
			continue
		}
		for _, pat := range pats {
			if ok, _ := path.Match(pat, name); ok {
				return true
			}
		}
	}
	return false
}

View File

@@ -1,4 +1,4 @@
package main
package scanner
import (
"fmt"
@@ -22,8 +22,12 @@ var correctIgnores = map[string][]string{
}
func TestWalk(t *testing.T) {
m := NewModel("testdata", 1e6)
files, ignores := m.Walk(false)
w := Walker{
Dir: "testdata",
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
files, ignores := w.Walk()
if l1, l2 := len(files), len(testdata); l1 != l2 {
t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
@@ -75,8 +79,9 @@ func TestIgnore(t *testing.T) {
{"foo/bazz/quux", false},
}
w := Walker{}
for i, tc := range tests {
if r := ignoreFile(patterns, tc.f); r != tc.r {
if r := w.ignoreFile(patterns, tc.f); r != tc.r {
t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
}
}

238
walk.go
View File

@@ -1,238 +0,0 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Version uint32
Size int64
Blocks []Block
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
func (f File) NewerThan(o File) bool {
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}
// isTempName reports whether name refers to one of our temporary files,
// recognized by the ".syncthing." prefix on the base name.
func isTempName(name string) bool {
	base := path.Base(name)
	return strings.HasPrefix(base, ".syncthing.")
}
// tempName returns the temporary file name used while transferring the named
// file, embedding the modification time so each version gets a distinct name.
func tempName(name string, modified int64) string {
	dir, base := path.Dir(name), path.Base(name)
	return path.Join(dir, fmt.Sprintf(".syncthing.%s.%d", base, modified))
}
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(rn); sn == ".stignore" {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
}
return nil
}
}
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if _, sn := path.Split(rn); sn == ".stignore" {
// We never sync the .stignore files
return nil
}
if ignoreFile(ign, rn) {
if m.trace["file"] {
log.Println("FILE: IGNORE:", rn)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
modified := info.ModTime().Unix()
m.lmut.RLock()
lf, ok := m.local[rn]
m.lmut.RUnlock()
if ok && lf.Modified == modified {
if nf := uint32(info.Mode()); nf != lf.Flags {
lf.Flags = nf
lf.Version++
}
*res = append(*res, lf)
} else {
if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
if m.trace["file"] {
log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
}
if !prev {
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
}
if ok {
lf.Flags = protocol.FlagInvalid
lf.Version++
*res = append(*res, lf)
}
return nil
} else if prev && !cur {
log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
}
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
f := File{
Name: rn,
Size: info.Size(),
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
hashFiles := m.walkAndHashFiles(&files, ignore)
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
filepath.Walk(m.dir, hashFiles)
if followSymlinks {
d, err := os.Open(m.dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(m.dir, info.Name()) + "/"
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
filepath.Walk(dir, hashFiles)
}
}
}
return
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if m.trace["file"] {
log.Printf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
// ignoreFile reports whether file matches any ignore pattern whose directory
// prefix applies to the file's location: global patterns (empty prefix),
// patterns from the file's own directory, or from an ancestor directory.
func ignoreFile(patterns map[string][]string, file string) bool {
	dir, name := path.Split(file)
	for prefix, pats := range patterns {
		applies := len(prefix) == 0 || prefix == dir || strings.HasPrefix(dir, prefix+"/")
		if !applies {
			continue
		}
		for _, pat := range pats {
			if ok, _ := path.Match(pat, name); ok {
				return true
			}
		}
	}
	return false
}

View File

@@ -5,7 +5,7 @@ import (
"io"
)
var ErrElementSizeExceeded = errors.New("Element size exceeded")
var ErrElementSizeExceeded = errors.New("element size exceeded")
type Reader struct {
r io.Reader