Compare commits


31 Commits

Author SHA1 Message Date
Jakob Borg
17cd49fbdc Indicate aproximativeness of repo sizes... 2014-08-12 23:59:20 +02:00
Jakob Borg
ad273adb78 Slightly more conservative guess on file size 2014-08-12 16:36:24 +02:00
Jakob Borg
150e7daf2d Fix set tests 2014-08-12 16:17:32 +02:00
Jakob Borg
b004155e8f Small goleveldb hack to reduce allocations somewhat 2014-08-12 15:39:24 +02:00
Jakob Borg
92eed3b33b Don't load block lists from db unless necessary 2014-08-12 15:04:32 +02:00
Jakob Borg
fe7b77198c Rip out the Suppressor (maybe to be reintroduced) 2014-08-12 15:04:02 +02:00
Jakob Borg
f51b775698 Reduce allocations while hash scanning 2014-08-12 15:04:02 +02:00
Jakob Borg
939dd5cb31 Add heap profiling support 2014-08-12 15:04:01 +02:00
Jakob Borg
adcbe13ecd Update goleveldb 2014-08-12 09:24:36 +02:00
Jakob Borg
97dda6a4bb Correct the memory stats in perfstats-*.csv 2014-08-11 22:10:15 +02:00
Jakob Borg
9e395eb883 Use a slightly heavier Raleway for headings (fixes #493) 2014-08-11 21:50:15 +02:00
Jakob Borg
60da59623e Limit size of sent indexes a bit, taking number of blocks into account 2014-08-11 20:54:59 +02:00
Jakob Borg
9752ea9ac3 Implement external scan request (fixes #9) 2014-08-11 20:20:01 +02:00
Jakob Borg
279693078a Update deps 2014-08-11 14:24:20 +02:00
Jakob Borg
19b93045a4 Merge pull request #508 from AudriusButkevicius/modals
Fix and refactor modals
2014-08-11 12:12:28 +02:00
Jakob Borg
5231a09820 Add ./build.sh noupgrade and all-noupgrade 2014-08-11 11:59:33 +02:00
Jakob Borg
ab952e6103 Add ./build.sh clean 2014-08-11 11:54:48 +02:00
Jakob Borg
a418771c04 Puller entrance warning 2014-08-11 07:52:03 +02:00
Audrius Butkevicius
b41590ce38 Fix and refactor modals 2014-08-10 23:28:04 +01:00
Jakob Borg
c7dde9499f Verify locking and correct update order for global 2014-08-10 07:27:24 +02:00
Jakob Borg
528cbf62ec POST to /config should return an error when something bad happens (fixes #489) 2014-08-08 14:09:27 +02:00
Jakob Borg
1be4b8bb5d Merge pull request #486 from AudriusButkevicius/windows
Add Windows upgrade support
2014-08-07 23:20:26 +02:00
Jakob Borg
c832fc9917 Merge pull request #485 from tojrobinson/world-writable-root
World writable root
2014-08-07 23:17:42 +02:00
Jakob Borg
4797a94689 Add explicit GC calls after expensive db ops (ref #468) 2014-08-07 23:09:50 +02:00
Audrius Butkevicius
6948903084 Add Windows upgrade support 2014-08-07 21:07:21 +01:00
treefingers
94164611ae Fix root being left world writable 2014-08-08 05:45:50 +10:00
treefingers
ae298e8902 Merge branch 'master' of https://github.com/syncthing/syncthing 2014-08-08 05:06:42 +10:00
Jakob Borg
3d8771ecb0 Woops, broke the build 2014-08-07 15:58:48 +02:00
Jakob Borg
28db264e90 Upgrade debugging, fix upgrade on ARM (fixes #482) 2014-08-07 15:57:20 +02:00
Jakob Borg
6af9fa4b81 Localize Close button in standard modals (fixes #481) 2014-08-07 12:35:38 +02:00
Tully Robinson
c45b18cc75 Merge branch 'master' into browser-flag 2014-08-06 23:01:35 +10:00
47 changed files with 1271 additions and 612 deletions

Godeps/Godeps.json generated

@@ -12,23 +12,23 @@
},
{
"ImportPath": "code.google.com/p/go.crypto/bcrypt",
"Comment": "null-213",
"Rev": "aa2644fe4aa50e3b38d75187b4799b1f0c9ddcef"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.crypto/blowfish",
"Comment": "null-213",
"Rev": "aa2644fe4aa50e3b38d75187b4799b1f0c9ddcef"
"Comment": "null-216",
"Rev": "41cd4647fccc72b0b79ef1bd1fe6735e718257cd"
},
{
"ImportPath": "code.google.com/p/go.text/transform",
"Comment": "null-89",
"Rev": "df15baaf13e3f62b6b7a901e74caa3818a7c0a7c"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/go.text/unicode/norm",
"Comment": "null-89",
"Rev": "df15baaf13e3f62b6b7a901e74caa3818a7c0a7c"
"Comment": "null-90",
"Rev": "d65bffbc88a153d23a6d2a864531e6e7c2cde59b"
},
{
"ImportPath": "code.google.com/p/snappy-go/snappy",
@@ -41,7 +41,7 @@
},
{
"ImportPath": "github.com/calmh/xdr",
"Rev": "694859acb207675085232438780db923ceb43e96"
"Rev": "e1714bbe4764b15490fcc8ebd25d4bd9ea50a4b9"
},
{
"ImportPath": "github.com/juju/ratelimit",
@@ -49,7 +49,7 @@
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "c5955912e3287376475731c5bc59c79a5a799105"
"Rev": "c9d6b7be1428942d4cf4f54055b991a8513392eb"
},
{
"ImportPath": "github.com/vitrun/qart/coding",


@@ -4,6 +4,22 @@
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
@@ -12,6 +28,7 @@ package blowfish
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
@@ -54,86 +71,44 @@ func ExpandKey(key []byte, c *Cipher) {
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var expandedSalt [4]uint32
for i := range expandedSalt {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(salt[j])
j++
if j >= len(salt) {
j = 0
}
}
expandedSalt[i] = d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= expandedSalt[i&2]
r ^= expandedSalt[(i&2)+1]
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s0[i+2], c.s0[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s1[i+2], c.s1[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s2[i+2], c.s2[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s3[i+2], c.s3[i+3] = l, r
}
}
@@ -182,9 +157,3 @@ func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xr ^= c.p[0]
return xr, xl
}
func zero(x []uint32) {
for i := range x {
x[i] = 0
}
}
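
For orientation, a minimal sketch (not part of the diff) of what the new getNextWord helper does: it reads four big-endian bytes at a time and wraps around the slice, which is exactly what the removed inline loops did.

key := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
pos := 0
w1 := getNextWord(key, &pos) // w1 == 0x01020304, pos == 4
w2 := getNextWord(key, &pos) // w2 == 0x05010203, pos wrapped past the end to 3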


@@ -4,9 +4,7 @@
package blowfish
import (
"testing"
)
import "testing"
type CryptTest struct {
key []byte
@@ -202,3 +200,75 @@ func TestSaltedCipherKeyLength(t *testing.T) {
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
}
}
// Test vectors generated with Blowfish from OpenSSH.
var saltedVectors = [][8]byte{
{0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
{0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
{0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
{0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
{0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
{0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
{0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
{0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
{0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
{0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
{0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
{0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
{0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
{0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
{0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
{0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
{0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
{0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
{0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
{0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
{0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
{0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
{0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
{0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
{0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
{0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
{0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
{0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
{0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
{0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
{0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
{0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
}
func TestSaltedCipher(t *testing.T) {
var key, salt [32]byte
for i := range key {
key[i] = byte(i)
salt[i] = byte(i + 32)
}
for i, v := range saltedVectors {
c, err := NewSaltedCipher(key[:], salt[:i])
if err != nil {
t.Fatal(err)
}
var buf [8]byte
c.Encrypt(buf[:], buf[:])
if v != buf {
t.Errorf("%d: expected %x, got %x", i, v, buf)
}
}
}
func BenchmarkExpandKeyWithSalt(b *testing.B) {
key := make([]byte, 32)
salt := make([]byte, 16)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
expandKeyWithSalt(key, salt, c)
}
}
func BenchmarkExpandKey(b *testing.B) {
key := make([]byte, 32)
c, _ := NewCipher(key)
for i := 0; i < b.N; i++ {
ExpandKey(key, c)
}
}


@@ -40,8 +40,11 @@ func NewCipher(key []byte) (*Cipher, error) {
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes. Only the first 16 bytes of salt are used.
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
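
One behavioral consequence of the new guard, sketched for illustration (key as in the tests above): a nil or empty salt now falls back to the plain key schedule, where the old code would have indexed into the empty salt slice.

c1, _ := blowfish.NewSaltedCipher(key, nil) // now equivalent to NewCipher(key)
c2, _ := blowfish.NewCipher(key)            // same key schedule as c1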


@@ -7,6 +7,8 @@ import (
"io"
"io/ioutil"
"testing"
"github.com/calmh/xdr"
)
type XDRBenchStruct struct {
@@ -58,6 +60,16 @@ func BenchmarkThisEncode(b *testing.B) {
}
}
func BenchmarkThisEncoder(b *testing.B) {
w := xdr.NewWriter(ioutil.Discard)
for i := 0; i < b.N; i++ {
_, err := s.encodeXDR(w)
if err != nil {
b.Fatal(err)
}
}
}
type repeatReader struct {
data []byte
}
@@ -86,3 +98,16 @@ func BenchmarkThisDecode(b *testing.B) {
rr.Reset(e)
}
}
func BenchmarkThisDecoder(b *testing.B) {
rr := &repeatReader{e}
r := xdr.NewReader(rr)
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.decodeXDR(r)
if err != nil {
b.Fatal(err)
}
rr.Reset(e)
}
}


@@ -7,6 +7,8 @@ package xdr
import (
"errors"
"io"
"reflect"
"unsafe"
)
var ErrElementSizeExceeded = errors.New("element size exceeded")
@@ -15,7 +17,6 @@ type Reader struct {
r io.Reader
err error
b [8]byte
sb []byte
}
func NewReader(r io.Reader) *Reader {
@@ -35,23 +36,17 @@ func (r *Reader) ReadRaw(bs []byte) (int, error) {
}
func (r *Reader) ReadString() string {
if r.sb == nil {
r.sb = make([]byte, 64)
} else {
r.sb = r.sb[:cap(r.sb)]
}
r.sb = r.ReadBytesInto(r.sb)
return string(r.sb)
return r.ReadStringMax(0)
}
func (r *Reader) ReadStringMax(max int) string {
if r.sb == nil {
r.sb = make([]byte, 64)
} else {
r.sb = r.sb[:cap(r.sb)]
buf := r.ReadBytesMaxInto(max, nil)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh := reflect.StringHeader{
Data: bh.Data,
Len: bh.Len,
}
r.sb = r.ReadBytesMaxInto(max, r.sb)
return string(r.sb)
return *((*string)(unsafe.Pointer(&sh)))
}
func (r *Reader) ReadBytes() []byte {
@@ -80,10 +75,10 @@ func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
return nil
}
if l+pad(l) > len(dst) {
dst = make([]byte, l+pad(l))
if fullLen := l + pad(l); fullLen > len(dst) {
dst = make([]byte, fullLen)
} else {
dst = dst[:l+pad(l)]
dst = dst[:fullLen]
}
var n int
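
The header juggling above is a zero-copy []byte-to-string conversion. As a self-contained sketch (using the "reflect" and "unsafe" imports added in this hunk):

// unsafeBytesToString aliases b's backing array as a string without copying.
// This is safe only if b is never written to afterwards; ReadStringMax
// guarantees that by passing a nil dst, so ReadBytesMaxInto allocates a
// fresh buffer that nothing else references.
func unsafeBytesToString(b []byte) string {
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	sh := reflect.StringHeader{Data: bh.Data, Len: bh.Len}
	return *(*string)(unsafe.Pointer(&sh))
}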


@@ -3,7 +3,11 @@
package xdr
import "io"
import (
"io"
"reflect"
"unsafe"
)
var padBytes = []byte{0, 0, 0}
@@ -38,7 +42,13 @@ func (w *Writer) WriteRaw(bs []byte) (int, error) {
}
func (w *Writer) WriteString(s string) (int, error) {
return w.WriteBytes([]byte(s))
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
}
func (w *Writer) WriteBytes(bs []byte) (int, error) {
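
WriteString now performs the reverse aliasing, string to []byte, so writing a string no longer copies it. Since Go strings are immutable, the aliased slice must be treated as read-only; WriteBytes only reads from it, so the trick holds. The same conversion as a standalone sketch:

// unsafeStringToBytes aliases s as a []byte without copying. The caller
// must never write to the returned slice.
func unsafeStringToBytes(s string) []byte {
	sh := *(*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len}
	return *(*[]byte)(unsafe.Pointer(&bh))
}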


@@ -481,10 +481,11 @@ func (db *DB) recoverJournal() error {
buf.Reset()
if _, err := buf.ReadFrom(r); err != nil {
if strict {
if err == io.ErrUnexpectedEOF {
continue
} else {
return err
}
continue
}
if err := batch.decode(buf.Bytes()); err != nil {
return err


@@ -103,18 +103,18 @@ type flusher interface {
Flush() error
}
// DroppedError is the error type that is passed to the Dropper.Drop method.
type DroppedError struct {
// ErrCorrupted is the error type that is generated by a corrupted block or chunk.
type ErrCorrupted struct {
Size int
Reason string
}
func (e DroppedError) Error() string {
return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason)
func (e ErrCorrupted) Error() string {
return fmt.Sprintf("leveldb/journal: corrupted %d bytes: %s", e.Size, e.Reason)
}
// Dropper is the interface that wraps the simple Drop method. The Drop
// method will be called when the journal reader drops a chunk.
// method will be called when the journal reader drops a block or chunk.
type Dropper interface {
Drop(err error)
}
@@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
}
}
var errSkip = errors.New("leveldb/journal: skipped")
func (r *Reader) corrupt(n int, reason string, skip bool) error {
if r.dropper != nil {
r.dropper.Drop(ErrCorrupted{n, reason})
}
if r.strict && !skip {
r.err = ErrCorrupted{n, reason}
return r.err
}
return errSkip
}
// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
func (r *Reader) nextChunk(wantFirst, skip bool) error {
func (r *Reader) nextChunk(first bool) error {
for {
if r.j+headerSize <= r.n {
checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
chunkType := r.buf[r.j+6]
var err error
if checksum == 0 && length == 0 && chunkType == 0 {
// Drop entire block.
err = DroppedError{r.n - r.j, "zero header"}
m := r.n - r.j
r.i = r.n
r.j = r.n
return r.corrupt(m, "zero header", false)
} else {
m := r.n - r.j
r.i = r.j + headerSize
r.j = r.j + headerSize + int(length)
if r.j > r.n {
// Drop entire block.
err = DroppedError{m, "chunk length overflows block"}
r.i = r.n
r.j = r.n
return r.corrupt(m, "chunk length overflows block", false)
} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
// Drop entire block.
err = DroppedError{m, "checksum mismatch"}
r.i = r.n
r.j = r.n
return r.corrupt(m, "checksum mismatch", false)
}
}
if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
if skip {
// The chunks are intentionally skipped.
if chunkType == lastChunkType {
skip = false
}
continue
} else {
// Drop the chunk.
err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
}
if first && chunkType != fullChunkType && chunkType != firstChunkType {
m := r.j - r.i
r.i = r.j
// Report the error, but skip it.
return r.corrupt(m+headerSize, "orphan chunk", true)
}
if err == nil {
r.last = chunkType == fullChunkType || chunkType == lastChunkType
} else {
if r.dropper != nil {
r.dropper.Drop(err)
}
if r.strict {
r.err = err
}
r.last = chunkType == fullChunkType || chunkType == lastChunkType
return nil
}
// The last block.
if r.n < blockSize && r.n > 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
// Read block.
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
if r.n < blockSize && r.n > 0 {
// This is the last block.
if r.j != r.n {
r.err = io.ErrUnexpectedEOF
} else {
r.err = io.EOF
}
return r.err
}
n, err := io.ReadFull(r.r, r.buf[:])
if err != nil && err != io.ErrUnexpectedEOF {
r.err = err
return r.err
}
if n == 0 {
if !first {
return r.corrupt(0, "missing chunk part", false)
}
r.err = io.EOF
return r.err
}
@@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
// Next returns a reader for the next journal. It returns io.EOF if there are no
// more journals. The reader returned becomes stale after the next Next call,
// and should no longer be used.
// and should no longer be used. If strict is false, the reader will return
// io.ErrUnexpectedEOF when it encounters a corrupted journal.
func (r *Reader) Next() (io.Reader, error) {
r.seq++
if r.err != nil {
return nil, r.err
}
skip := !r.last
r.i = r.j
for {
r.i = r.j
if r.nextChunk(true, skip) != nil {
// So that 'orphan chunk' drop will be reported.
skip = false
} else {
if err := r.nextChunk(true); err == nil {
break
}
if r.err != nil {
return nil, r.err
} else if err != errSkip {
return nil, err
}
}
return &singleReader{r, r.seq, nil}, nil
}
// Reset resets the journal reader, allows reuse of the journal reader.
// Reset resets the journal reader, allowing reuse of the journal reader. Reset
// returns the last accumulated error.
func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
r.seq++
err := r.err
@@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
if r.last {
return 0, io.EOF
}
if x.err = r.nextChunk(false, false); x.err != nil {
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
@@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
if r.last {
return 0, io.EOF
}
if x.err = r.nextChunk(false, false); x.err != nil {
x.err = r.nextChunk(false)
if x.err != nil {
if x.err == errSkip {
x.err = io.ErrUnexpectedEOF
}
return 0, x.err
}
}
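
For context, a hedged sketch of how a caller consumes these corruption reports; the logDropper type is hypothetical, but mirrors the dropper in the session file below:

// logDropper is a hypothetical Dropper that logs each corruption report.
type logDropper struct{}

func (logDropper) Drop(err error) {
	if e, ok := err.(journal.ErrCorrupted); ok {
		log.Printf("journal: dropped %d bytes: %s", e.Size, e.Reason)
	} else {
		log.Printf("journal: dropped: %v", err)
	}
}

// Non-strict reader with checksums enabled:
// r := journal.NewReader(f, logDropper{}, false, true)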


@@ -326,3 +326,148 @@ func TestStaleWriter(t *testing.T) {
t.Fatalf("stale write #1: unexpected error: %v", err)
}
}
func TestCorrupt_MissingLastBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
// Cut the last block.
b := buf.Bytes()[:blockSize]
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read.
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if n != blockSize-1024 {
t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
}
// Second read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
}
func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
buf := new(bytes.Buffer)
w := NewWriter(buf)
// First record.
ww, err := w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
t.Fatalf("write #0: unexpected error: %v", err)
}
// Second record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
t.Fatalf("write #1: unexpected error: %v", err)
}
// Third record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
t.Fatalf("write #2: unexpected error: %v", err)
}
// Fourth record.
ww, err = w.Next()
if err != nil {
t.Fatal(err)
}
if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
t.Fatalf("write #3: unexpected error: %v", err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
b := buf.Bytes()
// Corrupting block #1.
for i := 0; i < 1024; i++ {
b[blockSize+i] = '1'
}
r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
// First read.
rr, err := r.Next()
if err != nil {
t.Fatal(err)
}
n, err := io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #0: %v", err)
}
if want := int64(blockSize / 2); n != want {
t.Fatalf("read #0: got %d bytes want %d", n, want)
}
// Second read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != io.ErrUnexpectedEOF {
t.Fatalf("read #1: unexpected error: %v", err)
}
// Third read.
rr, err = r.Next()
if err != nil {
t.Fatal(err)
}
n, err = io.Copy(ioutil.Discard, rr)
if err != nil {
t.Fatalf("read #2: %v", err)
}
if want := int64(blockSize-headerSize) + 2; n != want {
t.Fatalf("read #2: got %d bytes want %d", n, want)
}
}


@@ -22,7 +22,7 @@ type dropper struct {
}
func (d dropper) Drop(err error) {
if e, ok := err.(journal.DroppedError); ok {
if e, ok := err.(journal.ErrCorrupted); ok {
d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
} else {
d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)


@@ -13,6 +13,7 @@ import (
"io"
"sort"
"strings"
"sync"
"code.google.com/p/snappy-go/snappy"
@@ -528,6 +529,8 @@ type Reader struct {
dataEnd int64
indexBlock *block
filterBlock *filterBlock
blockPool sync.Pool
}
func verifyChecksum(data []byte) bool {
@@ -538,7 +541,13 @@ func verifyChecksum(data []byte) bool {
}
func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
data := make([]byte, bh.length+blockTrailerLen)
data, _ := r.blockPool.Get().([]byte) // data is either nil or a valid []byte from the pool
if l := bh.length + blockTrailerLen; uint64(len(data)) >= l {
data = data[:l]
} else {
r.blockPool.Put(data)
data = make([]byte, l)
}
if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
return nil, err
}
@@ -552,10 +561,13 @@ func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
data = data[:bh.length]
case blockTypeSnappyCompression:
var err error
data, err = snappy.Decode(nil, data[:bh.length])
decData, _ := r.blockPool.Get().([]byte)
decData, err = snappy.Decode(decData, data[:bh.length])
if err != nil {
return nil, err
}
r.blockPool.Put(data[:cap(data)])
data = decData
default:
return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
}
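
The pooling above is the stock sync.Pool buffer-reuse pattern. A minimal, self-contained sketch of the same idea (names are illustrative):

var bufPool sync.Pool

// getBuf returns a buffer of length n, reusing pooled memory when possible.
func getBuf(n int) []byte {
	b, _ := bufPool.Get().([]byte) // b is nil when the pool is empty
	if cap(b) < n {
		return make([]byte, n)
	}
	return b[:n]
}

// putBuf hands a buffer back at full capacity, as the diff does with
// data[:cap(data)].
func putBuf(b []byte) { bufPool.Put(b[:cap(b)]) }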


File diff suppressed because one or more lines are too long

build.sh

@@ -119,6 +119,52 @@ transifex() {
assets
}
build-all() {
rm -f *.tar.gz *.zip
test -short || exit 1
assets
rm -rf bin Godeps/_workspace/pkg $GOPATH/pkg/*/github.com/syncthing
for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 solaris-amd64 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}
build $*
name="syncthing-${os/darwin/macosx}-$version"
case $GOOS in
windows)
zipDist "$name"
rm -f syncthing.exe
;;
*)
tarDist "$name"
rm -f syncthing
;;
esac
done
export GOOS=linux
export GOARCH=arm
origldflags="$ldflags"
export GOARM=7
ldflags="$origldflags -X main.GoArchExtra v7"
build $*
tarDist "syncthing-linux-armv7-$version"
export GOARM=6
ldflags="$origldflags -X main.GoArchExtra v6"
build $*
tarDist "syncthing-linux-armv6-$version"
export GOARM=5
ldflags="$origldflags -X main.GoArchExtra v5"
build $*
tarDist "syncthing-linux-armv5-$version"
}
case "$1" in
"")
shift
@@ -126,6 +172,15 @@ case "$1" in
godep go install $* -ldflags "$ldflags" ./cmd/...
;;
clean)
rm -rf bin Godeps/_workspace/pkg $GOPATH/pkg/*/github.com/syncthing
;;
noupgrade)
export GOBIN=$(pwd)/bin
godep go install -tags noupgrade -ldflags "$ldflags" ./cmd/...
;;
race)
build -race
;;
@@ -156,49 +211,13 @@ case "$1" in
;;
all)
rm -f *.tar.gz *.zip
test -short || exit 1
assets
for os in darwin-amd64 freebsd-amd64 freebsd-386 linux-amd64 linux-386 windows-amd64 windows-386 solaris-amd64 ; do
export GOOS=${os%-*}
export GOARCH=${os#*-}
build
name="syncthing-${os/darwin/macosx}-$version"
case $GOOS in
windows)
zipDist "$name"
rm -f syncthing.exe
;;
*)
tarDist "$name"
rm -f syncthing
;;
esac
done
export GOOS=linux
export GOARCH=arm
origldflags="$ldflags"
export GOARM=7
ldflags="$origldflags -X main.GoArchExtra v7"
build
tarDist "syncthing-linux-armv7-$version"
export GOARM=6
ldflags="$origldflags -X main.GoArchExtra v6"
build
tarDist "syncthing-linux-armv6-$version"
export GOARM=5
ldflags="$origldflags -X main.GoArchExtra v5"
build
tarDist "syncthing-linux-armv5-$version"
shift
build-all
;;
all-noupgrade)
shift
build-all -tags noupgrade
;;
upload)
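
Putting the new case arms together, the script now supports:

./build.sh clean          # remove bin/ and cached syncthing packages
./build.sh noupgrade      # local build with the noupgrade build tag
./build.sh all            # cross-compile release archives for every target
./build.sh all-noupgrade  # the same, built with -tags noupgrade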


@@ -32,7 +32,8 @@ func main() {
if *node == "" {
log.Printf("*** Global index for repo %q", *repo)
fs.WithGlobal(func(f protocol.FileInfo) bool {
fs.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
fmt.Println("\t", fs.Availability(f.Name))
return true
@@ -43,7 +44,8 @@ func main() {
log.Fatal(err)
}
log.Printf("*** Have index for repo %q node %q", *repo, n)
fs.WithHave(n, func(f protocol.FileInfo) bool {
fs.WithHaveTruncated(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
fmt.Println(f)
return true
})


@@ -126,6 +126,7 @@ func startGUI(cfg config.GUIConfiguration, assetDir string, m *model.Model) erro
postRestMux.HandleFunc("/rest/restart", restPostRestart)
postRestMux.HandleFunc("/rest/shutdown", restPostShutdown)
postRestMux.HandleFunc("/rest/upgrade", restPostUpgrade)
postRestMux.HandleFunc("/rest/scan", withModel(m, restPostScan))
// A handler that splits requests between the two above and disables
// caching
@@ -278,6 +279,8 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
err := json.NewDecoder(r.Body).Decode(&newCfg)
if err != nil {
l.Warnln(err)
http.Error(w, err.Error(), 500)
return
} else {
if newCfg.GUI.Password == "" {
// Leave it empty
@@ -287,6 +290,8 @@ func restPostConfig(m *model.Model, w http.ResponseWriter, r *http.Request) {
hash, err := bcrypt.GenerateFromPassword([]byte(newCfg.GUI.Password), 0)
if err != nil {
l.Warnln(err)
http.Error(w, err.Error(), 500)
return
} else {
newCfg.GUI.Password = string(hash)
}
@@ -515,7 +520,7 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
}
if upgrade.CompareVersions(rel.Tag, Version) == 1 {
err = upgrade.UpgradeTo(rel)
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Warnln(err)
http.Error(w, err.Error(), 500)
@@ -526,6 +531,16 @@ func restPostUpgrade(w http.ResponseWriter, r *http.Request) {
}
}
func restPostScan(m *model.Model, w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
repo := qs.Get("repo")
sub := qs.Get("sub")
err := m.ScanRepoSub(repo, sub)
if err != nil {
http.Error(w, err.Error(), 500)
}
}
func getQR(w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var text = qs.Get("text")
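
This wires commit 9752ea9ac3 into the REST layer: an external process can now trigger a rescan of a repo, or just a subdirectory of it, and gets a 500 with the error text if the repo or subpath is invalid. A sketch of the call, assuming the GUI's default listen address of the time and API-key auth via the X-API-Key header (the repo and sub values are examples):

curl -X POST -H "X-API-Key: <key>" \
  "http://127.0.0.1:8080/rest/scan?repo=default&sub=photos/2014"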

cmd/syncthing/heapprof.go Normal file

@@ -0,0 +1,46 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build heapprof
package main
import (
"fmt"
"os"
"runtime"
"runtime/pprof"
"syscall"
"time"
)
func init() {
go saveHeapProfiles()
}
func saveHeapProfiles() {
runtime.MemProfileRate = 1
var memstats, prevMemstats runtime.MemStats
t0 := time.Now()
for t := range time.NewTicker(250 * time.Millisecond).C {
startms := int(t.Sub(t0).Seconds() * 1000)
runtime.ReadMemStats(&memstats)
if memstats.HeapInuse > prevMemstats.HeapInuse {
fd, err := os.Create(fmt.Sprintf("heap-%05d-%07d.pprof", syscall.Getpid(), startms))
if err != nil {
panic(err)
}
err = pprof.WriteHeapProfile(fd)
if err != nil {
panic(err)
}
err = fd.Close()
if err != nil {
panic(err)
}
prevMemstats = memstats
}
}
}
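
The +build heapprof constraint means this file is compiled in only when that tag is set, and MemProfileRate = 1 makes such a build slow, so this is strictly a debugging aid. A sketch of a session (godep as used by build.sh; profile names follow the heap-<pid>-<ms>.pprof pattern above):

godep go build -tags heapprof ./cmd/syncthing
./syncthing                                   # writes a profile at each new HeapInuse high-water mark
go tool pprof syncthing heap-<pid>-<ms>.pprof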


@@ -47,6 +47,7 @@ var (
BuildHost = "unknown"
BuildUser = "unknown"
LongVersion string
GoArchExtra string // "", "v5", "v6", "v7"
)
var l = logger.DefaultLogger
@@ -194,7 +195,7 @@ func main() {
l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag)
if doUpgrade {
err = upgrade.UpgradeTo(rel)
err = upgrade.UpgradeTo(rel, GoArchExtra)
if err != nil {
l.Fatalln("Upgrade:", err) // exits 1
}


@@ -1,3 +1,7 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build perfstats
package main
@@ -40,6 +44,6 @@ func savePerfStats(file string) {
startms := int(t.Sub(t0).Seconds() * 1000)
fmt.Fprintf(fd, "%d\t%f\t%d\t%d\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys)
fmt.Fprintf(fd, "%d\t%f\t%d\t%d\n", startms, cpuUsagePercent, memstats.Alloc, memstats.Sys-memstats.HeapReleased)
}
}
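
The corrected column reports Sys minus HeapReleased, i.e. memory actually retained from the operating system; raw Sys is a cumulative high-water mark that never shrinks when the runtime returns pages to the OS.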


@@ -110,7 +110,6 @@ type OptionsConfiguration struct {
MaxSendKbps int `xml:"maxSendKbps"`
RescanIntervalS int `xml:"rescanIntervalS" default:"60"`
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"10000"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
URAccepted int `xml:"urAccepted"` // Accepted usage reporting version; 0 for off (undecided), -1 for off (permanently)


@@ -34,7 +34,6 @@ func TestDefaultValues(t *testing.T) {
MaxSendKbps: 0,
RescanIntervalS: 60,
ReconnectIntervalS: 60,
MaxChangeKbps: 10000,
StartBrowser: true,
UPnPEnabled: true,
}
@@ -189,7 +188,6 @@ func TestOverriddenValues(t *testing.T) {
<maxSendKbps>1234</maxSendKbps>
<rescanIntervalS>600</rescanIntervalS>
<reconnectionIntervalS>6000</reconnectionIntervalS>
<maxChangeKbps>2345</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
</options>
@@ -206,7 +204,6 @@ func TestOverriddenValues(t *testing.T) {
MaxSendKbps: 1234,
RescanIntervalS: 600,
ReconnectIntervalS: 6000,
MaxChangeKbps: 2345,
StartBrowser: false,
UPnPEnabled: false,
}


@@ -2,6 +2,7 @@ package files
import (
"bytes"
"runtime"
"sort"
"sync"
@@ -118,9 +119,11 @@ func globalKeyName(key []byte) []byte {
type deletionHandler func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64
type fileIterator func(f protocol.FileInfo) bool
type fileIterator func(f protocol.FileIntf) bool
func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo, deleteFn deletionHandler) uint64 {
defer runtime.GC()
sort.Sort(fileList(fs)) // sort list on name, same as on disk
start := nodeKey(repo, node, nil) // before all repo/node files
@@ -178,7 +181,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
case moreFs && moreDb && cmp == 0:
// File exists on both sides - compare versions.
var ef protocol.FileInfo
var ef protocol.FileInfoTruncated
ef.UnmarshalXDR(dbi.Value())
if fs[fsi].Version > ef.Version {
if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
@@ -223,20 +226,23 @@ func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint6
func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if !protocol.IsDeleted(f.Flags) {
if !tf.IsDeleted() {
if debug {
l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
}
ts := clock(f.LocalVersion)
f.Blocks = nil
f.Version = lamport.Default.Tick(f.Version)
f.Flags |= protocol.FlagDeleted
f.LocalVersion = ts
ts := clock(tf.LocalVersion)
f := protocol.FileInfo{
Name: tf.Name,
Version: lamport.Default.Tick(tf.Version),
LocalVersion: ts,
Flags: tf.Flags | protocol.FlagDeleted,
Modified: tf.Modified,
}
batch.Put(dbi.Key(), f.MarshalXDR())
ldbUpdateGlobal(db, batch, repo, node, nodeKeyName(dbi.Key()), f.Version)
return ts
@@ -246,6 +252,8 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
}
func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64 {
defer runtime.GC()
batch := new(leveldb.Batch)
snap, err := db.GetSnapshot()
if err != nil {
@@ -266,7 +274,7 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
continue
}
var ef protocol.FileInfo
var ef protocol.FileInfoTruncated
err = ef.UnmarshalXDR(bs)
if err != nil {
panic(err)
@@ -390,7 +398,7 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
}
}
func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
func ldbWithHave(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
start := nodeKey(repo, node, nil) // before all repo/node files
limit := nodeKey(repo, node, []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
@@ -402,8 +410,7 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
defer dbi.Release()
for dbi.Next() {
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
f, err := unmarshalTrunc(dbi.Value(), truncate)
if err != nil {
panic(err)
}
@@ -413,7 +420,9 @@ func ldbWithHave(db *leveldb.DB, repo, node []byte, fn fileIterator) {
}
}
func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfo) bool) {
func ldbWithAllRepoTruncated(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol.FileInfoTruncated) bool) {
defer runtime.GC()
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
@@ -426,7 +435,7 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
for dbi.Next() {
node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo
var f protocol.FileInfoTruncated
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
@@ -437,40 +446,6 @@ func ldbWithAllRepo(db *leveldb.DB, repo []byte, fn func(node []byte, f protocol
}
}
/*
func ldbCheckGlobalConsistency(db *leveldb.DB, repo []byte) {
l.Debugf("Checking global consistency for %q", repo)
start := nodeKey(repo, nil, nil) // before all repo/node files
limit := nodeKey(repo, protocol.LocalNodeID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all repo/node files
snap, err := db.GetSnapshot()
if err != nil {
panic(err)
}
defer snap.Release()
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()
batch := new(leveldb.Batch)
i := 0
for dbi.Next() {
repo := nodeKeyRepo(dbi.Key())
node := nodeKeyNode(dbi.Key())
var f protocol.FileInfo
err := f.UnmarshalXDR(dbi.Value())
if err != nil {
panic(err)
}
if ldbUpdateGlobal(snap, batch, repo, node, []byte(f.Name), f.Version) {
var nodeID protocol.NodeID
copy(nodeID[:], node)
l.Debugf("fixed global for %q %s %q", repo, nodeID, f.Name)
}
i++
}
l.Debugln("Done", i)
}
*/
func ldbGet(db *leveldb.DB, repo, node, file []byte) protocol.FileInfo {
nk := nodeKey(repo, node, file)
bs, err := db.Get(nk, nil)
@@ -529,7 +504,9 @@ func ldbGetGlobal(db *leveldb.DB, repo, file []byte) protocol.FileInfo {
return f
}
func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
func ldbWithGlobal(db *leveldb.DB, repo []byte, truncate bool, fn fileIterator) {
defer runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
@@ -556,8 +533,7 @@ func ldbWithGlobal(db *leveldb.DB, repo []byte, fn fileIterator) {
panic(err)
}
var f protocol.FileInfo
err = f.UnmarshalXDR(bs)
f, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
@@ -596,7 +572,9 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
return nodes
}
func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
func ldbWithNeed(db *leveldb.DB, repo, node []byte, truncate bool, fn fileIterator) {
defer runtime.GC()
start := globalKey(repo, nil)
limit := globalKey(repo, []byte{0xff, 0xff, 0xff, 0xff})
snap, err := db.GetSnapshot()
@@ -638,13 +616,12 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
panic(err)
}
var gf protocol.FileInfo
err = gf.UnmarshalXDR(bs)
gf, err := unmarshalTrunc(bs, truncate)
if err != nil {
panic(err)
}
if protocol.IsDeleted(gf.Flags) && !have {
if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have
continue
}
@@ -659,3 +636,15 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
}
}
}
func unmarshalTrunc(bs []byte, truncate bool) (protocol.FileIntf, error) {
if truncate {
var tf protocol.FileInfoTruncated
err := tf.UnmarshalXDR(bs)
return tf, err
} else {
var tf protocol.FileInfo
err := tf.UnmarshalXDR(bs)
return tf, err
}
}
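
unmarshalTrunc is the pivot of "Don't load block lists from db unless necessary": iterations that only need names, sizes and flags pass truncate=true and receive FileInfoTruncated values, skipping the block lists, while callers that need everything still get full FileInfos. An illustrative caller, matching the callbacks used elsewhere in this diff:

// Cheap size accounting: no block lists are decoded from the database.
s.WithHaveTruncated(node, func(fi protocol.FileIntf) bool {
	f := fi.(protocol.FileInfoTruncated)
	total += f.Size()
	return true
})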


@@ -36,7 +36,7 @@ func NewSet(repo string, db *leveldb.DB) *Set {
}
var nodeID protocol.NodeID
ldbWithAllRepo(db, []byte(repo), func(node []byte, f protocol.FileInfo) bool {
ldbWithAllRepoTruncated(db, []byte(repo), func(node []byte, f protocol.FileInfoTruncated) bool {
copy(nodeID[:], node)
if f.LocalVersion > s.localVersion[nodeID] {
s.localVersion[nodeID] = f.LocalVersion
@@ -87,21 +87,42 @@ func (s *Set) WithNeed(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.repo, node)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], fn)
ldbWithNeed(s.db, []byte(s.repo), node[:], false, fn)
}
func (s *Set) WithNeedTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.repo, node)
}
ldbWithNeed(s.db, []byte(s.repo), node[:], true, fn)
}
func (s *Set) WithHave(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.repo, node)
}
ldbWithHave(s.db, []byte(s.repo), node[:], fn)
ldbWithHave(s.db, []byte(s.repo), node[:], false, fn)
}
func (s *Set) WithHaveTruncated(node protocol.NodeID, fn fileIterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.repo, node)
}
ldbWithHave(s.db, []byte(s.repo), node[:], true, fn)
}
func (s *Set) WithGlobal(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobal()", s.repo)
}
ldbWithGlobal(s.db, []byte(s.repo), fn)
ldbWithGlobal(s.db, []byte(s.repo), false, fn)
}
func (s *Set) WithGlobalTruncated(fn fileIterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.repo)
}
ldbWithGlobal(s.db, []byte(s.repo), true, fn)
}
func (s *Set) Get(node protocol.NodeID, file string) protocol.FileInfo {


@@ -37,7 +37,8 @@ func genBlocks(n int) []protocol.BlockInfo {
func globalList(s *files.Set) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithGlobal(func(f protocol.FileInfo) bool {
s.WithGlobal(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -46,7 +47,8 @@ func globalList(s *files.Set) []protocol.FileInfo {
func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithHave(n, func(f protocol.FileInfo) bool {
s.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -55,7 +57,8 @@ func haveList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
func needList(s *files.Set, n protocol.NodeID) []protocol.FileInfo {
var fs []protocol.FileInfo
s.WithNeed(n, func(f protocol.FileInfo) bool {
s.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
fs = append(fs, f)
return true
})
@@ -592,3 +595,64 @@ func TestLocalVersion(t *testing.T) {
t.Fatal("Local version number should be unchanged")
}
}
/*
var gf protocol.FileInfo
func TestStressGlobalVersion(t *testing.T) {
dur := 15 * time.Second
if testing.Short() {
dur = 1 * time.Second
}
set1 := []protocol.FileInfo{
protocol.FileInfo{Name: "a", Version: 1000},
protocol.FileInfo{Name: "b", Version: 1000},
}
set2 := []protocol.FileInfo{
protocol.FileInfo{Name: "b", Version: 1001},
protocol.FileInfo{Name: "c", Version: 1000},
}
db, err := leveldb.OpenFile("testdata/global.db", nil)
if err != nil {
t.Fatal(err)
}
m := files.NewSet("test", db)
done := make(chan struct{})
go stressWriter(m, remoteNode, set1, nil, done)
go stressWriter(m, protocol.LocalNodeID, set2, nil, done)
t0 := time.Now()
for time.Since(t0) < dur {
m.WithGlobal(func(f protocol.FileInfo) bool {
gf = f
return true
})
}
close(done)
}
func stressWriter(s *files.Set, id protocol.NodeID, set1, set2 []protocol.FileInfo, done chan struct{}) {
one := true
i := 0
for {
select {
case <-done:
return
default:
if one {
s.Replace(id, set1)
} else {
s.Replace(id, set2)
}
one = !one
}
i++
}
}
*/


@@ -128,7 +128,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.$on('UIOffline', function (event, arg) {
console.log('UIOffline');
if (!restarting) {
$('#networkError').modal({backdrop: 'static', keyboard: false});
$('#networkError').modal();
}
});
@@ -188,7 +188,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
document.cookie = "firstVisit=" + Date.now() + ";max-age=" + 30*24*3600;
} else {
if (+firstVisit < Date.now() - 4*3600*1000){
$('#ur').modal({backdrop: 'static', keyboard: false});
$('#ur').modal();
}
}
}
@@ -471,7 +471,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.tmpOptions = angular.copy($scope.config.Options);
$scope.tmpOptions.UREnabled = ($scope.tmpOptions.URAccepted > 0);
$scope.tmpGUI = angular.copy($scope.config.GUI);
$('#settings').modal({backdrop: 'static', keyboard: true});
$('#settings').modal();
};
$scope.saveConfig = function() {
@@ -514,7 +514,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.restart = function () {
restarting = true;
$('#restarting').modal({backdrop: 'static', keyboard: false});
$('#restarting').modal();
$http.post(urlbase + '/restart');
$scope.configInSync = true;
@@ -536,9 +536,9 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.upgrade = function () {
restarting = true;
$('#upgrading').modal({backdrop: 'static', keyboard: false});
$('#upgrading').modal();
$http.post(urlbase + '/upgrade').success(function () {
$('#restarting').modal({backdrop: 'static', keyboard: false});
$('#restarting').modal();
$('#upgrading').modal('hide');
}).error(function () {
$('#upgrading').modal('hide');
@@ -548,7 +548,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.shutdown = function () {
restarting = true;
$http.post(urlbase + '/shutdown').success(function () {
$('#shutdown').modal({backdrop: 'static', keyboard: false});
$('#shutdown').modal();
});
$scope.configInSync = true;
};
@@ -559,7 +559,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.editingSelf = (nodeCfg.NodeID == $scope.myID);
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
$scope.nodeEditor.$setPristine();
$('#editNode').modal({backdrop: 'static', keyboard: true});
$('#editNode').modal();
};
$scope.idNode = function () {
@@ -571,7 +571,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.editingExisting = false;
$scope.editingSelf = false;
$scope.nodeEditor.$setPristine();
$('#editNode').modal({backdrop: 'static', keyboard: true});
$('#editNode').modal();
};
$scope.deleteNode = function () {
@@ -679,14 +679,14 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.currentRepo.simpleKeep = $scope.currentRepo.simpleKeep || 5;
$scope.editingExisting = true;
$scope.repoEditor.$setPristine();
$('#editRepo').modal({backdrop: 'static', keyboard: true});
$('#editRepo').modal();
};
$scope.addRepo = function () {
$scope.currentRepo = {selectedNodes: {}};
$scope.editingExisting = false;
$scope.repoEditor.$setPristine();
$('#editRepo').modal({backdrop: 'static', keyboard: true});
$('#editRepo').modal();
};
$scope.saveRepo = function () {
@@ -763,7 +763,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http, $translate, $loca
$scope.showNeed = function (repo) {
$scope.neededLoaded = false;
$('#needed').modal({backdrop: 'static', keyboard: true});
$('#needed').modal();
$http.get(urlbase + "/need?repo=" + encodeURIComponent(repo)).success(function (data) {
$scope.needed = data;
$scope.neededLoaded = true;


@@ -195,16 +195,16 @@
</tr>
<tr>
<th><span class="glyphicon glyphicon-globe"></span>&emsp;<span translate>Global Repository</span></th>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].globalBytes | binary}}B</td>
<td class="text-right">{{model[repo.ID].globalFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].globalBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-home"></span>&emsp;<span translate>Local Repository</span></th>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].localBytes | binary}}B</td>
<td class="text-right">{{model[repo.ID].localFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].localBytes | binary}}B</td>
</tr>
<tr>
<th><span class="glyphicon glyphicon-cloud-download"></span>&emsp;<span translate>Out Of Sync</span></th>
<td class="text-right">
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, {{model[repo.ID].needBytes | binary}}B</a>
<a ng-if="model[repo.ID].needFiles > 0" ng-click="showNeed(repo.ID)" href="">{{model[repo.ID].needFiles | alwaysNumber}} <span translate>items</span>, ~{{model[repo.ID].needBytes | binary}}B</a>
<span ng-if="model[repo.ID].needFiles == 0">0 <span translate>items</span>, 0 B</span>
</td>
</tr>
@@ -395,6 +395,8 @@
<p><span translate>Syncthing is restarting.</span> <span translate>Please wait</span>...</p>
</modal>
<!-- Upgrading modal -->
<modal id="upgrading" icon="refresh" title="{{'Upgrading' | translate}}" status="info">
<p><span translate>Syncthing is upgrading.</span> <span translate>Please wait</span>...</p>
</modal>
@@ -414,7 +416,7 @@
<!-- Node editor modal -->
<div id="editNode" class="modal fade">
<div id="editNode" class="modal fade" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
@@ -464,7 +466,7 @@
<!-- Repo editor modal -->
<div id="editRepo" class="modal fade">
<div id="editRepo" class="modal fade" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
@@ -558,7 +560,7 @@
<!-- Settings modal -->
<div id="settings" class="modal fade">
<div id="settings" class="modal fade" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header">
@@ -684,7 +686,7 @@
<!-- Usage report modal -->
<div id="ur" class="modal fade">
<div id="ur" class="modal fade" data-backdrop="static" data-keyboard="false" tabindex="-1">
<div class="modal-dialog modal-lg">
<div class="modal-content">
<div class="modal-header alert alert-success">


@@ -1,4 +1,4 @@
<div class="modal fade">
<div class="modal fade" tabindex="-1" ng-attr-data-backdrop="{{ close ? true : 'static' }}" ng-attr-data-keyboard="{{ Boolean(close) }}">
<div class="modal-dialog" ng-class="{'modal-lg': large}">
<div class="modal-content">
<div class="modal-header alert alert-{{status}}">
@@ -10,7 +10,7 @@
<div class="modal-body" ng-transclude>
</div>
<div ng-if="close" class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;Close</button>
<button type="button" class="btn btn-default" data-dismiss="modal"><span class="glyphicon glyphicon-remove"></span>&emsp;<span translate>Close</span></button>
</div>
</div>
</div>
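
The net effect of the two changes above: the JS no longer hard-codes backdrop and keyboard options per call, and the directive template derives them from whether the modal has a Close button, so dismissable dialogs get a normal backdrop while the rest stay static.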


Binary file not shown.

gui/raleway-500.ttf Normal file

Binary file not shown.


Binary file not shown.


@@ -1,12 +1,6 @@
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 400;
src: local('Raleway'), url(raleway-400.ttf) format('truetype');
}
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 700;
src: local('Raleway Bold'), local('Raleway-Bold'), url(raleway-700.ttf) format('truetype');
font-weight: 500;
src: local('Raleway'), url(raleway-500.ttf) format('truetype');
}


@@ -8,6 +8,7 @@ package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io/ioutil"
@@ -76,7 +77,9 @@ func main() {
if len(csrfToken) > 0 {
// If we have a CSRF token, verify that POST succeeds with it
tests = append(tests, testing.InternalTest{"TestPOSTWithCSRF", TestPOSTWithCSRF})
tests = append(tests, testing.InternalTest{"TestPostWitchCSRF", TestPostWitchCSRF})
tests = append(tests, testing.InternalTest{"TestGetPostConfigOK", TestGetPostConfigOK})
tests = append(tests, testing.InternalTest{"TestGetPostConfigFail", TestGetPostConfigFail})
}
fmt.Printf("Testing HTTP: CSRF=%v, API=%v, Auth=%v\n", len(csrfToken) > 0, len(apiKey) > 0, len(authUser) > 0)
@@ -184,7 +187,7 @@ func TestPOSTNoCSRF(t *testing.T) {
}
}
func TestPOSTWithCSRF(t *testing.T) {
func TestPostWitchCSRF(t *testing.T) {
r, err := http.NewRequest("POST", "http://"+target+"/rest/error/clear", nil)
if err != nil {
t.Fatal(err)
@@ -204,6 +207,96 @@ func TestPOSTWithCSRF(t *testing.T) {
}
}
func TestGetPostConfigOK(t *testing.T) {
// Get config
r, err := http.NewRequest("GET", "http://"+target+"/rest/config", nil)
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Status %d != 200 for POST", res.StatusCode)
}
bs, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
// Post same config back
r, err = http.NewRequest("POST", "http://"+target+"/rest/config", bytes.NewBuffer(bs))
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err = http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Status %d != 200 for POST", res.StatusCode)
}
}
func TestGetPostConfigFail(t *testing.T) {
// Get config
r, err := http.NewRequest("GET", "http://"+target+"/rest/config", nil)
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err := http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Status %d != 200 for POST", res.StatusCode)
}
bs, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
// Post same config back, with some characters missing to create a syntax error
r, err = http.NewRequest("POST", "http://"+target+"/rest/config", bytes.NewBuffer(bs[2:]))
if err != nil {
t.Fatal(err)
}
if len(csrfToken) > 0 {
r.Header.Set("X-CSRF-Token", csrfToken)
}
if len(authUser) > 0 {
r.SetBasicAuth(authUser, authPass)
}
res, err = http.DefaultClient.Do(r)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 500 {
t.Fatalf("Status %d != 500 for POST", res.StatusCode)
}
}
func TestJSONEndpointsNoAuth(t *testing.T) {
for _, p := range jsonEndpoints {
r, err := http.NewRequest("GET", "http://"+target+p, nil)


@@ -12,6 +12,7 @@ import (
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
@@ -55,7 +56,12 @@ func (s repoState) String() string {
const zeroEntrySize = 128
// How many files to send in each Index/IndexUpdate message.
const indexBatchSize = 1000
const (
indexTargetSize = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
indexPerFileSize = 250 // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
IndexPerBlockSize = 40 // Each BlockInfo is approximately this big
indexBatchSize = 1000 // Either way, don't include more files than this
)
type Model struct {
indexDir string
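
A rough worked example of these estimates: at the standard 128 KiB block size, a 128 MiB file has 1024 blocks and accounts for about 250 + 1024 * 40 ≈ 41 KiB of index message, so roughly six such files fill the 250 KiB target, well before the 1000-file cap; a batch of small files hits the file cap first.
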
@@ -65,12 +71,11 @@ type Model struct {
clientName string
clientVersion string
repoCfgs map[string]config.RepositoryConfiguration // repo -> cfg
repoFiles map[string]*files.Set // repo -> files
repoNodes map[string][]protocol.NodeID // repo -> nodeIDs
nodeRepos map[protocol.NodeID][]string // nodeID -> repos
suppressor map[string]*suppressor // repo -> suppressor
rmut sync.RWMutex // protects the above
repoCfgs map[string]config.RepositoryConfiguration // repo -> cfg
repoFiles map[string]*files.Set // repo -> files
repoNodes map[string][]protocol.NodeID // repo -> nodeIDs
nodeRepos map[protocol.NodeID][]string // nodeID -> repos
rmut sync.RWMutex // protects the above
repoState map[string]repoState // repo -> state
repoStateChanged map[string]time.Time // repo -> time when state changed
@@ -84,8 +89,6 @@ type Model struct {
sentLocalVer map[protocol.NodeID]map[string]uint64
slMut sync.Mutex
sup suppressor
addedRepo bool
started bool
}
@@ -111,12 +114,10 @@ func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVers
nodeRepos: make(map[protocol.NodeID][]string),
repoState: make(map[string]repoState),
repoStateChanged: make(map[string]time.Time),
suppressor: make(map[string]*suppressor),
protoConn: make(map[protocol.NodeID]protocol.Connection),
rawConn: make(map[protocol.NodeID]io.Closer),
nodeVer: make(map[protocol.NodeID]string),
sentLocalVer: make(map[protocol.NodeID]map[string]uint64),
sup: suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
}
var timeout = 20 * 60 // seconds
@@ -207,15 +208,9 @@ func (m *Model) Completion(node protocol.NodeID, repo string) float64 {
return 0 // Repo doesn't exist, so we hardly have any of it
}
rf.WithGlobal(func(f protocol.FileInfo) bool {
if !protocol.IsDeleted(f.Flags) {
var size int64
if protocol.IsDirectory(f.Flags) {
size = zeroEntrySize
} else {
size = f.Size()
}
tot += size
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
tot += f.Size()
}
return true
})
@@ -225,20 +220,19 @@ func (m *Model) Completion(node protocol.NodeID, repo string) float64 {
}
var need int64
rf.WithNeed(node, func(f protocol.FileInfo) bool {
if !protocol.IsDeleted(f.Flags) {
var size int64
if protocol.IsDirectory(f.Flags) {
size = zeroEntrySize
} else {
size = f.Size()
}
need += size
rf.WithNeedTruncated(node, func(f protocol.FileIntf) bool {
if !f.IsDeleted() {
need += f.Size()
}
return true
})
return 100 * (1 - float64(need)/float64(tot))
res := 100 * (1 - float64(need)/float64(tot))
if debug {
l.Debugf("Completion(%s, %q): %f (%d / %d)", node, repo, res, need, tot)
}
return res
}
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
@@ -251,18 +245,13 @@ func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
return
}
func sizeOfFile(f protocol.FileInfo) (files, deleted int, bytes int64) {
if !protocol.IsDeleted(f.Flags) {
func sizeOfFile(f protocol.FileIntf) (files, deleted int, bytes int64) {
if !f.IsDeleted() {
files++
if !protocol.IsDirectory(f.Flags) {
bytes += f.Size()
} else {
bytes += zeroEntrySize
}
} else {
deleted++
bytes += zeroEntrySize
}
bytes += f.Size()
return
}
@@ -272,7 +261,7 @@ func (m *Model) GlobalSize(repo string) (files, deleted int, bytes int64) {
m.rmut.RLock()
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
rf.WithGlobal(func(f protocol.FileInfo) bool {
rf.WithGlobalTruncated(func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
@@ -289,7 +278,7 @@ func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
m.rmut.RLock()
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
rf.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
rf.WithHaveTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs
deleted += de
@@ -305,13 +294,16 @@ func (m *Model) NeedSize(repo string) (files int, bytes int64) {
m.rmut.RLock()
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
rf.WithNeedTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs, de, by := sizeOfFile(f)
files += fs + de
bytes += by
return true
})
}
if debug {
l.Debugf("NeedSize(%q): %d %d", repo, files, bytes)
}
return
}
@@ -321,8 +313,8 @@ func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
defer m.rmut.RUnlock()
if rf, ok := m.repoFiles[repo]; ok {
fs := make([]protocol.FileInfo, 0, indexBatchSize)
rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
fs = append(fs, f)
rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
fs = append(fs, f.(protocol.FileInfo))
return len(fs) < indexBatchSize
})
return fs
@@ -592,10 +584,12 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
nodeID := conn.ID()
name := conn.Name()
batch := make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize := 0
maxLocalVer := uint64(0)
var err error
fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
fs.WithHave(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
if f.LocalVersion <= minLocalVer {
return true
}
@@ -604,13 +598,13 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
maxLocalVer = f.LocalVersion
}
if len(batch) == indexBatchSize {
if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
if initial {
if err = conn.Index(repo, batch); err != nil {
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (initial index)", nodeID, name, repo, len(batch))
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", nodeID, name, repo, len(batch), currentBatchSize)
}
initial = false
} else {
@@ -618,14 +612,16 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
return false
}
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (batched update)", nodeID, name, repo, len(batch))
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", nodeID, name, repo, len(batch), currentBatchSize)
}
}
batch = make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize = 0
}
batch = append(batch, f)
currentBatchSize += indexPerFileSize + len(f.Blocks)*IndexPerBlockSize
return true
})
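Note: batches are now flushed on an estimated byte size as well as on file count. A sketch of that estimate with illustrative constant values (the real constants are defined elsewhere in model.go and may differ):
// Illustrative values only; not the constants from model.go.
const (
	indexTargetSize   = 250 * 1024 // assumed flush threshold per index message
	indexPerFileSize  = 250        // assumed bytes per FileInfo, excluding blocks
	indexPerBlockSize = 40         // assumed bytes per BlockInfo
)
// estimatedIndexSize approximates the wire size of one file entry.
// A 1 MiB file in 128 KiB blocks has 8 blocks: 250 + 8*40 = 570 bytes.
func estimatedIndexSize(numBlocks int) int {
	return indexPerFileSize + numBlocks*indexPerBlockSize
}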
@@ -685,7 +681,6 @@ func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
m.rmut.Lock()
m.repoCfgs[cfg.ID] = cfg
m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}
m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes))
for i, node := range cfg.Nodes {
@@ -744,20 +739,31 @@ func (m *Model) CleanRepos() {
}
func (m *Model) ScanRepo(repo string) error {
return m.ScanRepoSub(repo, "")
}
func (m *Model) ScanRepoSub(repo, sub string) error {
if p := filepath.Clean(filepath.Join(repo, sub)); !strings.HasPrefix(p, repo) {
return errors.New("invalid subpath")
}
m.rmut.RLock()
fs := m.repoFiles[repo]
fs, ok := m.repoFiles[repo]
dir := m.repoCfgs[repo].Directory
w := &scanner.Walker{
Dir: dir,
Sub: sub,
IgnoreFile: ".stignore",
BlockSize: scanner.StandardBlockSize,
TempNamer: defTempNamer,
Suppressor: m.suppressor[repo],
CurrentFiler: cFiler{m, repo},
IgnorePerms: m.repoCfgs[repo].IgnorePerms,
}
m.rmut.RUnlock()
if !ok {
return errors.New("no such repo")
}
m.setState(repo, RepoScanning)
fchan, _, err := w.Walk()
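Note: the prefix check at the top of ScanRepoSub cleans the joined path and requires it to stay under the repo root, rejecting subpaths that escape upwards. A self-contained sketch with hypothetical inputs:
package main
import (
	"fmt"
	"path/filepath"
	"strings"
)
// validSub reproduces the guard: Join also Cleans, so "../x" collapses
// out of the repo prefix and fails the HasPrefix test.
func validSub(repo, sub string) bool {
	p := filepath.Clean(filepath.Join(repo, sub))
	return strings.HasPrefix(p, repo)
}
func main() {
	fmt.Println(validSub("myrepo", "docs/sub")) // true
	fmt.Println(validSub("myrepo", "../other")) // false
}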
@@ -786,7 +792,14 @@ func (m *Model) ScanRepo(repo string) error {
}
batch = batch[:0]
fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
// TODO: We should limit the Have scanning to start at sub
seenPrefix := false
fs.WithHaveTruncated(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfoTruncated)
if !strings.HasPrefix(f.Name, sub) {
return !seenPrefix
}
seenPrefix = true
if !protocol.IsDeleted(f.Flags) {
if len(batch) == batchSize {
fs.Update(protocol.LocalNodeID, batch)
@@ -794,10 +807,12 @@ func (m *Model) ScanRepo(repo string) error {
}
if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
// File has been deleted
f.Blocks = nil
f.Flags |= protocol.FlagDeleted
f.Version = lamport.Default.Tick(f.Version)
f.LocalVersion = 0
nf := protocol.FileInfo{
Name: f.Name,
Flags: f.Flags | protocol.FlagDeleted,
Modified: f.Modified,
Version: lamport.Default.Tick(f.Version),
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"repo": repo,
"name": f.Name,
@@ -805,7 +820,7 @@ func (m *Model) ScanRepo(repo string) error {
"flags": fmt.Sprintf("0%o", f.Flags),
"size": f.Size(),
})
batch = append(batch, f)
batch = append(batch, nf)
}
}
return true
@@ -878,7 +893,8 @@ func (m *Model) Override(repo string) {
m.rmut.RUnlock()
batch := make([]protocol.FileInfo, 0, indexBatchSize)
fs.WithNeed(protocol.LocalNodeID, func(need protocol.FileInfo) bool {
fs.WithNeed(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
if len(batch) == indexBatchSize {
fs.Update(protocol.LocalNodeID, batch)
batch = batch[:0]

View File

@@ -2,6 +2,25 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
/*
 __        __               _             _
 \ \      / /_ _ _ __ _ __ (_)_ __   __ _| |
  \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | |
   \ V  V / (_| | |  | | | | | | | | (_| |_|
    \_/\_/ \__,_|_|  |_| |_|_|_| |_|\__, (_)
                                    |___/

The code in this file is a piece of crap. Don't base anything on it.
Refactoring ongoing in the new-puller branch.

 __        __               _             _
 \ \      / /_ _ _ __ _ __ (_)_ __   __ _| |
  \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | |
   \ V  V / (_| | |  | | | | | | | | (_| |_|
    \_/\_/ \__,_|_|  |_| |_|_|_| |_|\__, (_)
                                    |___/
*/
package model
import (
@@ -441,7 +460,9 @@ func (p *puller) handleBlock(b bqBlock) bool {
err = os.MkdirAll(dirName, 0777)
} else {
// We need to make sure the directory is writeable so we can create files in it
err = os.Chmod(dirName, 0777)
if dirName != p.repoCfg.Directory {
err = os.Chmod(dirName, 0777)
}
}
if err != nil {
l.Infof("mkdir: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
@@ -600,9 +621,13 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
l.Debugf("pull: delete %q", f.Name)
}
os.Remove(of.temp)
// Ensure the file and the directory it is in are writable so we can remove the file
dirName := filepath.Dir(of.filepath)
os.Chmod(of.filepath, 0666)
os.Chmod(filepath.Dir(of.filepath), 0777)
if dirName != p.repoCfg.Directory {
os.Chmod(dirName, 0777)
}
if p.versioner != nil {
if debug {
l.Debugln("pull: deleting with versioner")
@@ -703,7 +728,7 @@ func (p *puller) closeFile(f protocol.FileInfo) {
l.Infof("open: error: %q / %q: %v", p.repoCfg.ID, f.Name, err)
return
}
hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize)
hb, _ := scanner.Blocks(fd, scanner.StandardBlockSize, f.Size())
fd.Close()
if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {

View File

@@ -1,81 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"os"
"sync"
"time"
)
const (
MaxChangeHistory = 4
)
type change struct {
size int64
when time.Time
}
type changeHistory struct {
changes []change
next int64
prevSup bool
}
type suppressor struct {
sync.Mutex
changes map[string]changeHistory
threshold int64 // bytes/s
}
func (h changeHistory) bandwidth(t time.Time) int64 {
if len(h.changes) == 0 {
return 0
}
var t0 = h.changes[0].when
if t == t0 {
return 0
}
var bw float64
for _, c := range h.changes {
bw += float64(c.size)
}
return int64(bw / t.Sub(t0).Seconds())
}
func (h *changeHistory) append(size int64, t time.Time) {
c := change{size, t}
if len(h.changes) == MaxChangeHistory {
h.changes = h.changes[1:MaxChangeHistory]
}
h.changes = append(h.changes, c)
}
func (s *suppressor) Suppress(name string, fi os.FileInfo) (cur, prev bool) {
return s.suppress(name, fi.Size(), time.Now())
}
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()
if s.changes == nil {
s.changes = make(map[string]changeHistory)
}
h := s.changes[name]
sup := h.bandwidth(t) > s.threshold
prevSup := h.prevSup
h.prevSup = sup
if !sup {
h.append(size, t)
}
s.changes[name] = h
s.Unlock()
return sup, prevSup
}

View File

@@ -1,117 +0,0 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package model
import (
"testing"
"time"
)
func TestSuppressor(t *testing.T) {
s := suppressor{threshold: 10000}
t0 := time.Now()
t1 := t0
sup, prev := s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Never suppress first change")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is 10000 / 10 = 1000
t1 = t0.Add(10 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1000 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000) / 11 = 1818
t1 = t0.Add(11 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 1818 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 100500, t1)
if sup {
t.Fatal("Should still be fine")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 12 = 10041
t1 = t0.Add(12 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 10041 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1) // value will be ignored
if !sup {
t.Fatal("Should be over threshold")
}
if prev {
t.Fatal("Incorrect prev status")
}
// bw is (10000 + 10000 + 100500) / 15 = 8033
t1 = t0.Add(15 * time.Second)
if bw := s.changes["foo"].bandwidth(t1); bw != 8033 {
t.Errorf("Incorrect bw %d", bw)
}
sup, prev = s.suppress("foo", 10000000, t1)
if sup {
t.Fatal("Should be Ok")
}
if !prev {
t.Fatal("Incorrect prev status")
}
}
func TestHistory(t *testing.T) {
h := changeHistory{}
t0 := time.Now()
h.append(40, t0)
if l := len(h.changes); l != 1 {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
for i := 1; i < MaxChangeHistory; i++ {
h.append(int64(40+i), t0.Add(time.Duration(i)*time.Second))
}
if l := len(h.changes); l != MaxChangeHistory {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 40 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MaxChangeHistory-1].size; s != 40+MaxChangeHistory-1 {
t.Errorf("Incorrect last record size %d", s)
}
h.append(999, t0.Add(time.Duration(999)*time.Second))
if l := len(h.changes); l != MaxChangeHistory {
t.Errorf("Incorrect history length %d", l)
}
if s := h.changes[0].size; s != 41 {
t.Errorf("Incorrect first record size %d", s)
}
if s := h.changes[MaxChangeHistory-1].size; s != 999 {
t.Errorf("Incorrect last record size %d", s)
}
}

View File

@@ -26,12 +26,50 @@ func (f FileInfo) String() string {
}
func (f FileInfo) Size() (bytes int64) {
if IsDeleted(f.Flags) || IsDirectory(f.Flags) {
return 128
}
for _, b := range f.Blocks {
bytes += int64(b.Size)
}
return
}
func (f FileInfo) IsDeleted() bool {
return IsDeleted(f.Flags)
}
// Used for unmarshalling a FileInfo structure but skipping the actual block list
type FileInfoTruncated struct {
Name string // max:1024
Flags uint32
Modified int64
Version uint64
LocalVersion uint64
NumBlocks uint32
}
// Returns a statistical guess at the size, not the exact figure
func (f FileInfoTruncated) Size() int64 {
if IsDeleted(f.Flags) || IsDirectory(f.Flags) {
return 128
}
if f.NumBlocks < 2 {
return BlockSize / 2
} else {
return int64(f.NumBlocks-1)*BlockSize + BlockSize/2
}
}
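Note: the guess counts every block but the last as full and the last as half full on average. A worked sketch, assuming BlockSize is the standard 128 KiB:
const assumedBlockSize = 128 << 10 // assumption: protocol BlockSize is 128 KiB
// guessSize mirrors FileInfoTruncated.Size for regular files.
func guessSize(numBlocks uint32) int64 {
	if numBlocks < 2 {
		return assumedBlockSize / 2 // 65536
	}
	return int64(numBlocks-1)*assumedBlockSize + assumedBlockSize/2
}
// guessSize(3) == 2*131072 + 65536 == 327680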
func (f FileInfoTruncated) IsDeleted() bool {
return IsDeleted(f.Flags)
}
type FileIntf interface {
Size() int64
IsDeleted() bool
}
type BlockInfo struct {
Offset int64 // noencode (cache only)
Size uint32

View File

@@ -199,6 +199,98 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
/*
FileInfoTruncated Structure:
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length of Name |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Name (variable length) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Flags |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Modified (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ Local Version (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Num Blocks |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct FileInfoTruncated {
string Name<1024>;
unsigned int Flags;
hyper Modified;
unsigned hyper Version;
unsigned hyper LocalVersion;
unsigned int NumBlocks;
}
*/
func (o FileInfoTruncated) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o FileInfoTruncated) MarshalXDR() []byte {
return o.AppendXDR(make([]byte, 0, 128))
}
func (o FileInfoTruncated) AppendXDR(bs []byte) []byte {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
o.encodeXDR(xw)
return []byte(aw)
}
func (o FileInfoTruncated) encodeXDR(xw *xdr.Writer) (int, error) {
if len(o.Name) > 1024 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.Name)
xw.WriteUint32(o.Flags)
xw.WriteUint64(uint64(o.Modified))
xw.WriteUint64(o.Version)
xw.WriteUint64(o.LocalVersion)
xw.WriteUint32(o.NumBlocks)
return xw.Tot(), xw.Error()
}
func (o *FileInfoTruncated) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *FileInfoTruncated) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
}
func (o *FileInfoTruncated) decodeXDR(xr *xdr.Reader) error {
o.Name = xr.ReadStringMax(1024)
o.Flags = xr.ReadUint32()
o.Modified = int64(xr.ReadUint64())
o.Version = xr.ReadUint64()
o.LocalVersion = xr.ReadUint64()
o.NumBlocks = xr.ReadUint32()
return xr.Error()
}
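Note: a round-trip sketch for the truncated structure, using hypothetical field values (import path as elsewhere in this tree):
package main
import (
	"fmt"
	"log"

	"github.com/syncthing/syncthing/protocol"
)
func main() {
	f := protocol.FileInfoTruncated{Name: "docs/readme.txt", NumBlocks: 3}
	bs := f.MarshalXDR()

	var g protocol.FileInfoTruncated
	if err := g.UnmarshalXDR(bs); err != nil {
		log.Fatal(err)
	}
	fmt.Println(g.Name, g.NumBlocks) // docs/readme.txt 3
}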
/*
BlockInfo Structure:
 0                   1                   2                   3

View File

@@ -49,7 +49,15 @@ func hashFile(dir string, blockSize int, outbox, inbox chan protocol.FileInfo) {
continue
}
blocks, err := Blocks(fd, blockSize)
fi, err := fd.Stat()
if err != nil {
fd.Close()
if debug {
l.Debugln("stat:", err)
}
continue
}
blocks, err := Blocks(fd, blockSize, fi.Size())
fd.Close()
if err != nil {

View File

@@ -17,12 +17,15 @@ const StandardBlockSize = 128 * 1024
var sha256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int) ([]protocol.BlockInfo, error) {
func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
var blocks []protocol.BlockInfo
if sizehint > 0 {
blocks = make([]protocol.BlockInfo, 0, int(sizehint/int64(blocksize)))
}
var offset int64
hf := sha256.New()
for {
lr := &io.LimitedReader{R: r, N: int64(blocksize)}
hf := sha256.New()
n, err := io.Copy(hf, lr)
if err != nil {
return nil, err
@@ -39,6 +42,8 @@ func Blocks(r io.Reader, blocksize int) ([]protocol.BlockInfo, error) {
}
blocks = append(blocks, b)
offset += int64(n)
hf.Reset()
}
if len(blocks) == 0 {
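Note: Blocks now takes a size hint so the result slice can be preallocated, and a single sha256 instance is reused via Reset() instead of being allocated per block. A hedged caller sketch (file name hypothetical; import path as elsewhere in this tree; a hint of 0 is valid and simply skips the preallocation):
package main
import (
	"fmt"
	"log"
	"os"

	"github.com/syncthing/syncthing/scanner"
)
func main() {
	fd, err := os.Open("somefile.bin") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer fd.Close()

	fi, err := fd.Stat()
	if err != nil {
		log.Fatal(err)
	}

	blocks, err := scanner.Blocks(fd, scanner.StandardBlockSize, fi.Size())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d blocks of up to %d bytes\n", len(blocks), scanner.StandardBlockSize)
}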

View File

@@ -49,7 +49,7 @@ var blocksTestData = []struct {
func TestBlocks(t *testing.T) {
for _, test := range blocksTestData {
buf := bytes.NewBuffer(test.data)
blocks, err := Blocks(buf, test.blocksize)
blocks, err := Blocks(buf, test.blocksize, 0)
if err != nil {
t.Fatal(err)
@@ -103,8 +103,8 @@ var diffTestData = []struct {
func TestDiff(t *testing.T) {
for i, test := range diffTestData {
a, _ := Blocks(bytes.NewBufferString(test.a), test.s)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s)
a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0)
_, d := BlockDiff(a, b)
if len(d) != len(test.d) {
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))

View File

@@ -22,6 +22,8 @@ import (
type Walker struct {
// Dir is the base directory for the walk
Dir string
// Limit walking to this path within Dir, or no limit if Sub is blank
Sub string
// BlockSize controls the size of the block used when hashing.
BlockSize int
// If IgnoreFile is not empty, it is the name used for the file that holds ignore patterns.
@@ -30,10 +32,6 @@ type Walker struct {
TempNamer TempNamer
// If CurrentFiler is not nil, it is queried for the current file before rescanning.
CurrentFiler CurrentFiler
// If Suppressor is not nil, it is queried for suppression of modified files.
// Suppressed files will be returned with empty metadata and the Suppressed flag set.
// Requires CurrentFiler to be set.
Suppressor Suppressor
// If IgnorePerms is true, changes to permission bits will not be
// detected. Scanned files will get zero permission bits and the
// NoPermissionBits flag set.
@@ -47,11 +45,6 @@ type TempNamer interface {
IsTemporary(path string) bool
}
type Suppressor interface {
// Suppress returns true if the update to the named file should be ignored.
Suppress(name string, fi os.FileInfo) (bool, bool)
}
type CurrentFiler interface {
// CurrentFile returns the file as seen at last scan.
CurrentFile(name string) protocol.FileInfo
@@ -61,7 +54,7 @@ type CurrentFiler interface {
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, map[string][]string, error) {
if debug {
l.Debugln("Walk", w.Dir, w.BlockSize, w.IgnoreFile)
l.Debugln("Walk", w.Dir, w.Sub, w.BlockSize, w.IgnoreFile)
}
err := checkDir(w.Dir)
@@ -77,7 +70,7 @@ func (w *Walker) Walk() (chan protocol.FileInfo, map[string][]string, error) {
go func() {
filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, ignore))
filepath.Walk(w.Dir, hashFiles)
filepath.Walk(filepath.Join(w.Dir, w.Sub), hashFiles)
close(files)
}()
@@ -199,22 +192,6 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo, ign map[string][
return nil
}
if w.Suppressor != nil {
if cur, prev := w.Suppressor.Suppress(rn, info); cur && !prev {
l.Infof("Changes to %q are being temporarily suppressed because it changes too frequently.", p)
cf.Flags |= protocol.FlagInvalid
cf.Version = lamport.Default.Tick(cf.Version)
cf.LocalVersion = 0
if debug {
l.Debugln("suppressed:", cf)
}
fchan <- cf
return nil
} else if prev && !cur {
l.Infof("Changes to %q are no longer suppressed.", p)
}
}
if debug {
l.Debugln("rescan:", cf, info.ModTime().Unix(), info.Mode()&os.ModePerm)
}

View File

@@ -29,6 +29,30 @@ var correctIgnores = map[string][]string{
".": {".*", "quux"},
}
func TestWalkSub(t *testing.T) {
w := Walker{
Dir: "testdata",
Sub: "foo",
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
fchan, _, err := w.Walk()
var files []protocol.FileInfo
for f := range fchan {
files = append(files, f)
}
if err != nil {
t.Fatal(err)
}
if len(files) != 1 {
t.Fatalf("Incorrect length %d != 1", len(files))
}
if files[0].Name != "foo" {
t.Errorf("Incorrect file %v != foo", files[0])
}
}
func TestWalk(t *testing.T) {
w := Walker{
Dir: "testdata",

17
upgrade/debug.go Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package upgrade
import (
"os"
"strings"
"github.com/syncthing/syncthing/logger"
)
var (
debug = strings.Contains(os.Getenv("STTRACE"), "upgrade") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
)

View File

@@ -23,10 +23,8 @@ import (
"bitbucket.org/kardianos/osext"
)
var GoArchExtra string // "", "v5", "v6", "v7"
// Upgrade to the given release, saving the previous binary with a ".old" extension.
func UpgradeTo(rel Release) error {
func UpgradeTo(rel Release, archExtra string) error {
path, err := osext.Executable()
if err != nil {
return err
@@ -38,8 +36,14 @@ func UpgradeTo(rel Release) error {
// sense for people downloading them
osName = "macosx"
}
expectedRelease := fmt.Sprintf("syncthing-%s-%s%s-%s.", osName, runtime.GOARCH, GoArchExtra, rel.Tag)
expectedRelease := fmt.Sprintf("syncthing-%s-%s%s-%s.", osName, runtime.GOARCH, archExtra, rel.Tag)
if debug {
l.Debugf("expected release asset %q", expectedRelease)
}
for _, asset := range rel.Assets {
if debug {
l.Debugln("considering release", asset)
}
if strings.HasPrefix(asset.Name, expectedRelease) {
if strings.HasSuffix(asset.Name, ".tar.gz") {
fname, err := readTarGZ(asset.URL, filepath.Dir(path))
@@ -97,6 +101,10 @@ func LatestRelease(prerelease bool) (Release, error) {
}
func readTarGZ(url string, dir string) (string, error) {
if debug {
l.Debugf("loading %q", url)
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
@@ -129,6 +137,9 @@ func readTarGZ(url string, dir string) (string, error) {
if err != nil {
return "", err
}
if debug {
l.Debugf("considering file %q", hdr.Name)
}
if path.Base(hdr.Name) == "syncthing" {
of, err := ioutil.TempFile(dir, "syncthing")

View File

@@ -2,11 +2,11 @@
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build windows solaris noupgrade
// +build solaris noupgrade
package upgrade
func UpgradeTo(rel Release) error {
func UpgradeTo(rel Release, extra string) error {
return ErrUpgradeUnsupported
}

166
upgrade/upgrade_windows.go Executable file
View File

@@ -0,0 +1,166 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build windows,!noupgrade
package upgrade
import (
"archive/zip"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"bitbucket.org/kardianos/osext"
)
// Upgrade to the given release, saving the previous binary with a ".old" extension.
func UpgradeTo(rel Release, archExtra string) error {
path, err := osext.Executable()
if err != nil {
return err
}
expectedRelease := fmt.Sprintf("syncthing-%s-%s%s-%s.", runtime.GOOS, runtime.GOARCH, archExtra, rel.Tag)
if debug {
l.Debugf("expected release asset %q", expectedRelease)
}
for _, asset := range rel.Assets {
if debug {
l.Debugln("considering release", asset)
}
if strings.HasPrefix(asset.Name, expectedRelease) {
if strings.HasSuffix(asset.Name, ".zip") {
fname, err := readZip(asset.URL, filepath.Dir(path))
if err != nil {
return err
}
old := path + ".old"
os.Remove(old)
err = os.Rename(path, old)
if err != nil {
return err
}
err = os.Rename(fname, path)
if err != nil {
return err
}
return nil
}
}
}
return ErrVersionUnknown
}
// Returns the latest release, including prereleases only if the argument is true
func LatestRelease(prerelease bool) (Release, error) {
resp, err := http.Get("https://api.github.com/repos/syncthing/syncthing/releases?per_page=10")
if err != nil {
return Release{}, err
}
if resp.StatusCode > 299 {
return Release{}, fmt.Errorf("API call returned HTTP error: %s", resp.Status)
}
var rels []Release
json.NewDecoder(resp.Body).Decode(&rels)
resp.Body.Close()
if len(rels) == 0 {
return Release{}, ErrVersionUnknown
}
if prerelease {
// We are a beta version. Use the latest.
return rels[0], nil
} else {
// We are a regular release. Only consider non-prerelease versions for upgrade.
for _, rel := range rels {
if !rel.Prerelease {
return rel, nil
}
}
return Release{}, ErrVersionUnknown
}
}
func readZip(url, dir string) (string, error) {
if debug {
l.Debugf("loading %q", url)
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Header.Add("Accept", "application/octet-stream")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
archive, err := zip.NewReader(bytes.NewReader(body), resp.ContentLength)
if err != nil {
return "", err
}
// Iterate through the files in the archive.
for _, file := range archive.File {
if debug {
l.Debugf("considering file %q", file.Name)
}
if path.Base(file.Name) == "syncthing.exe" {
infile, err := file.Open()
if err != nil {
return "", err
}
outfile, err := ioutil.TempFile(dir, "syncthing")
if err != nil {
return "", err
}
_, err = io.Copy(outfile, infile)
if err != nil {
return "", err
}
err = infile.Close()
if err != nil {
return "", err
}
err = outfile.Close()
if err != nil {
os.Remove(outfile.Name())
return "", err
}
os.Chmod(outfile.Name(), file.Mode())
return outfile.Name(), nil
}
}
return "", fmt.Errorf("No upgrade found")
}