Compare commits

...

61 Commits

Author  SHA1  Message  Date
Jakob Borg  6807d9bd4c  Fix upgrade non-support on Windows  2014-05-02 20:19:21 +02:00
Jakob Borg  699ecc7140  Some places should use RLock instead of Lock (ref #169)  2014-05-02 17:15:04 +02:00
Jakob Borg  b374ec9355  Save temporary in correct dir during upgrade  2014-05-02 17:04:45 +02:00
Jakob Borg  9659d021cb  Merge pull request #170 from andrew-d/patch-1 (Fix typo in header name)  2014-05-02 16:59:08 +02:00
Jakob Borg  a4ad9eb134  Add andrew-d  2014-05-02 16:58:55 +02:00
Andrew  a455258a62  Fix typo in header name  2014-05-02 01:26:12 -07:00
Jakob Borg  0ae342673a  Update saved dependencies  2014-05-02 10:05:48 +02:00
Jakob Borg  33d75a264d  Built in upgrade functionality  2014-05-02 10:01:09 +02:00
Jakob Borg  89dc5bb951  Windows doesn't have SysProcAttr  2014-05-02 08:57:34 +02:00
Jakob Borg  45403917de  Minor cleanup in discovery  2014-05-02 08:53:19 +02:00
Jakob Borg  ed476271a6  Start xdg-open in new process group (fixes #164)  2014-05-02 08:53:05 +02:00
Jakob Borg  1e92c47960  Don't bother starting without GUI (fixes #156)  2014-04-30 22:52:38 +02:00
Jakob Borg  4f2fe07ae4  Show node ID in regular text not disabled control (fixes #162)  2014-04-30 22:42:39 +02:00
Jakob Borg  aff3cd01c5  Don't show Offline badge when global disco is disabled (fixes #167)  2014-04-30 22:17:43 +02:00
Jakob Borg  ac74ee1468  Don't redirect to absolute URL (fixes #166)  2014-04-30 22:10:13 +02:00
Jakob Borg  0d55cf4be5  Don't use absolute URL for rest calls (fixes #166)  2014-04-30 22:02:34 +02:00
Jakob Borg  5399a25532  Getting started  2014-04-30 16:13:29 +02:00
Jakob Borg  ae882c93c9  Links to discourse  2014-04-30 15:14:42 +02:00
Jakob Borg  f398ca77c1  Better trace output from mc  2014-04-30 15:13:54 +02:00
Jakob Borg  dcd7d278aa  Handle and indicate duplicate repo ID:s (fixes #153)  2014-04-27 21:53:27 +02:00
Jakob Borg  89f5f3bf9a  Fix small data races  2014-04-27 21:33:57 +02:00
Jakob Borg  76ef42ee07  No drone.io badge  2014-04-27 13:37:53 +02:00
Jakob Borg  92c1ce57a6  Fix protocol close test  2014-04-27 13:25:35 +02:00
Jakob Borg  116f232f5a  Streamline error handling and locking  2014-04-27 13:10:50 +02:00
Jakob Borg  ef81a36654  Extract method closeFile  2014-04-27 12:14:53 +02:00
Jakob Borg  9fd2724d73  Simplify requestSlots filling  2014-04-27 12:06:11 +02:00
Jakob Borg  07d49b61d0  Debug utility to print index file  2014-04-25 08:28:56 +02:00
Jakob Borg  0c4e6ae7de  Safety: don't start if repo dir is missing (ref #154)  2014-04-24 10:27:43 +02:00
Jakob Borg  65ec129dfb  Only create default config if it is actually missing (fixes #139)  2014-04-23 10:28:36 +02:00
Jakob Borg  3e4d628f54  Handle non-word characters in repo name (fixes #152)  2014-04-23 10:04:25 +02:00
Jakob Borg  71684bfa45  Use a more lenient cluster config check (fixes #148)  2014-04-22 16:42:25 +02:00
Jakob Borg  e73b7e0398  Show properly formatted time (fixes #149)  2014-04-22 15:59:16 +02:00
Jakob Borg  35ebdc76ff  Hide temporary files on Windows (fixes #146)  2014-04-22 14:27:31 +02:00
Jakob Borg  90d0896848  Change default config directory (fixes #145)  2014-04-22 14:27:09 +02:00
Jakob Borg  5528db9693  Fix config test (hostname check)  2014-04-22 12:06:32 +02:00
Jakob Borg  aa78fbb09d  Don't offer to delete this node (fixes #144)  2014-04-22 12:01:09 +02:00
Jakob Borg  d53b193e09  Ensure sensible node config on load (fixes #143)  2014-04-22 11:46:08 +02:00
Jakob Borg  e0e16c371f  Don't include test utils in testing  2014-04-22 08:27:00 +02:00
Jakob Borg  53cd877899  More portable hostname  2014-04-22 08:25:40 +02:00
Jakob Borg  1207223f3d  Report rates over the wire, not uncompressed  2014-04-21 12:49:47 +02:00
Jakob Borg  39be6932b5  discosrv: Better statistics  2014-04-19 23:14:56 +02:00
Jakob Borg  44a194d226  discosrv: Remove deprecated v1 support  2014-04-19 23:02:14 +02:00
Jakob Borg  9349eb77cd  Let absence of password be absence  2014-04-19 22:36:24 +02:00
Jakob Borg  c64549471a  Include build user and host in long version  2014-04-19 16:44:28 +02:00
Jakob Borg  264bcbc78c  Always print long version at startup  2014-04-19 16:40:19 +02:00
Jakob Borg  f76fe1ac7a  Include build date in -version output  2014-04-19 16:38:11 +02:00
Jakob Borg  6364c4ff3f  Save bcrypt hash of password (fixes #138)  2014-04-19 13:33:51 +02:00
Jakob Borg  292a50de04  Use pseudo-random high port for UPnP mapping  2014-04-18 14:09:54 +02:00
Jakob Borg  a08cba9c85  Config option to enable/disable UPnP  2014-04-18 13:39:51 +02:00
Jakob Borg  9fb60d6935  UPnP Port Mapping (fixes #79)  2014-04-18 13:28:51 +02:00
Jakob Borg  f2ed2d98d8  Updated assets for previous commit  2014-04-17 10:56:33 +02:00
Jakob Borg  b802cb1e36  Show status of global announce server (fixes #71)  2014-04-16 17:36:09 +02:00
Jakob Borg  31bfd8c039  Decouple local from global announcing (fixes #132)  2014-04-16 16:49:01 +02:00
Jakob Borg  f72ee7a69e  Set name of first node to the local hostname (fixes #121)  2014-04-16 16:35:29 +02:00
Jakob Borg  a98d75edaa  Clear acknowledged errors server-side as well (fixes #128)  2014-04-16 16:30:49 +02:00
Jakob Borg  622568c327  Handle static addresses without port (fixes #131)  2014-04-16 15:28:45 +02:00
Jakob Borg  1ca7e47fd6  Show restarting notification instead of network error (fixes #129)  2014-04-16 15:16:44 +02:00
Jakob Borg  116203aef8  discosrv: Clean up debug logging  2014-04-16 15:06:54 +02:00
Jakob Borg  1bf128612d  Prevent GUI from rendering before it's ready (fixes #127)  2014-04-15 19:14:46 +02:00
Jakob Borg  935a8eb9a7  Sort nodes on name if set, otherwise ID (fixes #119)  2014-04-15 10:57:17 +02:00
Jakob Borg  7e5b350096  Explanatory tooltips on data (fixes #118)  2014-04-15 10:34:34 +02:00
61 changed files with 3123 additions and 837 deletions


@@ -1,6 +1,6 @@
 Please do contribute! If you want to contribute but are unsure where to
 start, the [Contributions Needed
-page](https://github.com/calmh/syncthing/wiki/Contributions-Needed)
+topic](http://discourse.syncthing.net/t/contributions-needed/49)
 lists areas in need of attention.
 ## Licensing
@@ -15,7 +15,8 @@ will ensure that you are added to the CONTRIBUTORS file.
 ## Building
-[See the wiki](https://github.com/calmh/syncthing/wiki/Building)
+[See the
+documentation](http://discourse.syncthing.net/t/building-syncthing/44)
 ## Branches
@@ -46,7 +47,7 @@ Yes please!
 ## Documentation
-[Hack it here](https://github.com/calmh/syncthing/wiki)
+[Over here!](http://discourse.syncthing.net/category/documentation)
 ## License


@@ -1,4 +1,5 @@
Aaron Bieber <qbit@deftly.net>
Andrew Dunham <andrew@du.nham.ca>
Brandon Philips <brandon@ifup.org>
James Patterson <jamespatterson@operamail.com>
Philippe Schommers <philippe@schommers.be>

Godeps/Godeps.json (generated, 20 lines changed)

@@ -8,6 +8,21 @@
 "./discover/cmd/discosrv"
 ],
 "Deps": [
+{
+"ImportPath": "bitbucket.org/kardianos/osext",
+"Comment": "null-9",
+"Rev": "364fb577de68fb646c4cb39cc0e09c887ee16376"
+},
+{
+"ImportPath": "code.google.com/p/go.crypto/bcrypt",
+"Comment": "null-185",
+"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+},
+{
+"ImportPath": "code.google.com/p/go.crypto/blowfish",
+"Comment": "null-185",
+"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+},
 {
 "ImportPath": "code.google.com/p/go.text/transform",
 "Comment": "null-81",
@@ -27,11 +42,6 @@
 "Comment": "v0.1-142-g8659df7",
 "Rev": "8659df7a51aebe6c6120268cd5a8b4c34fa8441a"
 },
-{
-"ImportPath": "github.com/codegangsta/martini-contrib/auth",
-"Comment": "v0.1-159-g8ce6181",
-"Rev": "8ce6181c2609699e4c7cd30994b76a850a9cdadc"
-},
 {
 "ImportPath": "github.com/golang/groupcache/lru",
 "Rev": "d781998583680cda80cf61e0b37dd0cd8da2eb52"


@@ -0,0 +1,20 @@
Copyright (c) 2012 Daniel Theophanes
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.


@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext
import "path/filepath"
// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
p, err := executable()
return filepath.Clean(p), err
}
// Returns same path as Executable, returns just the folder
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
folder, _ := filepath.Split(p)
return folder, nil
}
// Deprecated. Same as Executable().
func GetExePath() (exePath string, err error) {
return Executable()
}
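
The upgrade-related commits in this comparison ("Built in upgrade functionality", "Save temporary in correct dir during upgrade") depend on the program locating its own binary, which is presumably why osext is vendored here. A minimal sketch, not part of this changeset, of how the package is typically used; the temporary file name below is illustrative only:

package main

import (
    "fmt"
    "path/filepath"

    "bitbucket.org/kardianos/osext"
)

func main() {
    // Resolve the absolute path of the currently running binary.
    exe, err := osext.Executable()
    if err != nil {
        fmt.Println("cannot locate executable:", err)
        return
    }
    // Writing the downloaded replacement next to the current binary keeps
    // the final rename on the same filesystem, so it can be done atomically.
    tmp := filepath.Join(filepath.Dir(exe), ".upgrade-tmp")
    fmt.Println("current binary:", exe)
    fmt.Println("temporary upgrade file:", tmp)
}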


@@ -0,0 +1,16 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osext
import "syscall"
func executable() (string, error) {
f, err := Open("/proc/" + itoa(Getpid()) + "/text")
if err != nil {
return "", err
}
defer f.Close()
return syscall.Fd2path(int(f.Fd()))
}


@@ -0,0 +1,25 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux netbsd openbsd
package osext
import (
"errors"
"os"
"runtime"
)
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
return os.Readlink("/proc/self/exe")
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "openbsd":
return os.Readlink("/proc/curproc/file")
}
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}


@@ -0,0 +1,82 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd
package osext
import (
"os"
"path/filepath"
"runtime"
"syscall"
"unsafe"
)
var startUpcwd, getwdError = os.Getwd()
func executable() (string, error) {
var mib [4]int32
switch runtime.GOOS {
case "freebsd":
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
case "darwin":
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
}
n := uintptr(0)
// get length
_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
if err != 0 {
return "", err
}
if n == 0 { // shouldn't happen
return "", nil
}
buf := make([]byte, n)
_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
if err != 0 {
return "", err
}
if n == 0 { // shouldn't happen
return "", nil
}
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
}
var strpath string
if buf[0] != '/' {
var e error
if strpath, e = getAbs(buf); e != nil {
return strpath, e
}
} else {
strpath = string(buf)
}
// darwin KERN_PROCARGS may return the path to a symlink rather than the
// actual executable
if runtime.GOOS == "darwin" {
if strpath, err := filepath.EvalSymlinks(strpath); err != nil {
return strpath, err
}
}
return strpath, nil
}
func getAbs(buf []byte) (string, error) {
if getwdError != nil {
return string(buf), getwdError
} else {
if buf[0] == '.' {
buf = buf[1:]
}
if startUpcwd[len(startUpcwd)-1] != '/' && buf[0] != '/' {
return startUpcwd + "/" + string(buf), nil
}
return startUpcwd + string(buf), nil
}
}


@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows
package osext
import (
"fmt"
"os"
oexec "os/exec"
"path/filepath"
"runtime"
"testing"
)
const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
func TestExecPath(t *testing.T) {
ep, err := Executable()
if err != nil {
t.Fatalf("ExecPath failed: %v", err)
}
// we want fn to be of the form "dir/prog"
dir := filepath.Dir(filepath.Dir(ep))
fn, err := filepath.Rel(dir, ep)
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
cmd := &oexec.Cmd{}
// make child start with a relative program path
cmd.Dir = dir
cmd.Path = fn
// forge argv[0] for the child, so that we can verify we correctly
// get the real path of the executable without being influenced by argv[0].
cmd.Args = []string{"-", "-test.run=XXXX"}
cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("exec(self) failed: %v", err)
}
outs := string(out)
if !filepath.IsAbs(outs) {
t.Fatalf("Child returned %q, want an absolute path", out)
}
if !sameFile(outs, ep) {
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
}
}
func sameFile(fn1, fn2 string) bool {
fi1, err := os.Stat(fn1)
if err != nil {
return false
}
fi2, err := os.Stat(fn2)
if err != nil {
return false
}
return os.SameFile(fi1, fi2)
}
func init() {
if e := os.Getenv(execPath_EnvVar); e != "" {
// first chdir to another path
dir := "/"
if runtime.GOOS == "windows" {
dir = filepath.VolumeName(".")
}
os.Chdir(dir)
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
os.Exit(0)
}
}


@@ -0,0 +1,34 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osext
import (
"syscall"
"unicode/utf16"
"unsafe"
)
var (
kernel = syscall.MustLoadDLL("kernel32.dll")
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
)
// GetModuleFileName() with hModule = NULL
func executable() (exePath string, err error) {
return getModuleFileName()
}
func getModuleFileName() (string, error) {
var n uint32
b := make([]uint16, syscall.MAX_PATH)
size := uint32(len(b))
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
n = uint32(r0)
if n == 0 {
return "", e1
}
return string(utf16.Decode(b[0:n])), nil
}


@@ -0,0 +1,35 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bcrypt
import "encoding/base64"
const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var bcEncoding = base64.NewEncoding(alphabet)
func base64Encode(src []byte) []byte {
n := bcEncoding.EncodedLen(len(src))
dst := make([]byte, n)
bcEncoding.Encode(dst, src)
for dst[n-1] == '=' {
n--
}
return dst[:n]
}
func base64Decode(src []byte) ([]byte, error) {
numOfEquals := 4 - (len(src) % 4)
for i := 0; i < numOfEquals; i++ {
src = append(src, '=')
}
dst := make([]byte, bcEncoding.DecodedLen(len(src)))
n, err := bcEncoding.Decode(dst, src)
if err != nil {
return nil, err
}
return dst[:n], nil
}


@@ -0,0 +1,294 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt
// The code is a port of Provos and Mazières's C implementation.
import (
"code.google.com/p/go.crypto/blowfish"
"crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"io"
"strconv"
)
const (
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
)
// The error returned from CompareHashAndPassword when a password and hash do
// not match.
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
// The error returned from CompareHashAndPassword when a hash is too short to
// be a bcrypt hash.
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
// The error returned from CompareHashAndPassword when a hash was created with
// a bcrypt algorithm newer than this implementation.
type HashVersionTooNewError byte
func (hv HashVersionTooNewError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
}
// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
type InvalidHashPrefixError byte
func (ih InvalidHashPrefixError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
}
type InvalidCostError int
func (ic InvalidCostError) Error() string {
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
}
const (
majorVersion = '2'
minorVersion = 'a'
maxSaltSize = 16
maxCryptedHashSize = 23
encodedSaltSize = 22
encodedHashSize = 31
minHashSize = 59
)
// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
var magicCipherData = []byte{
0x4f, 0x72, 0x70, 0x68,
0x65, 0x61, 0x6e, 0x42,
0x65, 0x68, 0x6f, 0x6c,
0x64, 0x65, 0x72, 0x53,
0x63, 0x72, 0x79, 0x44,
0x6f, 0x75, 0x62, 0x74,
}
type hashed struct {
hash []byte
salt []byte
cost int // allowed range is MinCost to MaxCost
major byte
minor byte
}
// GenerateFromPassword returns the bcrypt hash of the password at the given
// cost. If the cost given is less than MinCost, the cost will be set to
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
// to compare the returned hashed password with its cleartext version.
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
p, err := newFromPassword(password, cost)
if err != nil {
return nil, err
}
return p.Hash(), nil
}
// CompareHashAndPassword compares a bcrypt hashed password with its possible
// plaintext equivalent. Returns nil on success, or an error on failure.
func CompareHashAndPassword(hashedPassword, password []byte) error {
p, err := newFromHash(hashedPassword)
if err != nil {
return err
}
otherHash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return err
}
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
return nil
}
return ErrMismatchedHashAndPassword
}
// Cost returns the hashing cost used to create the given hashed
// password. When, in the future, the hashing cost of a password system needs
// to be increased in order to adjust for greater computational power, this
// function allows one to establish which passwords need to be updated.
func Cost(hashedPassword []byte) (int, error) {
p, err := newFromHash(hashedPassword)
if err != nil {
return 0, err
}
return p.cost, nil
}
func newFromPassword(password []byte, cost int) (*hashed, error) {
if cost < MinCost {
cost = DefaultCost
}
p := new(hashed)
p.major = majorVersion
p.minor = minorVersion
err := checkCost(cost)
if err != nil {
return nil, err
}
p.cost = cost
unencodedSalt := make([]byte, maxSaltSize)
_, err = io.ReadFull(rand.Reader, unencodedSalt)
if err != nil {
return nil, err
}
p.salt = base64Encode(unencodedSalt)
hash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return nil, err
}
p.hash = hash
return p, err
}
func newFromHash(hashedSecret []byte) (*hashed, error) {
if len(hashedSecret) < minHashSize {
return nil, ErrHashTooShort
}
p := new(hashed)
n, err := p.decodeVersion(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
n, err = p.decodeCost(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
// The "+2" is here because we'll have to append at most 2 '=' to the salt
// when base64 decoding it in expensiveBlowfishSetup().
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
copy(p.salt, hashedSecret[:encodedSaltSize])
hashedSecret = hashedSecret[encodedSaltSize:]
p.hash = make([]byte, len(hashedSecret))
copy(p.hash, hashedSecret)
return p, nil
}
func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
cipherData := make([]byte, len(magicCipherData))
copy(cipherData, magicCipherData)
c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
if err != nil {
return nil, err
}
for i := 0; i < 24; i += 8 {
for j := 0; j < 64; j++ {
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
}
}
// Bug compatibility with C bcrypt implementations. We only encode 23 of
// the 24 bytes encrypted.
hsh := base64Encode(cipherData[:maxCryptedHashSize])
return hsh, nil
}
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
csalt, err := base64Decode(salt)
if err != nil {
return nil, err
}
// Bug compatibility with C bcrypt implementations. They use the trailing
// NULL in the key string during expansion.
ckey := append(key, 0)
c, err := blowfish.NewSaltedCipher(ckey, csalt)
if err != nil {
return nil, err
}
var i, rounds uint64
rounds = 1 << cost
for i = 0; i < rounds; i++ {
blowfish.ExpandKey(ckey, c)
blowfish.ExpandKey(csalt, c)
}
return c, nil
}
func (p *hashed) Hash() []byte {
arr := make([]byte, 60)
arr[0] = '$'
arr[1] = p.major
n := 2
if p.minor != 0 {
arr[2] = p.minor
n = 3
}
arr[n] = '$'
n += 1
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
n += 2
arr[n] = '$'
n += 1
copy(arr[n:], p.salt)
n += encodedSaltSize
copy(arr[n:], p.hash)
n += encodedHashSize
return arr[:n]
}
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
if sbytes[0] != '$' {
return -1, InvalidHashPrefixError(sbytes[0])
}
if sbytes[1] > majorVersion {
return -1, HashVersionTooNewError(sbytes[1])
}
p.major = sbytes[1]
n := 3
if sbytes[2] != '$' {
p.minor = sbytes[2]
n++
}
return n, nil
}
// sbytes should begin where decodeVersion left off.
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
cost, err := strconv.Atoi(string(sbytes[0:2]))
if err != nil {
return -1, err
}
err = checkCost(cost)
if err != nil {
return -1, err
}
p.cost = cost
return 3, nil
}
func (p *hashed) String() string {
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
}
func checkCost(cost int) error {
if cost < MinCost || cost > MaxCost {
return InvalidCostError(cost)
}
return nil
}
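
A minimal sketch, not part of this changeset, of the round trip that the "Save bcrypt hash of password (fixes #138)" commit relies on: only the bcrypt hash of the GUI password is stored, and later login attempts are verified against it.

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/bcrypt"
)

func main() {
    // Hash once at configuration time; only the hash needs to be saved.
    hash, err := bcrypt.GenerateFromPassword([]byte("secretpassword"), bcrypt.DefaultCost)
    if err != nil {
        panic(err)
    }
    fmt.Println("stored:", string(hash)) // 60 bytes: "$2a$10$", 22 salt chars, 31 hash chars

    // Verify a login attempt; a nil error means the password matches.
    if bcrypt.CompareHashAndPassword(hash, []byte("secretpassword")) == nil {
        fmt.Println("password accepted")
    }
    if bcrypt.CompareHashAndPassword(hash, []byte("wrong")) == bcrypt.ErrMismatchedHashAndPassword {
        fmt.Println("wrong password rejected")
    }
}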


@@ -0,0 +1,217 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bcrypt
import (
"bytes"
"fmt"
"testing"
)
func TestBcryptingIsEasy(t *testing.T) {
pass := []byte("mypassword")
hp, err := GenerateFromPassword(pass, 0)
if err != nil {
t.Fatalf("GenerateFromPassword error: %s", err)
}
if CompareHashAndPassword(hp, pass) != nil {
t.Errorf("%v should hash %s correctly", hp, pass)
}
notPass := "notthepass"
err = CompareHashAndPassword(hp, []byte(notPass))
if err != ErrMismatchedHashAndPassword {
t.Errorf("%v and %s should be mismatched", hp, notPass)
}
}
func TestBcryptingIsCorrect(t *testing.T) {
pass := []byte("allmine")
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
hash, err := bcrypt(pass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up: %v", err)
}
if !bytes.HasSuffix(expectedHash, hash) {
t.Errorf("%v should be the suffix of %v", hash, expectedHash)
}
h, err := newFromHash(expectedHash)
if err != nil {
t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
}
// This is not the safe way to compare these hashes. We do this only for
// testing clarity. Use bcrypt.CompareHashAndPassword()
if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
}
}
func TestTooLongPasswordsWork(t *testing.T) {
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
// One byte over the usual 56 byte limit that blowfish has
tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
hash, err := bcrypt(tooLongPass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up on long password: %v", err)
}
if !bytes.HasSuffix(tooLongExpected, hash) {
t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
}
}
type InvalidHashTest struct {
err error
hash []byte
}
var invalidTests = []InvalidHashTest{
{ErrHashTooShort, []byte("$2a$10$fooo")},
{ErrHashTooShort, []byte("$2a")},
{HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
}
func TestInvalidHashErrors(t *testing.T) {
check := func(name string, expected, err error) {
if err == nil {
t.Errorf("%s: Should have returned an error", name)
}
if err != nil && err != expected {
t.Errorf("%s gave err %v but should have given %v", name, err, expected)
}
}
for _, iht := range invalidTests {
_, err := newFromHash(iht.hash)
check("newFromHash", iht.err, err)
err = CompareHashAndPassword(iht.hash, []byte("anything"))
check("CompareHashAndPassword", iht.err, err)
}
}
func TestUnpaddedBase64Encoding(t *testing.T) {
original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
encoded := base64Encode(original)
if !bytes.Equal(encodedOriginal, encoded) {
t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
}
decoded, err := base64Decode(encodedOriginal)
if err != nil {
t.Fatalf("base64Decode blew up: %s", err)
}
if !bytes.Equal(decoded, original) {
t.Errorf("Decoded %v should have equaled %v", decoded, original)
}
}
func TestCost(t *testing.T) {
suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
for _, vers := range []string{"2a", "2"} {
for _, cost := range []int{4, 10} {
s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
h := []byte(s)
actual, err := Cost(h)
if err != nil {
t.Errorf("Cost, error: %s", err)
continue
}
if actual != cost {
t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
}
}
}
_, err := Cost([]byte("$a$a$" + suffix))
if err == nil {
t.Errorf("Cost, malformed but no error returned")
}
}
func TestCostValidationInHash(t *testing.T) {
if testing.Short() {
return
}
pass := []byte("mypassword")
for c := 0; c < MinCost; c++ {
p, _ := newFromPassword(pass, c)
if p.cost != DefaultCost {
t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
}
}
p, _ := newFromPassword(pass, 14)
if p.cost != 14 {
t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
}
hp, _ := newFromHash(p.Hash())
if p.cost != hp.cost {
t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
}
_, err := newFromPassword(pass, 32)
if err == nil {
t.Fatalf("newFromPassword: should return a cost error")
}
if err != InvalidCostError(32) {
t.Errorf("newFromPassword: should return cost error, got %#v", err)
}
}
func TestCostReturnsWithLeadingZeroes(t *testing.T) {
hp, _ := newFromPassword([]byte("abcdefgh"), 7)
cost := hp.Hash()[4:7]
expected := []byte("07$")
if !bytes.Equal(expected, cost) {
t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
}
}
func TestMinorNotRequired(t *testing.T) {
noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
h, err := newFromHash(noMinorHash)
if err != nil {
t.Fatalf("No minor hash blew up: %s", err)
}
if h.minor != 0 {
t.Errorf("Should leave minor version at 0, but was %d", h.minor)
}
if !bytes.Equal(noMinorHash, h.Hash()) {
t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
}
}
func BenchmarkEqual(b *testing.B) {
b.StopTimer()
passwd := []byte("somepasswordyoulike")
hash, _ := GenerateFromPassword(passwd, 10)
b.StartTimer()
for i := 0; i < b.N; i++ {
CompareHashAndPassword(hash, passwd)
}
}
func BenchmarkGeneration(b *testing.B) {
b.StopTimer()
passwd := []byte("mylongpassword1234")
b.StartTimer()
for i := 0; i < b.N; i++ {
GenerateFromPassword(passwd, 10)
}
}


@@ -0,0 +1,190 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
j = 0
var expandedSalt [4]uint32
for i := range expandedSalt {
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(salt[j])
j++
if j >= len(salt) {
j = 0
}
}
expandedSalt[i] = d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= expandedSalt[i&2]
r ^= expandedSalt[(i&2)+1]
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s0[i+2], c.s0[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s1[i+2], c.s1[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s2[i+2], c.s2[i+3] = l, r
}
for i := 0; i < 256; i += 4 {
l ^= expandedSalt[2]
r ^= expandedSalt[3]
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
l ^= expandedSalt[0]
r ^= expandedSalt[1]
l, r = encryptBlock(l, r, c)
c.s3[i+2], c.s3[i+3] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}
func zero(x []uint32) {
for i := range x {
x[i] = 0
}
}


@@ -0,0 +1,210 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
import (
"testing"
)
type CryptTest struct {
key []byte
in []byte
out []byte
}
// Test vector values are from http://www.schneier.com/code/vectors.txt.
var encryptTests = []CryptTest{
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
{
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
{
[]byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
[]byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
{
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
{
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
{
[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
{
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
{
[]byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
[]byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
[]byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
{
[]byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
[]byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
[]byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
{
[]byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
[]byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
[]byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
{
[]byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
[]byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
[]byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
{
[]byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
[]byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
[]byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
{
[]byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
[]byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
[]byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
{
[]byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
[]byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
[]byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
{
[]byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
[]byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
[]byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
{
[]byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
[]byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
[]byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
{
[]byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
[]byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
[]byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
{
[]byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
[]byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
[]byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
{
[]byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
[]byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
[]byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
{
[]byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
[]byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
[]byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
{
[]byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
[]byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
[]byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
{
[]byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
[]byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
[]byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
{
[]byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
[]byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
[]byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
{
[]byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
[]byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
[]byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
{
[]byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
[]byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
[]byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
{
[]byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
[]byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
[]byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
{
[]byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
{
[]byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
{
[]byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
{
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
{
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
{
[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
[]byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
{
[]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
[]byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
}
func TestCipherEncrypt(t *testing.T) {
for i, tt := range encryptTests {
c, err := NewCipher(tt.key)
if err != nil {
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
continue
}
ct := make([]byte, len(tt.out))
c.Encrypt(ct, tt.in)
for j, v := range ct {
if v != tt.out[j] {
t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
break
}
}
}
}
func TestCipherDecrypt(t *testing.T) {
for i, tt := range encryptTests {
c, err := NewCipher(tt.key)
if err != nil {
t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
continue
}
pt := make([]byte, len(tt.in))
c.Decrypt(pt, tt.out)
for j, v := range pt {
if v != tt.in[j] {
t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
break
}
}
}
}
func TestSaltedCipherKeyLength(t *testing.T) {
var key []byte
for i := 0; i < 4; i++ {
_, err := NewSaltedCipher(key, []byte{'a'})
if err != KeySizeError(i) {
t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(i))
}
key = append(key, 'a')
}
// A 57-byte key. One over the typical blowfish restriction.
key = []byte("012345678901234567890123456789012345678901234567890123456")
_, err := NewSaltedCipher(key, []byte{'a'})
if err != nil {
t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
}
}


@@ -0,0 +1,90 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish
// The code is a port of Bruce Schneier's C implementation.
// See http://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, 4 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
k := len(key)
if k < 4 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(key, &result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes. Only the first 16 bytes of salt are used.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
var result Cipher
k := len(key)
if k < 4 {
return nil, KeySizeError(k)
}
initCipher(key, &result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(key []byte, c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
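
A minimal sketch, not part of this changeset, of single-block use of this cipher, checked against the first entry of the Schneier test vectors in the test file above: an all-zero 8-byte key and all-zero plaintext encrypt to 4E F9 97 45 61 98 DD 78.

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/blowfish"
)

func main() {
    key := make([]byte, 8) // all-zero key; lengths of 4 to 56 bytes are accepted
    c, err := blowfish.NewCipher(key)
    if err != nil {
        panic(err)
    }
    src := make([]byte, blowfish.BlockSize) // all-zero plaintext block
    dst := make([]byte, blowfish.BlockSize)
    c.Encrypt(dst, src)
    fmt.Printf("% X\n", dst) // 4E F9 97 45 61 98 DD 78

    // Decrypting the ciphertext restores the original block.
    pt := make([]byte, blowfish.BlockSize)
    c.Decrypt(pt, dst)
    fmt.Printf("% X\n", pt) // 00 00 00 00 00 00 00 00
}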


@@ -0,0 +1,199 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The startup permutation array and substitution boxes.
// They are the hexadecimal digits of PI; see:
// http://www.schneier.com/code/constants.txt.
package blowfish
var s0 = [256]uint32{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
}
var s1 = [256]uint32{
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
}
var s2 = [256]uint32{
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
}
var s3 = [256]uint32{
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
}
var p = [18]uint32{
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
}

View File

@@ -1,25 +0,0 @@
# auth
Martini middleware/handler for http basic authentication.
[API Reference](http://godoc.org/github.com/codegangsta/martini-contrib/auth)
## Usage
~~~ go
import (
"github.com/codegangsta/martini"
"github.com/codegangsta/martini-contrib/auth"
)
func main() {
m := martini.Classic()
// authenticate every request
m.Use(auth.Basic("username", "secretpassword"))
m.Run()
}
~~~
## Authors
* [Jeremy Saenz](http://github.com/codegangsta)
* [Brendon Murphy](http://github.com/bemurphy)

View File

@@ -1,19 +0,0 @@
package auth
import (
"encoding/base64"
"net/http"
)
// Basic returns a Handler that authenticates via Basic Auth. Writes a http.StatusUnauthorized
// if authentication fails
func Basic(username string, password string) http.HandlerFunc {
var siteAuth = base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
return func(res http.ResponseWriter, req *http.Request) {
auth := req.Header.Get("Authorization")
if !SecureCompare(auth, "Basic "+siteAuth) {
res.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
http.Error(res, "Not Authorized", http.StatusUnauthorized)
}
}
}

View File

@@ -1,45 +0,0 @@
package auth
import (
"encoding/base64"
"github.com/codegangsta/martini"
"net/http"
"net/http/httptest"
"testing"
)
func Test_BasicAuth(t *testing.T) {
recorder := httptest.NewRecorder()
auth := "Basic " + base64.StdEncoding.EncodeToString([]byte("foo:bar"))
m := martini.New()
m.Use(Basic("foo", "bar"))
m.Use(func(res http.ResponseWriter, req *http.Request) {
res.Write([]byte("hello"))
})
r, _ := http.NewRequest("GET", "foo", nil)
m.ServeHTTP(recorder, r)
if recorder.Code != 401 {
t.Error("Response not 401")
}
if recorder.Body.String() == "hello" {
t.Error("Auth block failed")
}
recorder = httptest.NewRecorder()
r.Header.Set("Authorization", auth)
m.ServeHTTP(recorder, r)
if recorder.Code == 401 {
t.Error("Response is 401")
}
if recorder.Body.String() != "hello" {
t.Error("Auth failed, got: ", recorder.Body.String())
}
}

View File

@@ -1,15 +0,0 @@
package auth
import (
"crypto/subtle"
)
// SecureCompare performs a constant time compare of two strings to limit timing attacks.
func SecureCompare(given string, actual string) bool {
if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
} else {
/* Securely compare actual to itself to keep constant time, but always return false */
return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false
}
}
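
The helper above, removed together with the rest of the vendored martini auth package, avoids timing attacks by comparing lengths in constant time before comparing contents, and by doing a dummy comparison when the lengths differ. A minimal standalone sketch of the same idea with crypto/subtle (an illustration, not the repository's code):

~~~ go
package main

import (
	"crypto/subtle"
	"fmt"
)

// secureCompare reports whether given == actual without leaking, through
// timing, how many leading bytes of the two strings match.
func secureCompare(given, actual string) bool {
	if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
		return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
	}
	// Lengths differ: compare actual against itself to spend the same amount
	// of work, then report a mismatch.
	subtle.ConstantTimeCompare([]byte(actual), []byte(actual))
	return false
}

func main() {
	fmt.Println(secureCompare("secretpassword", "secretpassword")) // true
	fmt.Println(secureCompare("secretpassword", "guess"))          // false
}
~~~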

View File

@@ -1,26 +0,0 @@
package auth
import (
"testing"
)
var comparetests = []struct {
a string
b string
val bool
}{
{"foo", "foo", true},
{"bar", "bar", true},
{"password", "password", true},
{"Foo", "foo", false},
{"foo", "foobar", false},
{"password", "pass", false},
}
func Test_SecureCompare(t *testing.T) {
for _, tt := range comparetests {
if SecureCompare(tt.a, tt.b) != tt.val {
t.Errorf("Expected SecureCompare(%v, %v) to return %v but did not", tt.a, tt.b, tt.val)
}
}
}

View File

@@ -1,4 +1,4 @@
syncthing [![Build Status](https://drone.io/github.com/calmh/syncthing/status.png)](https://drone.io/github.com/calmh/syncthing/latest)
syncthing
=========
This is the `syncthing` project. The following are the project goals:
@@ -25,6 +25,11 @@ making sure large swarms of selfish agents behave and somehow work
towards a common goal. Here we have a much smaller swarm of cooperative
agents and a simpler approach will suffice.
Getting Started
---------------
Take a look at the [getting started guide](http://discourse.syncthing.net/t/getting-started/46).
Signed Releases
---------------
@@ -35,8 +40,9 @@ normal release bundle as `syncthing.asc` or `syncthing.exe.asc`.
Documentation
=============
The syncthing documentation is kept on the
[GitHub Wiki](https://github.com/calmh/syncthing/wiki).
The [syncthing
documentation](http://discourse.syncthing.net/category/documentation) is
on the discourse site.
License
=======

View File

File diff suppressed because one or more lines are too long

View File

@@ -4,6 +4,11 @@ export COPYFILE_DISABLE=true
distFiles=(README.md LICENSE) # apart from the binary itself
version=$(git describe --always --dirty)
date=$(date +%s)
user=$(whoami)
host=$(hostname)
host=${host%%.*}
ldflags="-w -X main.Version $version -X main.BuildStamp $date -X main.BuildUser $user -X main.BuildHost $host"
build() {
if command -v godep >/dev/null ; then
@@ -14,8 +19,8 @@ build() {
go get -d ./cmd/syncthing
godep=
fi
${godep} go build $* -ldflags "-w -X main.Version $version" ./cmd/syncthing
${godep} go build -ldflags "-w -X main.Version $version" ./cmd/stcli
${godep} go build $* -ldflags "$ldflags" ./cmd/syncthing
${godep} go build -ldflags "$ldflags" ./cmd/stcli
}
assets() {

cmd/stpidx/main.go Normal file
View File

@@ -0,0 +1,52 @@
package main
import (
"compress/gzip"
"flag"
"log"
"os"
"github.com/calmh/syncthing/protocol"
)
func main() {
log.SetFlags(0)
log.SetOutput(os.Stdout)
showBlocks := flag.Bool("b", false, "Show blocks")
flag.Parse()
name := flag.Arg(0)
idxf, err := os.Open(name)
if err != nil {
log.Fatal(err)
}
defer idxf.Close()
gzr, err := gzip.NewReader(idxf)
if err != nil {
log.Fatal(err)
}
defer gzr.Close()
var im protocol.IndexMessage
err = im.DecodeXDR(gzr)
if err != nil {
log.Fatal(err)
}
log.Printf("Repo: %q, Files: %d", im.Repository, len(im.Files))
for _, file := range im.Files {
del := file.Flags&protocol.FlagDeleted != 0
inv := file.Flags&protocol.FlagInvalid != 0
dir := file.Flags&protocol.FlagDirectory != 0
prm := file.Flags & 0777
log.Printf("File: %q, Del: %v, Inv: %v, Dir: %v, Perm: 0%03o, Modified: %d, Blocks: %d",
file.Name, del, inv, dir, prm, file.Modified, len(file.Blocks))
if *showBlocks {
for _, block := range file.Blocks {
log.Printf(" Size: %6d, Hash: %x", block.Size, block.Hash)
}
}
}
}

View File

@@ -1,6 +1,10 @@
package main
import "github.com/calmh/syncthing/scanner"
import (
"sync/atomic"
"github.com/calmh/syncthing/scanner"
)
type bqAdd struct {
file scanner.File
@@ -20,6 +24,7 @@ type blockQueue struct {
outbox chan bqBlock
queued []bqBlock
qlen uint32
}
func newBlockQueue() *blockQueue {
@@ -77,6 +82,7 @@ func (q *blockQueue) run() {
q.queued = q.queued[1:]
}
}
atomic.StoreUint32(&q.qlen, uint32(len(q.queued)))
}
}
@@ -89,6 +95,7 @@ func (q *blockQueue) get() bqBlock {
}
func (q *blockQueue) empty() bool {
// There is a race condition here. We're only mostly sure the queue is empty if the expression below is true.
return len(q.queued) == 0 && len(q.inbox) == 0 && len(q.outbox) == 0
var l uint32
atomic.LoadUint32(&l)
return l == 0
}
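
The blockqueue hunk above adds a qlen counter that run() keeps in step with len(q.queued) via atomic stores, so that empty() can be answered with a single atomic load instead of the admittedly racy inspection of the slice and channels; for the check to be meaningful, the load must read that shared counter. A minimal sketch of the pattern with hypothetical names (not the repository's code):

~~~ go
package main

import (
	"fmt"
	"sync/atomic"
)

type queue struct {
	items []int
	qlen  uint32 // mirror of len(items), safe to read from other goroutines
}

// push and pop are only called from the goroutine that owns the queue; they
// keep qlen in sync with len(items) using atomic stores.
func (q *queue) push(v int) {
	q.items = append(q.items, v)
	atomic.StoreUint32(&q.qlen, uint32(len(q.items)))
}

func (q *queue) pop() int {
	v := q.items[0]
	q.items = q.items[1:]
	atomic.StoreUint32(&q.qlen, uint32(len(q.items)))
	return v
}

// empty may be called from any goroutine; it loads the shared counter
// atomically rather than touching the slice itself.
func (q *queue) empty() bool {
	return atomic.LoadUint32(&q.qlen) == 0
}

func main() {
	q := &queue{}
	fmt.Println(q.empty()) // true
	q.push(42)
	fmt.Println(q.empty()) // false
	q.pop()
	fmt.Println(q.empty()) // true
}
~~~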

View File

@@ -3,9 +3,12 @@ package main
import (
"encoding/xml"
"io"
"os"
"reflect"
"sort"
"strconv"
"code.google.com/p/go.crypto/bcrypt"
)
type Configuration struct {
@@ -22,6 +25,7 @@ type RepositoryConfiguration struct {
Directory string `xml:"directory,attr"`
Nodes []NodeConfiguration `xml:"node"`
ReadOnly bool `xml:"ro,attr"`
Invalid string `xml:"-"` // Set at runtime when there is an error, not saved
nodeIDs []string
}
@@ -51,6 +55,7 @@ type OptionsConfiguration struct {
ReconnectIntervalS int `xml:"reconnectionIntervalS" default:"60"`
MaxChangeKbps int `xml:"maxChangeKbps" default:"1000"`
StartBrowser bool `xml:"startBrowser" default:"true"`
UPnPEnabled bool `xml:"upnpEnabled" default:"true"`
Deprecated_ReadOnly bool `xml:"readOnly,omitempty"`
Deprecated_GUIEnabled bool `xml:"guiEnabled,omitempty"`
@@ -150,7 +155,7 @@ func uniqueStrings(ss []string) []string {
return us
}
func readConfigXML(rd io.Reader) (Configuration, error) {
func readConfigXML(rd io.Reader, myID string) (Configuration, error) {
var cfg Configuration
setDefaults(&cfg)
@@ -166,23 +171,53 @@ func readConfigXML(rd io.Reader) (Configuration, error) {
cfg.Options.ListenAddress = uniqueStrings(cfg.Options.ListenAddress)
var seenRepos = map[string]bool{}
// Check for missing or duplicate repository ID:s
var seenRepos = map[string]*RepositoryConfiguration{}
for i := range cfg.Repositories {
if cfg.Repositories[i].ID == "" {
cfg.Repositories[i].ID = "default"
repo := &cfg.Repositories[i]
if repo.ID == "" {
repo.ID = "default"
}
id := cfg.Repositories[i].ID
if seenRepos[id] {
panic("duplicate repository ID " + id)
if seen, ok := seenRepos[repo.ID]; ok {
seen.Invalid = "duplicate repository ID"
repo.Invalid = "duplicate repository ID"
warnf("Multiple repositories with ID %q; disabling", repo.ID)
} else {
seenRepos[repo.ID] = repo
}
seenRepos[id] = true
}
// Upgrade to v2 configuration if appropriate
if cfg.Version == 1 {
convertV1V2(&cfg)
}
// Hash old cleartext passwords
if len(cfg.GUI.Password) > 0 && cfg.GUI.Password[0] != '$' {
hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
if err != nil {
warnln(err)
} else {
cfg.GUI.Password = string(hash)
}
}
// Ensure this node is present in all relevant places
cfg.Nodes = ensureNodePresent(cfg.Nodes, myID)
for i := range cfg.Repositories {
cfg.Repositories[i].Nodes = ensureNodePresent(cfg.Repositories[i].Nodes, myID)
}
// An empty address list is equivalent to a single "dynamic" entry
for i := range cfg.Nodes {
n := &cfg.Nodes[i]
if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
n.Addresses = []string{"dynamic"}
}
}
return cfg, err
}
@@ -229,7 +264,7 @@ func (l NodeConfigurationList) Len() int {
return len(l)
}
func cleanNodeList(nodes []NodeConfiguration, myID string) []NodeConfiguration {
func ensureNodePresent(nodes []NodeConfiguration, myID string) []NodeConfiguration {
var myIDExists bool
for _, node := range nodes {
if node.NodeID == myID {
@@ -239,10 +274,10 @@ func cleanNodeList(nodes []NodeConfiguration, myID string) []NodeConfiguration {
}
if !myIDExists {
name, _ := os.Hostname()
nodes = append(nodes, NodeConfiguration{
NodeID: myID,
Addresses: []string{"dynamic"},
Name: "",
NodeID: myID,
Name: name,
})
}
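
The config hunk above hashes any cleartext GUI password on load (a value not starting with '$' is taken to be cleartext), so only a bcrypt hash is ever written back to config.xml. A small sketch of hashing and verifying with the bcrypt package — the repository vendors it as code.google.com/p/go.crypto/bcrypt; the import path below is the modern equivalent and is an assumption on my part:

~~~ go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt" // assumed path; the repo uses code.google.com/p/go.crypto/bcrypt
)

func main() {
	// Cost 0 falls back to bcrypt.DefaultCost, matching the call in the hunk above.
	hash, err := bcrypt.GenerateFromPassword([]byte("secretpassword"), 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hash)) // e.g. "$2a$10$..."

	// Verification compares a candidate password against the stored hash.
	fmt.Println("correct:", bcrypt.CompareHashAndPassword(hash, []byte("secretpassword")) == nil)
	fmt.Println("wrong:  ", bcrypt.CompareHashAndPassword(hash, []byte("guess")) == nil)
}
~~~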

View File

@@ -3,6 +3,7 @@ package main
import (
"bytes"
"io"
"os"
"reflect"
"testing"
)
@@ -19,9 +20,10 @@ func TestDefaultValues(t *testing.T) {
ReconnectIntervalS: 60,
MaxChangeKbps: 1000,
StartBrowser: true,
UPnPEnabled: true,
}
cfg, err := readConfigXML(bytes.NewReader(nil))
cfg, err := readConfigXML(bytes.NewReader(nil), "nodeID")
if err != io.EOF {
t.Error(err)
}
@@ -64,7 +66,7 @@ func TestNodeConfig(t *testing.T) {
`)
for i, data := range [][]byte{v1data, v2data} {
cfg, err := readConfigXML(bytes.NewReader(data))
cfg, err := readConfigXML(bytes.NewReader(data), "node1")
if err != nil {
t.Error(err)
}
@@ -119,7 +121,7 @@ func TestNoListenAddress(t *testing.T) {
</configuration>
`)
cfg, err := readConfigXML(bytes.NewReader(data))
cfg, err := readConfigXML(bytes.NewReader(data), "nodeID")
if err != nil {
t.Error(err)
}
@@ -149,6 +151,7 @@ func TestOverriddenValues(t *testing.T) {
<reconnectionIntervalS>6000</reconnectionIntervalS>
<maxChangeKbps>2345</maxChangeKbps>
<startBrowser>false</startBrowser>
<upnpEnabled>false</upnpEnabled>
</options>
</configuration>
`)
@@ -164,9 +167,10 @@ func TestOverriddenValues(t *testing.T) {
ReconnectIntervalS: 6000,
MaxChangeKbps: 2345,
StartBrowser: false,
UPnPEnabled: false,
}
cfg, err := readConfigXML(bytes.NewReader(data))
cfg, err := readConfigXML(bytes.NewReader(data), "nodeID")
if err != nil {
t.Error(err)
}
@@ -175,3 +179,48 @@ func TestOverriddenValues(t *testing.T) {
t.Errorf("Overridden config differs;\n E: %#v\n A: %#v", expected, cfg.Options)
}
}
func TestNodeAddresses(t *testing.T) {
data := []byte(`
<configuration version="2">
<node id="n1">
<address>dynamic</address>
</node>
<node id="n2">
<address></address>
</node>
<node id="n3">
</node>
</configuration>
`)
name, _ := os.Hostname()
expected := []NodeConfiguration{
{
NodeID: "n1",
Addresses: []string{"dynamic"},
},
{
NodeID: "n2",
Addresses: []string{"dynamic"},
},
{
NodeID: "n3",
Addresses: []string{"dynamic"},
},
{
NodeID: "n4",
Name: name, // Set when auto created
Addresses: []string{"dynamic"},
},
}
cfg, err := readConfigXML(bytes.NewReader(data), "n4")
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(cfg.Nodes, expected) {
t.Errorf("Nodes differ;\n E: %#v\n A: %#v", expected, cfg.Nodes)
}
}

View File

@@ -1,17 +1,21 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"runtime"
"sync"
"time"
"code.google.com/p/go.crypto/bcrypt"
"github.com/calmh/syncthing/scanner"
"github.com/codegangsta/martini"
"github.com/codegangsta/martini-contrib/auth"
)
type guiError struct {
@@ -23,17 +27,27 @@ var (
configInSync = true
guiErrors = []guiError{}
guiErrorsMut sync.Mutex
static = embeddedStatic()
staticFunc = static.(func(http.ResponseWriter, *http.Request, *log.Logger))
)
func startGUI(cfg GUIConfiguration, m *Model) {
const (
unchangedPassword = "--password-unchanged--"
)
func startGUI(cfg GUIConfiguration, m *Model) error {
l, err := net.Listen("tcp", cfg.Address)
if err != nil {
return err
}
router := martini.NewRouter()
router.Get("/", getRoot)
router.Get("/rest/version", restGetVersion)
router.Get("/rest/model/:repo", restGetModel)
router.Get("/rest/model", restGetModel)
router.Get("/rest/connections", restGetConnections)
router.Get("/rest/config", restGetConfig)
router.Get("/rest/config/sync", restGetConfigInSync)
router.Get("/rest/need/:repo", restGetNeed)
router.Get("/rest/system", restGetSystem)
router.Get("/rest/errors", restGetErrors)
@@ -41,26 +55,26 @@ func startGUI(cfg GUIConfiguration, m *Model) {
router.Post("/rest/restart", restPostRestart)
router.Post("/rest/reset", restPostReset)
router.Post("/rest/error", restPostError)
router.Post("/rest/error/clear", restClearErrors)
go func() {
mr := martini.New()
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
mr.Use(auth.Basic(cfg.User, cfg.Password))
}
mr.Use(embeddedStatic())
mr.Use(martini.Recovery())
mr.Use(restMiddleware)
mr.Action(router.Handle)
mr.Map(m)
err := http.ListenAndServe(cfg.Address, mr)
if err != nil {
warnln("GUI not possible:", err)
}
}()
mr := martini.New()
if len(cfg.User) > 0 && len(cfg.Password) > 0 {
mr.Use(basic(cfg.User, cfg.Password))
}
mr.Use(static)
mr.Use(martini.Recovery())
mr.Use(restMiddleware)
mr.Action(router.Handle)
mr.Map(m)
go http.Serve(l, mr)
return nil
}
func getRoot(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/index.html", 302)
r.URL.Path = "/index.html"
staticFunc(w, r, nil)
}
func restMiddleware(w http.ResponseWriter, r *http.Request) {
@@ -73,10 +87,18 @@ func restGetVersion() string {
return Version
}
func restGetModel(m *Model, w http.ResponseWriter, params martini.Params) {
var repo = params["repo"]
func restGetModel(m *Model, w http.ResponseWriter, r *http.Request) {
var qs = r.URL.Query()
var repo = qs.Get("repo")
var res = make(map[string]interface{})
for _, cr := range cfg.Repositories {
if cr.ID == repo {
res["invalid"] = cr.Invalid
break
}
}
globalFiles, globalDeleted, globalBytes := m.GlobalSize(repo)
res["globalFiles"], res["globalDeleted"], res["globalBytes"] = globalFiles, globalDeleted, globalBytes
@@ -101,14 +123,31 @@ func restGetConnections(m *Model, w http.ResponseWriter) {
}
func restGetConfig(w http.ResponseWriter) {
json.NewEncoder(w).Encode(cfg)
encCfg := cfg
if encCfg.GUI.Password != "" {
encCfg.GUI.Password = unchangedPassword
}
json.NewEncoder(w).Encode(encCfg)
}
func restPostConfig(req *http.Request) {
var prevPassHash = cfg.GUI.Password
err := json.NewDecoder(req.Body).Decode(&cfg)
if err != nil {
log.Println(err)
warnln(err)
} else {
if cfg.GUI.Password == "" {
// Leave it empty
} else if cfg.GUI.Password != unchangedPassword {
hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
if err != nil {
warnln(err)
} else {
cfg.GUI.Password = string(hash)
}
} else {
cfg.GUI.Password = prevPassHash
}
saveConfig()
configInSync = false
}
@@ -144,17 +183,6 @@ func (f guiFile) MarshalJSON() ([]byte, error) {
})
}
func restGetNeed(m *Model, w http.ResponseWriter, params martini.Params) {
repo := params["repo"]
files := m.NeedFilesRepo(repo)
gfs := make([]guiFile, len(files))
for i, f := range files {
gfs[i] = guiFile(f)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(gfs)
}
var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock sync.RWMutex
@@ -167,6 +195,9 @@ func restGetSystem(w http.ResponseWriter) {
res["goroutines"] = runtime.NumGoroutine()
res["alloc"] = m.Alloc
res["sys"] = m.Sys
if cfg.Options.GlobalAnnEnabled && discoverer != nil {
res["extAnnounceOK"] = discoverer.ExtAnnounceOK()
}
cpuUsageLock.RLock()
var cpusum float64
for _, p := range cpuUsagePercent {
@@ -191,6 +222,12 @@ func restPostError(req *http.Request) {
showGuiError(string(bs))
}
func restClearErrors() {
guiErrorsMut.Lock()
guiErrors = nil
guiErrorsMut.Unlock()
}
func showGuiError(err string) {
guiErrorsMut.Lock()
guiErrors = append(guiErrors, guiError{time.Now(), err})
@@ -199,3 +236,42 @@ func showGuiError(err string) {
}
guiErrorsMut.Unlock()
}
func basic(username string, passhash string) http.HandlerFunc {
return func(res http.ResponseWriter, req *http.Request) {
error := func() {
time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)
res.Header().Set("WWW-Authenticate", "Basic realm=\"Authorization Required\"")
http.Error(res, "Not Authorized", http.StatusUnauthorized)
}
hdr := req.Header.Get("Authorization")
if len(hdr) < len("Basic ") || hdr[:6] != "Basic " {
error()
return
}
hdr = hdr[6:]
bs, err := base64.StdEncoding.DecodeString(hdr)
if err != nil {
error()
return
}
fields := bytes.SplitN(bs, []byte(":"), 2)
if len(fields) != 2 {
error()
return
}
if string(fields[0]) != username {
error()
return
}
if err := bcrypt.CompareHashAndPassword([]byte(passhash), fields[1]); err != nil {
error()
return
}
}
}
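
With the gui.go changes above, basic auth no longer goes through the removed martini-contrib package: basic() parses the Authorization header itself and checks the password against the stored bcrypt hash, and startGUI now returns an error instead of warning from a goroutine. A hedged sketch of calling a protected REST endpoint from Go — address and credentials are placeholders:

~~~ go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://127.0.0.1:8080/rest/version", nil)
	if err != nil {
		panic(err)
	}
	// SetBasicAuth adds the "Authorization: Basic base64(user:pass)" header
	// that the basic() middleware above decodes and verifies.
	req.SetBasicAuth("username", "secretpassword")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
~~~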

View File

@@ -32,7 +32,7 @@ func embeddedStatic() interface{} {
if len(mtype) != 0 {
res.Header().Set("Content-Type", mtype)
}
res.Header().Set("Content-Size", fmt.Sprintf("%d", len(bs)))
res.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
res.Header().Set("Last-Modified", modt)
res.Write(bs)

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
@@ -15,24 +16,42 @@ import (
"runtime"
"runtime/debug"
"runtime/pprof"
"strconv"
"strings"
"time"
"github.com/calmh/syncthing/discover"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/upnp"
"github.com/juju/ratelimit"
)
const BlockSize = 128 * 1024
var cfg Configuration
var Version = "unknown-dev"
var (
Version = "unknown-dev"
BuildStamp = "0"
BuildDate time.Time
BuildHost = "unknown"
BuildUser = "unknown"
LongVersion string
)
func init() {
stamp, _ := strconv.Atoi(BuildStamp)
BuildDate = time.Unix(int64(stamp), 0)
date := BuildDate.UTC().Format(time.RFC3339)
LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
}
var (
cfg Configuration
myID string
confDir string
rateBucket *ratelimit.Bucket
stop = make(chan bool)
discoverer *discover.Discoverer
)
const (
@@ -56,6 +75,7 @@ const (
- "net" (connecting and disconnecting, network messages)
- "pull" (file pull activity)
- "scanner" (the file change scanner)
- "upnp" (the upnp port mapper)
STCPUPROFILE Write CPU profile to the specified file.`
)
@@ -63,9 +83,11 @@ const (
func main() {
var reset bool
var showVersion bool
var doUpgrade bool
flag.StringVar(&confDir, "home", getDefaultConfDir(), "Set configuration directory")
flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster")
flag.BoolVar(&showVersion, "version", false, "Show version")
flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
flag.Usage = usageFor(flag.CommandLine, usage, extraUsage)
flag.Parse()
@@ -75,7 +97,15 @@ func main() {
}
if showVersion {
fmt.Printf("syncthing %s (%s %s-%s)\n", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Println(LongVersion)
return
}
if doUpgrade {
err := upgrade()
if err != nil {
fatalln(err)
}
return
}
@@ -89,6 +119,22 @@ func main() {
confDir = expandTilde(confDir)
if _, err := os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
// We are supposed to use the default configuration directory. It
// doesn't exist. In the past our default has been ~/.syncthing, so if
// that directory exists we move it to the new default location and
// continue. We don't much care if this fails at this point, we will
// be checking that later.
oldDefault := expandTilde("~/.syncthing")
if _, err := os.Stat(oldDefault); err == nil {
os.MkdirAll(filepath.Dir(confDir), 0700)
if err := os.Rename(oldDefault, confDir); err == nil {
infoln("Moved config dir", oldDefault, "to", confDir)
}
}
}
// Ensure that our home directory exists and that we have a certificate and key.
ensureDir(confDir, 0700)
@@ -99,11 +145,11 @@ func main() {
fatalErr(err)
}
myID = string(certID(cert.Certificate[0]))
myID = certID(cert.Certificate[0])
log.SetPrefix("[" + myID[0:5] + "] ")
logger.SetPrefix("[" + myID[0:5] + "] ")
infoln("Version", Version)
infoln(LongVersion)
infoln("My ID:", myID)
// Prepare to be able to save configuration
@@ -117,26 +163,31 @@ func main() {
cf, err := os.Open(cfgFile)
if err == nil {
// Read config.xml
cfg, err = readConfigXML(cf)
cfg, err = readConfigXML(cf, myID)
if err != nil {
fatalln(err)
}
cf.Close()
}
if len(cfg.Repositories) == 0 {
} else {
infoln("No config file; starting with empty defaults")
name, _ := os.Hostname()
defaultRepo := filepath.Join(getHomeDir(), "Sync")
ensureDir(defaultRepo, 0755)
cfg, err = readConfigXML(nil)
cfg, err = readConfigXML(nil, myID)
cfg.Repositories = []RepositoryConfiguration{
{
ID: "default",
Directory: filepath.Join(getHomeDir(), "Sync"),
Directory: defaultRepo,
Nodes: []NodeConfiguration{{NodeID: myID}},
},
}
cfg.Nodes = []NodeConfiguration{
{NodeID: myID, Addresses: []string{"dynamic"}},
{
NodeID: myID,
Addresses: []string{"dynamic"},
Name: name,
},
}
saveConfig()
@@ -180,18 +231,19 @@ func main() {
m := NewModel(cfg.Options.MaxChangeKbps * 1000)
for i := range cfg.Repositories {
cfg.Repositories[i].Nodes = cleanNodeList(cfg.Repositories[i].Nodes, myID)
dir := expandTilde(cfg.Repositories[i].Directory)
ensureDir(dir, -1)
m.AddRepo(cfg.Repositories[i].ID, dir, cfg.Repositories[i].Nodes)
for _, repo := range cfg.Repositories {
if repo.Invalid != "" {
continue
}
dir := expandTilde(repo.Directory)
m.AddRepo(repo.ID, dir, repo.Nodes)
}
// GUI
if cfg.GUI.Enabled && cfg.GUI.Address != "" {
addr, err := net.ResolveTCPAddr("tcp", cfg.GUI.Address)
if err != nil {
warnf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
} else {
var hostOpen, hostShow string
switch {
@@ -207,7 +259,10 @@ func main() {
}
infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
startGUI(cfg.GUI, m)
err := startGUI(cfg.GUI, m)
if err != nil {
fatalln("Cannot start GUI:", err)
}
if cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
openURL(fmt.Sprintf("http://%s:%d", hostOpen, addr.Port))
}
@@ -219,14 +274,51 @@ func main() {
infoln("Populating repository index")
m.LoadIndexes(confDir)
for _, repo := range cfg.Repositories {
if repo.Invalid != "" {
continue
}
dir := expandTilde(repo.Directory)
// Safety check. If the cached index contains files but the repository
// doesn't exist, we have a problem. We would assume that all files
// have been deleted which might not be the case, so abort instead.
if files, _, _ := m.LocalSize(repo.ID); files > 0 {
if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
fatalf("Ensure that directory is present or remove repository from configuration.")
}
}
// Ensure that repository directories exist for newly configured repositories.
ensureDir(dir, -1)
}
m.ScanRepos()
m.SaveIndexes(confDir)
// UPnP
var externalPort = 0
if cfg.Options.UPnPEnabled {
// We seed the random number generator with the node ID to get a
// repeatable sequence of random external ports.
rand.Seed(certSeed(cert.Certificate[0]))
externalPort = setupUPnP()
}
// Routine to connect out to configured nodes
disc := discovery()
go listenConnect(myID, disc, m, tlsCfg)
discoverer = discovery(externalPort)
go listenConnect(myID, m, tlsCfg)
for _, repo := range cfg.Repositories {
if repo.Invalid != "" {
continue
}
// Routine to pull blocks from other nodes to synchronize the local
// repository. Does not run when we are in read only (publish only) mode.
if repo.ReadOnly {
@@ -250,6 +342,39 @@ func main() {
<-stop
}
func setupUPnP() int {
var externalPort = 0
if len(cfg.Options.ListenAddress) == 1 {
_, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
if err != nil {
warnln(err)
} else {
// Set up incoming port forwarding, if necessary and possible
port, _ := strconv.Atoi(portStr)
igd, err := upnp.Discover()
if err == nil {
for i := 0; i < 10; i++ {
r := 1024 + rand.Intn(65535-1024)
err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
if err == nil {
externalPort = r
infoln("Created UPnP port mapping - external port", externalPort)
break
}
}
if externalPort == 0 {
warnln("Failed to create UPnP port mapping")
}
} else {
infof("No UPnP IGD device found, no port mapping created (%v)", err)
}
}
} else {
warnln("Multiple listening addresses; not attempting UPnP port mapping")
}
return externalPort
}
func resetRepositories() {
suffix := fmt.Sprintf(".syncthing-reset-%d", time.Now().UnixNano())
for _, repo := range cfg.Repositories {
@@ -332,7 +457,7 @@ func saveConfig() {
saveConfigCh <- struct{}{}
}
func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config) {
func listenConnect(myID string, m *Model, tlsCfg *tls.Config) {
var conns = make(chan *tls.Conn)
// Listen
@@ -384,8 +509,8 @@ func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls
var addrs []string
for _, addr := range nodeCfg.Addresses {
if addr == "dynamic" {
if disc != nil {
t := disc.Lookup(nodeCfg.NodeID)
if discoverer != nil {
t := discoverer.Lookup(nodeCfg.NodeID)
if len(t) == 0 {
continue
}
@@ -397,6 +522,14 @@ func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls
}
for _, addr := range addrs {
host, port, err := net.SplitHostPort(addr)
if err != nil && strings.HasPrefix(err.Error(), "missing port") {
// addr is on the form "1.2.3.4"
addr = net.JoinHostPort(addr, "22000")
} else if err == nil && port == "" {
// addr is on the form "1.2.3.4:"
addr = net.JoinHostPort(host, "22000")
}
if debugNet {
dlog.Println("dial", nodeCfg.NodeID, addr)
}
@@ -454,23 +587,21 @@ next:
}
}
func discovery() *discover.Discoverer {
if !cfg.Options.LocalAnnEnabled {
func discovery(extPort int) *discover.Discoverer {
disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress)
if err != nil {
warnf("No discovery possible (%v)", err)
return nil
}
infoln("Sending local discovery announcements")
if !cfg.Options.GlobalAnnEnabled {
cfg.Options.GlobalAnnServer = ""
} else {
infoln("Sending external discovery announcements")
if cfg.Options.LocalAnnEnabled {
infoln("Sending local discovery announcements")
disc.StartLocal()
}
disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress, cfg.Options.GlobalAnnServer)
if err != nil {
warnf("No discovery possible (%v)", err)
if cfg.Options.GlobalAnnEnabled {
infoln("Sending global discovery announcements")
disc.StartGlobal(cfg.Options.GlobalAnnServer, uint16(extPort))
}
return disc
@@ -487,39 +618,47 @@ func ensureDir(dir string, mode int) {
}
}
func getDefaultConfDir() string {
switch runtime.GOOS {
case "windows":
return filepath.Join(os.Getenv("AppData"), "Syncthing")
case "darwin":
return expandTilde("~/Library/Application Support/Syncthing")
default:
if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" {
return filepath.Join(xdgCfg, "syncthing")
} else {
return expandTilde("~/.config/syncthing")
}
}
}
func expandTilde(p string) string {
if runtime.GOOS == "windows" {
if runtime.GOOS == "windows" || !strings.HasPrefix(p, "~/") {
return p
}
if strings.HasPrefix(p, "~/") {
return strings.Replace(p, "~", getUnixHomeDir(), 1)
}
return p
}
func getUnixHomeDir() string {
home := os.Getenv("HOME")
if home == "" {
fatalln("No home directory?")
}
return home
return filepath.Join(getHomeDir(), p[2:])
}
func getHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return getUnixHomeDir()
}
var home string
func getDefaultConfDir() string {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("AppData"), "syncthing")
switch runtime.GOOS {
case "windows":
home = filepath.Join(os.Getenv("HomeDrive"), os.Getenv("HomePath"))
if home == "" {
home = os.Getenv("UserProfile")
}
default:
home = os.Getenv("HOME")
}
return expandTilde("~/.syncthing")
if home == "" {
fatalln("No home directory found - set $HOME (or the platform equivalent).")
}
return home
}
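
Among the main.go changes above, listenConnect now tolerates node addresses written without a port: when net.SplitHostPort reports a missing port, or parses an empty one, the default port 22000 is appended. A small standalone sketch of that normalization (not the repository's code; the error-string check is phrased loosely since its exact wording differs between Go releases):

~~~ go
package main

import (
	"fmt"
	"net"
	"strings"
)

// withDefaultPort appends ":22000" to addresses that lack a port, mirroring
// the normalization in the listenConnect hunk above.
func withDefaultPort(addr string) string {
	host, port, err := net.SplitHostPort(addr)
	if err != nil && strings.Contains(err.Error(), "missing port") {
		// addr is on the form "1.2.3.4"
		return net.JoinHostPort(addr, "22000")
	}
	if err == nil && port == "" {
		// addr is on the form "1.2.3.4:"
		return net.JoinHostPort(host, "22000")
	}
	return addr
}

func main() {
	for _, a := range []string{"1.2.3.4", "1.2.3.4:", "1.2.3.4:22001"} {
		fmt.Println(a, "->", withDefaultPort(a))
	}
}
~~~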

View File

@@ -80,8 +80,8 @@ func NewModel(maxChangeBw int) *Model {
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRepoRW(repo string, threads int) {
m.rmut.Lock()
defer m.rmut.Unlock()
m.rmut.RLock()
defer m.rmut.RUnlock()
if dir, ok := m.repoDirs[repo]; !ok {
panic("cannot start without repo")
@@ -423,14 +423,16 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
cm := m.clusterConfig(nodeID)
protoConn.ClusterConfig(cm)
var idxToSend = make(map[string][]protocol.FileInfo)
m.rmut.RLock()
for _, repo := range m.nodeRepos[nodeID] {
idxToSend[repo] = m.protocolIndex(repo)
}
m.rmut.RUnlock()
go func() {
m.rmut.RLock()
repos := m.nodeRepos[nodeID]
m.rmut.RUnlock()
for _, repo := range repos {
m.rmut.RLock()
idx := m.protocolIndex(repo)
m.rmut.RUnlock()
for repo, idx := range idxToSend {
if debugNet {
dlog.Printf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx))
}
@@ -559,7 +561,7 @@ func (m *Model) ScanRepos() {
func (m *Model) ScanRepo(repo string) {
sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
m.rmut.Lock()
m.rmut.RLock()
w := &scanner.Walker{
Dir: m.repoDirs[repo],
IgnoreFile: ".stignore",
@@ -568,7 +570,7 @@ func (m *Model) ScanRepo(repo string) {
Suppressor: sup,
CurrentFiler: cFiler{m, repo},
}
m.rmut.Unlock()
m.rmut.RUnlock()
m.setState(repo, RepoScanning)
fs, _ := w.Walk()
m.ReplaceLocal(repo, fs)
@@ -648,7 +650,7 @@ func (m *Model) clusterConfig(node string) protocol.ClusterConfigMessage {
ClientVersion: Version,
}
m.rmut.Lock()
m.rmut.RLock()
for _, repo := range m.nodeRepos[node] {
cr := protocol.Repository{
ID: repo,
@@ -662,7 +664,7 @@ func (m *Model) clusterConfig(node string) protocol.ClusterConfigMessage {
}
cm.Repositories = append(cm.Repositories, cr)
}
m.rmut.Unlock()
m.rmut.RUnlock()
return cm
}
@@ -674,9 +676,9 @@ func (m *Model) setState(repo string, state repoState) {
}
func (m *Model) State(repo string) string {
m.rmut.Lock()
m.rmut.RLock()
state := m.repoState[repo]
m.rmut.Unlock()
m.rmut.RUnlock()
switch state {
case RepoIdle:
return "idle"

View File

@@ -1,34 +0,0 @@
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os/exec"
"runtime"
)
func openURL(url string) error {
if runtime.GOOS == "windows" {
return exec.Command("cmd.exe", "/C", "start "+url).Run()
}
if runtime.GOOS == "darwin" {
return exec.Command("open", url).Run()
}
return exec.Command("xdg-open", url).Run()
}

View File

@@ -0,0 +1,23 @@
// +build !windows
package main
import (
"os/exec"
"runtime"
"syscall"
)
func openURL(url string) error {
switch runtime.GOOS {
case "darwin":
return exec.Command("open", url).Run()
default:
cmd := exec.Command("xdg-open", url)
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
return cmd.Run()
}
}

View File

@@ -0,0 +1,9 @@
// +build windows
package main
import "os/exec"
func openURL(url string) error {
return exec.Command("cmd.exe", "/C", "start "+url).Run()
}

View File

@@ -135,7 +135,10 @@ func (p *puller) run() {
case b := <-p.blocks:
p.model.setState(p.repo, RepoSyncing)
changed = true
p.handleBlock(b)
if p.handleBlock(b) {
// Block was fully handled, free up the slot
p.requestSlots <- true
}
case <-timeout:
if len(p.openFiles) == 0 && p.bq.empty() {
@@ -193,7 +196,7 @@ func (p *puller) runRO() {
func (p *puller) fixupDirectories() {
var deleteDirs []string
fn := func(path string, info os.FileInfo, err error) error {
filepath.Walk(p.dir, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
return nil
}
@@ -242,8 +245,7 @@ func (p *puller) fixupDirectories() {
}
return nil
}
filepath.Walk(p.dir, fn)
})
// Delete any queued directories
for i := len(deleteDirs) - 1; i >= 0; i-- {
@@ -278,53 +280,14 @@ func (p *puller) handleRequestResult(res requestResult) {
}
if of.done && of.outstanding == 0 {
if debugPull {
dlog.Printf("pull: closing %q / %q", p.repo, f.Name)
}
of.file.Close()
defer os.Remove(of.temp)
delete(p.openFiles, f.Name)
fd, err := os.Open(of.temp)
if err != nil {
if debugPull {
dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
}
return
}
hb, _ := scanner.Blocks(fd, BlockSize)
fd.Close()
if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
if debugPull {
dlog.Printf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
}
return
}
for i := range hb {
if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
dlog.Printf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
return
}
}
t := time.Unix(f.Modified, 0)
os.Chtimes(of.temp, t, t)
os.Chmod(of.temp, os.FileMode(f.Flags&0777))
if debugPull {
dlog.Printf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
}
if err := Rename(of.temp, of.filepath); err == nil {
p.model.updateLocal(p.repo, f)
} else {
dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
}
p.closeFile(f)
}
}
func (p *puller) handleBlock(b bqBlock) {
// handleBlock fulfills the block request by copying, ignoring or fetching
// from the network. Returns true if the block was fully handled
// synchronously, i.e. if the slot can be reused.
func (p *puller) handleBlock(b bqBlock) bool {
f := b.file
// For directories, simply making sure they exist is enough
@@ -335,8 +298,7 @@ func (p *puller) handleBlock(b bqBlock) {
os.MkdirAll(path, 0777)
}
p.model.updateLocal(p.repo, f)
p.requestSlots <- true
return
return true
}
of, ok := p.openFiles[f.Name]
@@ -368,9 +330,9 @@ func (p *puller) handleBlock(b bqBlock) {
if !b.last {
p.openFiles[f.Name] = of
}
p.requestSlots <- true
return
return true
}
defTempNamer.Hide(of.temp)
}
if of.err != nil {
@@ -383,8 +345,7 @@ func (p *puller) handleBlock(b bqBlock) {
delete(p.openFiles, f.Name)
}
p.requestSlots <- true
return
return true
}
p.openFiles[f.Name] = of
@@ -392,15 +353,14 @@ func (p *puller) handleBlock(b bqBlock) {
switch {
case len(b.copy) > 0:
p.handleCopyBlock(b)
p.requestSlots <- true
return true
case b.block.Size > 0:
p.handleRequestBlock(b)
// Request slot gets freed in <-p.blocks case
return p.handleRequestBlock(b)
default:
p.handleEmptyBlock(b)
p.requestSlots <- true
return true
}
}
@@ -448,11 +408,15 @@ func (p *puller) handleCopyBlock(b bqBlock) {
}
}
func (p *puller) handleRequestBlock(b bqBlock) {
// We have a block to get from the network
// handleRequestBlock tries to pull a block from the network. Returns true if
// the block could _not_ be fetched (i.e. it was fully handled, matching the
// return criteria of handleBlock)
func (p *puller) handleRequestBlock(b bqBlock) bool {
f := b.file
of := p.openFiles[f.Name]
of, ok := p.openFiles[f.Name]
if !ok {
panic("bug: request for non-open file")
}
node := p.oustandingPerNode.leastBusyNode(of.availability, p.model.cm)
if len(node) == 0 {
@@ -467,8 +431,7 @@ func (p *puller) handleRequestBlock(b bqBlock) {
} else {
p.openFiles[f.Name] = of
}
p.requestSlots <- true
return
return true
}
of.outstanding++
@@ -489,6 +452,8 @@ func (p *puller) handleRequestBlock(b bqBlock) {
err: err,
}
}(node, b)
return false
}
func (p *puller) handleEmptyBlock(b bqBlock) {
@@ -514,6 +479,7 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
t := time.Unix(f.Modified, 0)
os.Chtimes(of.temp, t, t)
os.Chmod(of.temp, os.FileMode(f.Flags&0777))
defTempNamer.Show(of.temp)
Rename(of.temp, of.filepath)
}
delete(p.openFiles, f.Name)
@@ -539,3 +505,52 @@ func (p *puller) queueNeededBlocks() {
dlog.Printf("%q: queued %d blocks", p.repo, queued)
}
}
func (p *puller) closeFile(f scanner.File) {
if debugPull {
dlog.Printf("pull: closing %q / %q", p.repo, f.Name)
}
of := p.openFiles[f.Name]
of.file.Close()
defer os.Remove(of.temp)
delete(p.openFiles, f.Name)
fd, err := os.Open(of.temp)
if err != nil {
if debugPull {
dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
}
return
}
hb, _ := scanner.Blocks(fd, BlockSize)
fd.Close()
if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
if debugPull {
dlog.Printf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
}
return
}
for i := range hb {
if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
dlog.Printf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
return
}
}
t := time.Unix(f.Modified, 0)
os.Chtimes(of.temp, t, t)
os.Chmod(of.temp, os.FileMode(f.Flags&0777))
defTempNamer.Show(of.temp)
if debugPull {
dlog.Printf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
}
if err := Rename(of.temp, of.filepath); err == nil {
p.model.updateLocal(p.repo, f)
} else {
dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
}
}
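
The puller refactoring above makes handleBlock report whether a block was fully dealt with synchronously; only then is the request slot handed straight back, while blocks requested over the network return their slot when the response arrives on p.blocks. The slots are simply tokens in a buffered channel used as a counting semaphore. A minimal sketch of that pattern with hypothetical names (not the repository's code):

~~~ go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const slots = 4
	requestSlots := make(chan bool, slots)
	for i := 0; i < slots; i++ {
		requestSlots <- true // fill the semaphore with available tokens
	}

	var wg sync.WaitGroup
	for job := 0; job < 10; job++ {
		<-requestSlots // take a slot; blocks while all four are in use
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Println("handling block", n)
			requestSlots <- true // hand the slot back when the work is done
		}(job)
	}
	wg.Wait()
}
~~~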

View File

@@ -1,3 +1,5 @@
// +build !windows
package main
import (
@@ -21,3 +23,11 @@ func (t tempNamer) TempName(name string) string {
tname := fmt.Sprintf("%s.%s", t.prefix, filepath.Base(name))
return filepath.Join(tdir, tname)
}
func (t tempNamer) Hide(path string) error {
return nil
}
func (t tempNamer) Show(path string) error {
return nil
}

View File

@@ -0,0 +1,56 @@
// +build windows
package main
import (
"fmt"
"path/filepath"
"strings"
"syscall"
)
type tempNamer struct {
prefix string
}
var defTempNamer = tempNamer{"~syncthing~"}
func (t tempNamer) IsTemporary(name string) bool {
return strings.HasPrefix(filepath.Base(name), t.prefix)
}
func (t tempNamer) TempName(name string) string {
tdir := filepath.Dir(name)
tname := fmt.Sprintf("%s.%s.tmp", t.prefix, filepath.Base(name))
return filepath.Join(tdir, tname)
}
func (t tempNamer) Hide(path string) error {
p, err := syscall.UTF16PtrFromString(path)
if err != nil {
return err
}
attrs, err := syscall.GetFileAttributes(p)
if err != nil {
return err
}
attrs |= syscall.FILE_ATTRIBUTE_HIDDEN
return syscall.SetFileAttributes(p, attrs)
}
func (t tempNamer) Show(path string) error {
p, err := syscall.UTF16PtrFromString(path)
if err != nil {
return err
}
attrs, err := syscall.GetFileAttributes(p)
if err != nil {
return err
}
attrs &^= syscall.FILE_ATTRIBUTE_HIDDEN
return syscall.SetFileAttributes(p, attrs)
}

View File

@@ -8,6 +8,7 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/base32"
"encoding/binary"
"encoding/pem"
"math/big"
"os"
@@ -32,6 +33,13 @@ func certID(bs []byte) string {
return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
}
func certSeed(bs []byte) int64 {
hf := sha256.New()
hf.Write(bs)
id := hf.Sum(nil)
return int64(binary.BigEndian.Uint64(id))
}
func newCertificate(dir string) {
infoln("Generating RSA certificate and key...")

View File

@@ -0,0 +1,146 @@
// +build !windows
package main
import (
"archive/tar"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"bitbucket.org/kardianos/osext"
)
type githubRelease struct {
Tag string `json:"tag_name"`
Prelease bool `json:"prerelease"`
Assets []githubAsset `json:"assets"`
}
type githubAsset struct {
URL string `json:"url"`
Name string `json:"name"`
}
func upgrade() error {
path, err := osext.Executable()
if err != nil {
return err
}
resp, err := http.Get("https://api.github.com/repos/calmh/syncthing/releases?per_page=1")
if err != nil {
return err
}
var rels []githubRelease
json.NewDecoder(resp.Body).Decode(&rels)
resp.Body.Close()
if len(rels) != 1 {
return fmt.Errorf("Unexpected number of releases: %d", len(rels))
}
rel := rels[0]
if rel.Tag > Version {
infof("Attempting upgrade to %s...", rel.Tag)
} else if rel.Tag == Version {
okf("Already running the latest version, %s. Not upgrading.", Version)
return nil
} else {
okf("Current version %s is newer than latest release %s. Not upgrading.", Version, rel.Tag)
return nil
}
expectedRelease := fmt.Sprintf("syncthing-%s-%s-%s.", runtime.GOOS, runtime.GOARCH, rel.Tag)
for _, asset := range rel.Assets {
if strings.HasPrefix(asset.Name, expectedRelease) {
if strings.HasSuffix(asset.Name, ".tar.gz") {
infof("Downloading %s...", asset.Name)
fname, err := readTarGZ(asset.URL, filepath.Dir(path))
if err != nil {
return err
}
old := path + "." + Version
err = os.Rename(path, old)
if err != nil {
return err
}
err = os.Rename(fname, path)
if err != nil {
return err
}
okf("Upgraded %q to %s.", path, rel.Tag)
okf("Previous version saved in %q.", old)
return nil
}
}
}
return nil
}
func readTarGZ(url string, dir string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Header.Add("Accept", "application/octet-stream")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
gr, err := gzip.NewReader(resp.Body)
if err != nil {
return "", err
}
tr := tar.NewReader(gr)
if err != nil {
return "", err
}
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return "", err
}
if path.Base(hdr.Name) == "syncthing" {
of, err := ioutil.TempFile(dir, "syncthing")
if err != nil {
return "", err
}
io.Copy(of, tr)
err = of.Close()
if err != nil {
os.Remove(of.Name())
return "", err
}
os.Chmod(of.Name(), os.FileMode(hdr.Mode))
return of.Name(), nil
}
}
return "", fmt.Errorf("No upgrade found")
}

View File

@@ -0,0 +1,9 @@
// +build windows
package main
import "errors"
func upgrade() error {
return errors.New("Upgrade currently unsupported on Windows")
}

View File

@@ -117,8 +117,6 @@ func compareClusterConfig(local, remote protocol.ClusterConfigMessage) error {
if lflags&protocol.FlagShareBits != rflags&protocol.FlagShareBits {
return ClusterConfigMismatch(fmt.Errorf("remote has different sharing flags for node %q in repository %q", node, repo))
}
} else {
return ClusterConfigMismatch(fmt.Errorf("remote is missing node %q in repository %q", node, repo))
}
}
} else {
@@ -126,14 +124,8 @@ func compareClusterConfig(local, remote protocol.ClusterConfigMessage) error {
}
}
for repo, rnodes := range rm {
if lnodes, ok := lm[repo]; ok {
for node := range rnodes {
if _, ok := lnodes[node]; !ok {
return ClusterConfigMismatch(fmt.Errorf("remote has extra node %q in repository %q", node, repo))
}
}
} else {
for repo := range rm {
if _, ok := lm[repo]; !ok {
return ClusterConfigMismatch(fmt.Errorf("remote has extra repository %q", repo))
}

View File

@@ -103,7 +103,7 @@ var testcases = []struct {
{ID: "bar"},
},
},
err: `remote is missing node "a" in repository "foo"`,
err: "",
},
{
@@ -130,7 +130,7 @@ var testcases = []struct {
{ID: "bar"},
},
},
err: `remote has extra node "b" in repository "foo"`,
err: "",
},
{

View File

@@ -4,6 +4,7 @@ import (
"encoding/binary"
"encoding/hex"
"flag"
"fmt"
"log"
"net"
"os"
@@ -26,22 +27,28 @@ type Address struct {
}
var (
nodes = make(map[string]Node)
lock sync.Mutex
queries = 0
answered = 0
limited = 0
debug = false
limiter = lru.New(1024)
nodes = make(map[string]Node)
lock sync.Mutex
queries = 0
announces = 0
answered = 0
limited = 0
unknowns = 0
debug = false
limiter = lru.New(1024)
)
func main() {
var listen string
var timestamp bool
var statsIntv int
var statsFile string
flag.StringVar(&listen, "listen", ":22025", "Listen address")
flag.BoolVar(&debug, "debug", false, "Enable debug output")
flag.BoolVar(&timestamp, "timestamp", true, "Timestamp the log output")
flag.IntVar(&statsIntv, "stats-intv", 0, "Statistics output interval (s)")
flag.StringVar(&statsFile, "stats-file", "/var/log/discosrv.stats", "Statistics file name")
flag.Parse()
log.SetOutput(os.Stdout)
@@ -55,7 +62,9 @@ func main() {
log.Fatal(err)
}
go logStats()
if statsIntv > 0 {
go logStats(statsFile, statsIntv)
}
var buf = make([]byte, 1024)
for {
@@ -80,17 +89,16 @@ func main() {
magic := binary.BigEndian.Uint32(buf)
switch magic {
case discover.AnnouncementMagicV1:
handleAnnounceV1(addr, buf)
case discover.QueryMagicV1:
handleQueryV1(conn, addr, buf)
case discover.AnnouncementMagicV2:
handleAnnounceV2(addr, buf)
case discover.QueryMagicV2:
handleQueryV2(conn, addr, buf)
default:
lock.Lock()
unknowns++
lock.Unlock()
}
}
}
@@ -107,16 +115,14 @@ func limit(addr *net.UDPAddr) bool {
if bkt.TakeAvailable(1) != 1 {
// Rate limit exceeded; ignore packet
if debug {
log.Printf("Rate limit exceeded for", key)
log.Println("Rate limit exceeded for", key)
}
limited++
return true
} else if debug {
log.Printf("Rate limit OK for", key)
}
} else {
if debug {
log.Printf("New limiter for", key)
log.Println("New limiter for", key)
}
// One packet per ten seconds average rate, burst ten packets
limiter.Add(key, ratelimit.NewBucket(10*time.Second, 10))
@@ -125,75 +131,6 @@ func limit(addr *net.UDPAddr) bool {
return false
}
func handleAnnounceV1(addr *net.UDPAddr, buf []byte) {
var pkt discover.AnnounceV1
err := pkt.UnmarshalXDR(buf)
if err != nil {
log.Println("AnnounceV1 Unmarshal:", err)
log.Println(hex.Dump(buf))
return
}
if debug {
log.Printf("<- %v %#v", addr, pkt)
}
ip := addr.IP.To4()
if ip == nil {
ip = addr.IP.To16()
}
node := Node{
Addresses: []Address{{
IP: ip,
Port: pkt.Port,
}},
Updated: time.Now(),
}
lock.Lock()
nodes[pkt.NodeID] = node
lock.Unlock()
}
func handleQueryV1(conn *net.UDPConn, addr *net.UDPAddr, buf []byte) {
var pkt discover.QueryV1
err := pkt.UnmarshalXDR(buf)
if err != nil {
log.Println("QueryV1 Unmarshal:", err)
log.Println(hex.Dump(buf))
return
}
if debug {
log.Printf("<- %v %#v", addr, pkt)
}
lock.Lock()
node, ok := nodes[pkt.NodeID]
queries++
lock.Unlock()
if ok && len(node.Addresses) > 0 {
pkt := discover.AnnounceV1{
Magic: discover.AnnouncementMagicV1,
NodeID: pkt.NodeID,
Port: node.Addresses[0].Port,
IP: node.Addresses[0].IP,
}
if debug {
log.Printf("-> %v %#v", addr, pkt)
}
tb := pkt.MarshalXDR()
_, _, err = conn.WriteMsgUDP(tb, nil, addr)
if err != nil {
log.Println("QueryV1 response write:", err)
}
lock.Lock()
answered++
lock.Unlock()
}
}
func handleAnnounceV2(addr *net.UDPAddr, buf []byte) {
var pkt discover.AnnounceV2
err := pkt.UnmarshalXDR(buf)
@@ -206,6 +143,10 @@ func handleAnnounceV2(addr *net.UDPAddr, buf []byte) {
log.Printf("<- %v %#v", addr, pkt)
}
lock.Lock()
announces++
lock.Unlock()
ip := addr.IP.To4()
if ip == nil {
ip = addr.IP.To16()
@@ -274,9 +215,21 @@ func handleQueryV2(conn *net.UDPConn, addr *net.UDPAddr, buf []byte) {
}
}
func logStats() {
func next(intv int) time.Time {
d := time.Duration(intv) * time.Second
t0 := time.Now()
t1 := t0.Add(d).Truncate(d)
time.Sleep(t1.Sub(t0))
return t1
}
func logStats(file string, intv int) {
f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
log.Fatal(err)
}
for {
time.Sleep(600 * time.Second)
t := next(intv)
lock.Lock()
@@ -288,11 +241,15 @@ func logStats() {
}
}
log.Printf("Expired %d nodes; %d nodes in registry; %d queries (%d answered)", deleted, len(nodes), queries, answered)
log.Printf("Limited %d queries; %d entries in limiter cache", limited, limiter.Len())
fmt.Fprintf(f, "%d Nr:%d Ne:%d Qt:%d Qa:%d A:%d U:%d Lq:%d Lc:%d\n",
t.Unix(), len(nodes), deleted, queries, answered, announces, unknowns, limited, limiter.Len())
f.Sync()
queries = 0
announces = 0
answered = 0
limited = 0
unknowns = 0
lock.Unlock()
}
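
The discosrv changes above write periodic statistics to a file and align the output to wall-clock boundaries: next() advances the current time by the interval and truncates it down to a multiple of the interval, then sleeps until that instant, so a 300-second interval produces entries at :00, :05, :10 and so on regardless of when the server started. A self-contained sketch of the alignment trick:

~~~ go
package main

import (
	"fmt"
	"time"
)

// next sleeps until the next multiple of intv seconds and returns that time,
// mirroring the boundary alignment used for the stats output above.
func next(intv int) time.Time {
	d := time.Duration(intv) * time.Second
	t0 := time.Now()
	t1 := t0.Add(d).Truncate(d)
	time.Sleep(t1.Sub(t0))
	return t1
}

func main() {
	fmt.Println("started at ", time.Now().Format("15:04:05"))
	t := next(10) // wakes on the next 10-second boundary
	fmt.Println("boundary at", t.Format("15:04:05"))
}
~~~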

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"log"
"net"
"strings"
"sync"
"time"
@@ -19,16 +18,19 @@ const (
)
type Discoverer struct {
MyID string
ListenAddresses []string
BroadcastIntv time.Duration
ExtBroadcastIntv time.Duration
beacon *mc.Beacon
registry map[string][]string
registryLock sync.RWMutex
extServer string
localBroadcastTick <-chan time.Time
forcedBroadcastTick chan time.Time
myID string
listenAddrs []string
localBcastIntv time.Duration
globalBcastIntv time.Duration
beacon *mc.Beacon
registry map[string][]string
registryLock sync.RWMutex
extServer string
extPort uint16
localBcastTick <-chan time.Time
forcedBcastTick chan time.Time
extAnnounceOK bool
extAnnounceOKmut sync.Mutex
}
var (
@@ -40,43 +42,56 @@ var (
// When we hit this many errors in succession, we stop.
const maxErrors = 30
func NewDiscoverer(id string, addresses []string, extServer string) (*Discoverer, error) {
func NewDiscoverer(id string, addresses []string) (*Discoverer, error) {
disc := &Discoverer{
MyID: id,
ListenAddresses: addresses,
BroadcastIntv: 30 * time.Second,
ExtBroadcastIntv: 1800 * time.Second,
beacon: mc.NewBeacon("239.21.0.25", 21025),
registry: make(map[string][]string),
extServer: extServer,
myID: id,
listenAddrs: addresses,
localBcastIntv: 30 * time.Second,
globalBcastIntv: 1800 * time.Second,
beacon: mc.NewBeacon("239.21.0.25", 21025),
registry: make(map[string][]string),
}
// Receive announcements sent to the local multicast group.
go disc.recvAnnouncements()
// If we got a list of addresses that we listen on, announce those
// locally.
if len(disc.ListenAddresses) > 0 {
disc.localBroadcastTick = time.Tick(disc.BroadcastIntv)
disc.forcedBroadcastTick = make(chan time.Time)
go disc.sendLocalAnnouncements()
// If we have an external server address, also announce to that
// server.
if len(disc.extServer) > 0 {
go disc.sendExternalAnnouncements()
}
}
return disc, nil
}
func (d *Discoverer) StartLocal() {
d.localBcastTick = time.Tick(d.localBcastIntv)
d.forcedBcastTick = make(chan time.Time)
go d.sendLocalAnnouncements()
}
func (d *Discoverer) StartGlobal(server string, extPort uint16) {
d.extServer = server
d.extPort = extPort
go d.sendExternalAnnouncements()
}
func (d *Discoverer) ExtAnnounceOK() bool {
d.extAnnounceOKmut.Lock()
defer d.extAnnounceOKmut.Unlock()
return d.extAnnounceOK
}
func (d *Discoverer) Lookup(node string) []string {
d.registryLock.Lock()
addr, ok := d.registry[node]
d.registryLock.Unlock()
if ok {
return addr
} else if len(d.extServer) != 0 {
// We might want to cache this, but not permanently so it needs some intelligence
return d.externalLookup(node)
}
return nil
}
func (d *Discoverer) announcementPkt() []byte {
var addrs []Address
for _, astr := range d.ListenAddresses {
for _, astr := range d.listenAddrs {
addr, err := net.ResolveTCPAddr("tcp", astr)
if err != nil {
log.Printf("discover/announcement: %v: not announcing %s", err, astr)
@@ -94,7 +109,7 @@ func (d *Discoverer) announcementPkt() []byte {
}
var pkt = AnnounceV2{
Magic: AnnouncementMagicV2,
NodeID: d.MyID,
NodeID: d.myID,
Addresses: addrs,
}
return pkt.MarshalXDR()
@@ -107,8 +122,8 @@ func (d *Discoverer) sendLocalAnnouncements() {
d.beacon.Send(buf)
select {
case <-d.localBroadcastTick:
case <-d.forcedBroadcastTick:
case <-d.localBcastTick:
case <-d.forcedBcastTick:
}
}
}
@@ -126,21 +141,54 @@ func (d *Discoverer) sendExternalAnnouncements() {
return
}
var buf = d.announcementPkt()
var buf []byte
if d.extPort != 0 {
var pkt = AnnounceV2{
Magic: AnnouncementMagicV2,
NodeID: d.myID,
Addresses: []Address{{Port: d.extPort}},
}
buf = pkt.MarshalXDR()
} else {
buf = d.announcementPkt()
}
var errCounter = 0
for errCounter < maxErrors {
var ok bool
if debug {
dlog.Println("send announcement -> ", remote)
dlog.Printf("send announcement -> %v\n%s", remote, hex.Dump(buf))
}
_, err = conn.WriteTo(buf, remote)
if err != nil {
log.Println("discover/write: warning:", err)
errCounter++
ok = false
} else {
errCounter = 0
// Verify that the announce server responds positively for our node ID
time.Sleep(1 * time.Second)
res := d.externalLookup(d.myID)
if debug {
dlog.Println("external lookup check:", res)
}
ok = len(res) > 0
}
d.extAnnounceOKmut.Lock()
d.extAnnounceOK = ok
d.extAnnounceOKmut.Unlock()
if ok {
time.Sleep(d.globalBcastIntv)
} else {
time.Sleep(60 * time.Second)
}
time.Sleep(d.ExtBroadcastIntv)
}
log.Printf("discover/write: %v: stopping due to too many errors: %v", remote, err)
}
@@ -163,12 +211,12 @@ func (d *Discoverer) recvAnnouncements() {
dlog.Printf("parsed announcement: %#v", pkt)
}
if pkt.NodeID != d.MyID {
if pkt.NodeID != d.myID {
var addrs []string
for _, a := range pkt.Addresses {
var nodeAddr string
if len(a.IP) > 0 {
nodeAddr = fmt.Sprintf("%s:%d", ipStr(a.IP), a.Port)
nodeAddr = fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)
} else {
ua := addr.(*net.UDPAddr)
ua.Port = int(a.Port)
@@ -183,7 +231,7 @@ func (d *Discoverer) recvAnnouncements() {
_, seen := d.registry[pkt.NodeID]
if !seen {
select {
case d.forcedBroadcastTick <- time.Now():
case d.forcedBcastTick <- time.Now():
}
}
d.registry[pkt.NodeID] = addrs
@@ -250,39 +298,8 @@ func (d *Discoverer) externalLookup(node string) []string {
var addrs []string
for _, a := range pkt.Addresses {
var nodeAddr string
if len(a.IP) > 0 {
nodeAddr = fmt.Sprintf("%s:%d", ipStr(a.IP), a.Port)
}
nodeAddr := fmt.Sprintf("%s:%d", net.IP(a.IP), a.Port)
addrs = append(addrs, nodeAddr)
}
return addrs
}
func (d *Discoverer) Lookup(node string) []string {
d.registryLock.Lock()
addr, ok := d.registry[node]
d.registryLock.Unlock()
if ok {
return addr
} else if len(d.extServer) != 0 {
// We might want to cache this, but not permanently so it needs some intelligence
return d.externalLookup(node)
}
return nil
}
func ipStr(ip []byte) string {
var f = "%d"
var s = "."
if len(ip) > 4 {
f = "%x"
s = ":"
}
var ss = make([]string, len(ip))
for i := range ip {
ss[i] = fmt.Sprintf(f, ip[i])
}
return strings.Join(ss, s)
}
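
The discovery changes above drop the hand-rolled ipStr helper: converting the raw announcement bytes to net.IP and formatting it with %s produces the same dotted-quad (or colon-separated IPv6) text. A tiny demonstration (not the repository's code):

~~~ go
package main

import (
	"fmt"
	"net"
)

func main() {
	v4 := []byte{192, 0, 2, 1}
	v6 := net.ParseIP("2001:db8::1")

	// net.IP's String method renders IPv4 as dotted quads and IPv6 with
	// colons, which is what the removed ipStr helper reimplemented.
	fmt.Printf("%s:%d\n", net.IP(v4), 22000) // 192.0.2.1:22000
	fmt.Printf("%s:%d\n", v6, 22000)         // 2001:db8::1:22000
}
~~~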

View File

@@ -1,22 +1,5 @@
package discover
const (
AnnouncementMagicV1 = 0x20121025
QueryMagicV1 = 0x19760309
)
type QueryV1 struct {
Magic uint32
NodeID string // max:64
}
type AnnounceV1 struct {
Magic uint32
Port uint16
NodeID string // max:64
IP []byte // max:16
}
const (
AnnouncementMagicV2 = 0x029E4C77
QueryMagicV2 = 0x23D63A9A

View File

@@ -7,89 +7,6 @@ import (
"github.com/calmh/syncthing/xdr"
)
func (o QueryV1) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o QueryV1) MarshalXDR() []byte {
var buf bytes.Buffer
var xw = xdr.NewWriter(&buf)
o.encodeXDR(xw)
return buf.Bytes()
}
func (o QueryV1) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
if len(o.NodeID) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.NodeID)
return xw.Tot(), xw.Error()
}
func (o *QueryV1) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *QueryV1) UnmarshalXDR(bs []byte) error {
var buf = bytes.NewBuffer(bs)
var xr = xdr.NewReader(buf)
return o.decodeXDR(xr)
}
func (o *QueryV1) decodeXDR(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
o.NodeID = xr.ReadStringMax(64)
return xr.Error()
}
func (o AnnounceV1) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
}
func (o AnnounceV1) MarshalXDR() []byte {
var buf bytes.Buffer
var xw = xdr.NewWriter(&buf)
o.encodeXDR(xw)
return buf.Bytes()
}
func (o AnnounceV1) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
xw.WriteUint16(o.Port)
if len(o.NodeID) > 64 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteString(o.NodeID)
if len(o.IP) > 16 {
return xw.Tot(), xdr.ErrElementSizeExceeded
}
xw.WriteBytes(o.IP)
return xw.Tot(), xw.Error()
}
func (o *AnnounceV1) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
}
func (o *AnnounceV1) UnmarshalXDR(bs []byte) error {
var buf = bytes.NewBuffer(bs)
var xr = xdr.NewReader(buf)
return o.decodeXDR(xr)
}
func (o *AnnounceV1) decodeXDR(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
o.Port = xr.ReadUint16()
o.NodeID = xr.ReadStringMax(64)
o.IP = xr.ReadBytesMax(16)
return xr.Error()
}
func (o QueryV2) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)

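The V2 types follow the same generated pattern as the V1 codecs deleted above, so a roundtrip looks roughly like the sketch below. This assumes, based on that pattern, that AnnounceV2 also exposes MarshalXDR/UnmarshalXDR and that the package lives at github.com/calmh/syncthing/discover; treat both as assumptions rather than documented API:

package main

import (
    "fmt"

    "github.com/calmh/syncthing/discover"
)

func main() {
    pkt := discover.AnnounceV2{
        Magic:     discover.AnnouncementMagicV2,
        NodeID:    "EXAMPLE-NODE-ID",
        Addresses: []discover.Address{{Port: 22000}},
    }
    bs := pkt.MarshalXDR() // encode to the wire format

    var back discover.AnnounceV2
    if err := back.UnmarshalXDR(bs); err != nil {
        fmt.Println("decode:", err)
        return
    }
    fmt.Printf("%d bytes on the wire, decoded node %q\n", len(bs), back.NodeID)
}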
View File

@@ -108,8 +108,8 @@ func (m *Set) Need(id uint) []scanner.File {
if debug {
dlog.Printf("Need(%d)", id)
}
var fs = make([]scanner.File, 0, len(m.globalKey)/2) // Just a guess, but avoids too many reallocations
m.Lock()
var fs = make([]scanner.File, 0, len(m.globalKey)/2) // Just a guess, but avoids too many reallocations
rkID := m.remoteKey[id]
for gk, gf := range m.files {
if !gf.Global {
@@ -145,8 +145,8 @@ func (m *Set) Global() []scanner.File {
if debug {
dlog.Printf("Global()")
}
var fs = make([]scanner.File, 0, len(m.globalKey))
m.Lock()
var fs = make([]scanner.File, 0, len(m.globalKey))
for _, file := range m.files {
if file.Global {
fs = append(fs, file.File)

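Both hunks make the same race fix: pre-sizing the result slice reads len(m.globalKey), which is shared state, so that read has to happen after the lock is taken. The same shape in miniature, with the set type, map, and lock as stand-ins:

package main

import (
    "fmt"
    "sync"
)

type set struct {
    sync.Mutex
    globalKey map[string]string // shared; only touch it under the lock
}

func (s *set) global() []string {
    s.Lock()
    defer s.Unlock()
    // Size the result after locking: len() on a map that another goroutine
    // may be writing to concurrently is itself a data race.
    fs := make([]string, 0, len(s.globalKey))
    for _, v := range s.globalKey {
        fs = append(fs, v)
    }
    return fs
}

func main() {
    s := &set{globalKey: map[string]string{"a": "file-a", "b": "file-b"}}
    fmt.Println(len(s.global()), "files")
}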
View File

@@ -4,10 +4,12 @@
'use strict';
var syncthing = angular.module('syncthing', []);
var urlbase = 'rest';
syncthing.controller('SyncthingCtrl', function ($scope, $http) {
var prevDate = 0,
getOK = true;
var prevDate = 0;
var getOK = true;
var restarting = false;
$scope.connections = {};
$scope.config = {};
@@ -31,6 +33,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
{id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
{id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},
{id: 'StartBrowser', descr: 'Start Browser', type: 'bool'},
{id: 'UPnPEnabled', descr: 'Enable UPnP', type: 'bool'},
];
$scope.guiSettings = [
@@ -44,9 +47,16 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$('#networkError').modal('hide');
getOK = true;
}
if (restarting) {
$('#restarting').modal('hide');
restarting = false;
}
}
function getFailed() {
if (restarting) {
return;
}
if (getOK) {
$('#networkError').modal({backdrop: 'static', keyboard: false});
getOK = false;
@@ -54,11 +64,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}
function nodeCompare(a, b) {
if (a.NodeID === $scope.myID) {
return -1;
}
if (b.NodeID === $scope.myID) {
return 1;
if (typeof a.Name !== 'undefined' && typeof b.Name !== 'undefined') {
if (a.Name < b.Name)
return -1;
return a.Name > b.Name;
}
if (a.NodeID < b.NodeID) {
return -1;
@@ -67,18 +76,18 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}
$scope.refresh = function () {
$http.get('/rest/system').success(function (data) {
$http.get(urlbase + '/system').success(function (data) {
getSucceeded();
$scope.system = data;
}).error(function () {
getFailed();
});
$scope.repos.forEach(function (repo) {
$http.get('/rest/model/' + repo.ID).success(function (data) {
$http.get(urlbase + '/model?repo=' + encodeURIComponent(repo.ID)).success(function (data) {
$scope.model[repo.ID] = data;
});
});
$http.get('/rest/connections').success(function (data) {
$http.get(urlbase + '/connections').success(function (data) {
var now = Date.now(),
td = (now - prevDate) / 1000,
id;
@@ -103,7 +112,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}
$scope.connections = data;
});
$http.get('/rest/errors').success(function (data) {
$http.get(urlbase + '/errors').success(function (data) {
$scope.errors = data;
});
};
@@ -113,6 +122,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
return 'Unknown';
}
if ($scope.model[repo].invalid !== '') {
return 'Stopped';
}
var state = '' + $scope.model[repo].state;
state = state[0].toUpperCase() + state.substr(1);
@@ -128,6 +141,10 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
return 'text-info';
}
if ($scope.model[repo].invalid !== '') {
return 'text-warning';
}
var state = '' + $scope.model[repo].state;
if (state == 'idle') {
return 'text-success';
@@ -230,18 +247,21 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.saveSettings = function () {
$scope.configInSync = false;
$scope.config.Options.ListenAddress = $scope.config.Options.ListenStr.split(',').map(function (x) { return x.trim(); });
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$('#settings').modal("hide");
};
$scope.restart = function () {
$http.post('/rest/restart');
restarting = true;
$('#restarting').modal('show');
$http.post(urlbase + '/restart');
$scope.configInSync = true;
};
$scope.editNode = function (nodeCfg) {
$scope.currentNode = $.extend({}, nodeCfg);
$scope.editingExisting = true;
$scope.editingSelf = (nodeCfg.NodeID == $scope.myID);
$scope.currentNode.AddressesStr = nodeCfg.Addresses.join(', ');
$('#editNode').modal({backdrop: 'static', keyboard: true});
};
@@ -249,6 +269,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.addNode = function () {
$scope.currentNode = {AddressesStr: 'dynamic'};
$scope.editingExisting = false;
$scope.editingSelf = false;
$('#editNode').modal({backdrop: 'static', keyboard: true});
};
@@ -270,7 +291,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
}
$scope.configInSync = false;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$scope.saveNode = function () {
@@ -297,7 +318,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.nodes.sort(nodeCompare);
$scope.config.Nodes = $scope.nodes;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$scope.otherNodes = function () {
@@ -325,6 +346,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.clearErrors = function () {
$scope.seenError = $scope.errors[$scope.errors.length - 1].Time;
$http.post(urlbase + '/error/clear');
};
$scope.friendlyNodes = function (str) {
@@ -380,7 +402,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.config.Repositories = $scope.repos;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$scope.deleteRepo = function () {
@@ -396,19 +418,19 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.config.Repositories = $scope.repos;
$scope.configInSync = false;
$http.post('/rest/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
$http.post(urlbase + '/config', JSON.stringify($scope.config), {headers: {'Content-Type': 'application/json'}});
};
$http.get('/rest/version').success(function (data) {
$http.get(urlbase + '/version').success(function (data) {
$scope.version = data;
});
$http.get('/rest/system').success(function (data) {
$http.get(urlbase + '/system').success(function (data) {
$scope.system = data;
$scope.myID = data.myID;
});
$http.get('/rest/config').success(function (data) {
$http.get(urlbase + '/config').success(function (data) {
$scope.config = data;
$scope.config.Options.ListenStr = $scope.config.Options.ListenAddress.join(', ');
@@ -421,7 +443,7 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
$scope.refresh();
});
$http.get('/rest/config/sync').success(function (data) {
$http.get(urlbase + '/config/sync').success(function (data) {
$scope.configInSync = data.configInSync;
});

View File

File diff suppressed because one or more lines are too long

View File

@@ -84,10 +84,14 @@
text-align: right;
display: inline-block;
}
.ng-cloak {
display: none !important;
}
</style>
</head>
<body ng-controller="SyncthingCtrl">
<body ng-controller="SyncthingCtrl" class="ng-cloak">
<!-- Top bar -->
@@ -110,7 +114,7 @@
<p>The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.</p>
</div>
<div class="panel-footer">
<button type="button" class="btn btn-sm btn-default pull-right" ng-click="restart()"><span class="glyphicon glyphicon-off"></span> Restart Now</button>
<button type="button" class="btn btn-sm btn-default pull-right" ng-click="restart()"><span class="glyphicon glyphicon-refresh"></span> Restart Now</button>
<div class="clearfix"></div>
</div>
</div>
@@ -130,29 +134,30 @@
<ul class="list-unstyled" ng-repeat="repo in repos">
<li>
<span class="text-monospace">{{repo.Directory}}</span>
<span ng-if="repo.Invalid" class="label label-danger">Invalid: {{repo.Invalid}}</span>
<ul class="list-no-bullet">
<li>
<div class="li-column">
<div class="li-column" title="Repository ID">
<span class="text-muted glyphicon glyphicon-tag"></span>
<span class="data">{{repo.ID}}</span>
</div>
<div class="li-column">
<div class="li-column" title="Repository synchronization status">
<span class="text-muted glyphicon glyphicon-comment"></span>
<span class="data" ng-class="repoClass(repo.ID)">{{repoStatus(repo.ID)}}</span>
</div>
</li>
<li>
<div class="li-column">
<div class="li-column" title="Global repository files">
<span class="text-muted glyphicon glyphicon-globe"></span>
<span class="data">{{model[repo.ID].globalFiles | alwaysNumber}} files, {{model[repo.ID].globalBytes | binary}}B</span>
</div>
<div class="li-column">
<div class="li-column" title="Local repository files">
<span class="text-muted glyphicon glyphicon-home"></span>
<span class="data">{{model[repo.ID].localFiles | alwaysNumber}} files, {{model[repo.ID].localBytes | binary}}B</span>
</div>
</li>
<li>
<div class="li-column">
<div class="li-column" title="Unsynchronized files">
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
<span class="data">{{model[repo.ID].needFiles | alwaysNumber}} files, {{model[repo.ID].needBytes | binary}}B</span>
</div>
@@ -185,27 +190,27 @@
<span class="text-monospace">{{nodeName(nodeCfg)}}</span>
<ul class="list-no-bullet">
<li>
<div class="li-column">
<div class="li-column" title="Node address">
<span class="text-muted glyphicon glyphicon-link"></span>
<span class="data">{{nodeAddr(nodeCfg)}}</span>
</div>
<div class="li-column">
<div class="li-column" title="Node synchronization status">
<span class="text-muted glyphicon glyphicon-comment"></span>
<span class="data text-{{nodeClass(nodeCfg)}}">{{nodeStatus(nodeCfg)}}</span>
</div>
</li>
<li>
<div class="li-column">
<div class="li-column" title="Download rate">
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
<span class="data">{{connections[nodeCfg.NodeID].inbps | metric}}bps</span>
</div>
<div class="li-column">
<div class="li-column" title="Upload rate">
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
<span class="data">{{connections[nodeCfg.NodeID].outbps | metric}}bps</span>
</div>
</li>
<li>
<div class="li-column">
<div class="li-column" title="Node version">
<span class="text-muted glyphicon glyphicon-tag"></span>
<span class="data">{{nodeVer(nodeCfg)}}</span>
</div>
@@ -224,26 +229,31 @@
<span class="text-monospace">{{nodeName(nodeCfg)}}</span>&emsp;
<ul class="list-no-bullet">
<li>
<div class="li-column">
<div class="li-column" title="Current RAM utilization">
<span class="text-muted glyphicon glyphicon-th"></span>
<span class="data">{{system.sys | binary}}B RAM</span>
</div>
<div class="li-column">
<div class="li-column" title="Current CPU utilization (10 s)">
<span class="text-muted glyphicon glyphicon-tasks"></span>
<span class="data">{{system.cpuPercent | alwaysNumber | natural:1}}% CPU</span>
</div>
</li>
<li>
<div class="li-column">
<div class="li-column" title="Download rate (total)">
<span class="text-muted glyphicon glyphicon-cloud-download"></span>
<span class="data">{{inbps | metric}}bps</span>
</div>
<div class="li-column">
<div class="li-column" title="Upload rate (total)">
<span class="text-muted glyphicon glyphicon-cloud-upload"></span>
<span class="data">{{outbps | metric}}bps</span>
</div>
</li>
<li>
<div ng-if="system.extAnnounceOK != undefined" class="li-column" title="Global announce server">
<span class="text-muted glyphicon glyphicon-bullhorn"></span>
<span class="data text-success" ng-if="system.extAnnounceOK">Online</span>
<span class="data text-danger" ng-if="!system.extAnnounceOK">Offline</span>
</div>
<div class="li-column">
<span class="text-muted glyphicon glyphicon-cog"></span>
<span class="data"><a href="" ng-click="editNode(nodeCfg)"><span class="glyphicon glyphicon-pencil"></span> Edit</a></span>
@@ -269,7 +279,7 @@
<div class="panel panel-warning">
<div class="panel-heading"><h3 class="panel-title">Notice</h3></div>
<div class="panel-body">
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"hh:mm:ss.sss"}}:</small> {{friendlyNodes(err.Error)}}</p>
<p ng-repeat="err in errorList()"><small>{{err.Time | date:"H:mm:ss"}}:</small> {{friendlyNodes(err.Error)}}</p>
</div>
<div class="panel-footer">
<button type="button" class="pull-right btn btn-sm btn-default" ng-click="clearErrors()">OK</button>
@@ -317,6 +327,26 @@
</div>
</div>
<!-- Restarting modal -->
<div id="restarting" class="modal fade">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header alert alert-info">
<h4 class="modal-title">
<span class="glyphicon glyphicon-refresh"></span>
Restarting
</h4>
</div>
<div class="modal-body">
<p>
Syncthing is restarting. Please hold&hellip;
</p>
</div>
</div>
</div>
</div>
<!-- Node editor modal -->
<div id="editNode" class="modal fade">
@@ -331,8 +361,9 @@
<form role="form">
<div class="form-group">
<label for="nodeID">Node ID</label>
<input ng-disabled="editingExisting" id="nodeID" class="form-control" type="text" ng-model="currentNode.NodeID"></input>
<p class="help-block">The node ID can be found in the logs or in the "Add Node" dialog on the other node.</p>
<input ng-if="!editingExisting" id="nodeID" class="form-control" type="text" ng-model="currentNode.NodeID"></input>
<div ng-if="editingExisting" class="well well-sm">{{currentNode.NodeID}}</div>
<p class="help-block">The node ID can be found in the "Add Node" dialog on the other node.</p>
</div>
<div class="form-group">
<label for="name">Name</label>
@@ -353,7 +384,7 @@
<div class="modal-footer">
<button type="button" class="btn btn-primary" ng-click="saveNode()">Save</button>
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
<button ng-if="editingExisting" type="button" class="btn btn-danger pull-left" ng-click="deleteNode()">Delete</button>
<button ng-if="editingExisting && !editingSelf" type="button" class="btn btn-danger pull-left" ng-click="deleteNode()">Delete</button>
</div>
</div>
</div>

View File

@@ -1,3 +1,5 @@
// +build ignore
package main
import (

View File

@@ -1,3 +1,5 @@
// +build ignore
package main
import (

View File

@@ -1,3 +1,5 @@
// +build ignore
package main
import (

View File

@@ -45,6 +45,9 @@ func (b *Beacon) run() {
if err != nil {
log.Fatal(err)
}
if debug {
dlog.Printf("trying %d interfaces", len(intfs))
}
for _, intf := range intfs {
intf := intf
@@ -55,10 +58,13 @@ func (b *Beacon) run() {
conn, err := net.ListenMulticastUDP("udp4", &intf, group)
if err != nil {
if debug {
dlog.Printf("listen for multicast group on %q: %v", intf.Name, err)
dlog.Printf("failed to listen for multicast group on %q: %v", intf.Name, err)
}
} else {
b.conns = append(b.conns, conn)
if debug {
dlog.Printf("listening for multicast group on %q", intf.Name)
}
}
}
@@ -72,6 +78,9 @@ func (b *Beacon) run() {
dlog.Println(err)
return
}
if debug {
dlog.Printf("recv %d bytes from %s on %v", n, addr, conn)
}
b.outbox <- recv{bs[:n], addr}
}
}()
@@ -85,6 +94,9 @@ func (b *Beacon) run() {
dlog.Println(err)
return
}
if debug {
dlog.Printf("sent %d bytes to %s on %v", len(bs), group, conn)
}
}
}
}()

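The beacon tries every interface and, with STTRACE debugging enabled, logs which ones it could actually join the multicast group on. A standalone sketch of that per-interface join; the group address and port are placeholders, and failures are logged and skipped just as above:

package main

import (
    "log"
    "net"
)

func main() {
    group := &net.UDPAddr{IP: net.IPv4(239, 21, 0, 25), Port: 21025} // placeholder group
    intfs, err := net.Interfaces()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("trying %d interfaces", len(intfs))

    var conns []*net.UDPConn
    for _, intf := range intfs {
        conn, err := net.ListenMulticastUDP("udp4", &intf, group)
        if err != nil {
            log.Printf("failed to listen for multicast group on %q: %v", intf.Name, err)
            continue
        }
        log.Printf("listening for multicast group on %q", intf.Name)
        conns = append(conns, conn)
    }
    log.Printf("%d listeners up", len(conns))
}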
36
protocol/counting.go Normal file
View File

@@ -0,0 +1,36 @@
package protocol
import (
"io"
"sync/atomic"
)
type countingReader struct {
io.Reader
tot uint64
}
func (c *countingReader) Read(bs []byte) (int, error) {
n, err := c.Reader.Read(bs)
atomic.AddUint64(&c.tot, uint64(n))
return n, err
}
func (c *countingReader) Tot() uint64 {
return atomic.LoadUint64(&c.tot)
}
type countingWriter struct {
io.Writer
tot uint64
}
func (c *countingWriter) Write(bs []byte) (int, error) {
n, err := c.Writer.Write(bs)
atomic.AddUint64(&c.tot, uint64(n))
return n, err
}
func (c *countingWriter) Tot() uint64 {
return atomic.LoadUint64(&c.tot)
}

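countingReader and countingWriter sit outside the compression layer, so Tot() reports what actually crossed the wire rather than the uncompressed payload. A usage sketch with an in-memory buffer and compress/flate; the writer type is reproduced here only to keep the example self-contained:

package main

import (
    "bytes"
    "compress/flate"
    "fmt"
    "io"
    "sync/atomic"
)

type countingWriter struct {
    io.Writer
    tot uint64
}

func (c *countingWriter) Write(bs []byte) (int, error) {
    n, err := c.Writer.Write(bs)
    atomic.AddUint64(&c.tot, uint64(n))
    return n, err
}

func (c *countingWriter) Tot() uint64 { return atomic.LoadUint64(&c.tot) }

func main() {
    var wire bytes.Buffer
    cw := &countingWriter{Writer: &wire} // counts compressed bytes

    fw, err := flate.NewWriter(cw, flate.BestSpeed)
    if err != nil {
        panic(err)
    }
    payload := bytes.Repeat([]byte("index data "), 1000)
    fw.Write(payload)
    fw.Flush()

    fmt.Printf("payload %d bytes, on the wire %d bytes\n", len(payload), cw.Tot())
}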
View File

@@ -64,22 +64,26 @@ type Connection interface {
}
type rawConnection struct {
sync.RWMutex
id string
receiver Model
reader io.ReadCloser
cr *countingReader
xr *xdr.Reader
writer io.WriteCloser
cw *countingWriter
wb *bufio.Writer
xw *xdr.Writer
wmut sync.Mutex
close chan error
closed chan struct{}
id string
receiver Model
reader io.ReadCloser
xr *xdr.Reader
writer io.WriteCloser
wb *bufio.Writer
xw *xdr.Writer
closed chan struct{}
awaiting map[int]chan asyncResult
nextID int
indexSent map[string]map[string][2]int64
hasSentIndex bool
hasRecvdIndex bool
imut sync.Mutex
}
type asyncResult struct {
@@ -93,8 +97,11 @@ const (
)
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {
flrd := flate.NewReader(reader)
flwr, err := flate.NewWriter(writer, flate.BestSpeed)
cr := &countingReader{Reader: reader}
cw := &countingWriter{Writer: writer}
flrd := flate.NewReader(cr)
flwr, err := flate.NewWriter(cw, flate.BestSpeed)
if err != nil {
panic(err)
}
@@ -104,15 +111,19 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
id: nodeID,
receiver: nativeModel{receiver},
reader: flrd,
cr: cr,
xr: xdr.NewReader(flrd),
writer: flwr,
cw: cw,
wb: wb,
xw: xdr.NewWriter(wb),
close: make(chan error),
closed: make(chan struct{}),
awaiting: make(map[int]chan asyncResult),
indexSent: make(map[string]map[string][2]int64),
}
go c.closer()
go c.readerLoop()
go c.pingerLoop()
@@ -125,11 +136,11 @@ func (c *rawConnection) ID() string {
// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) {
c.Lock()
if c.isClosed() {
c.Unlock()
return
}
c.imut.Lock()
var msgType int
if c.indexSent[repo] == nil {
// This is the first time we send an index.
@@ -152,45 +163,48 @@ func (c *rawConnection) Index(repo string, idx []FileInfo) {
idx = diff
}
header{0, c.nextID, msgType}.encodeXDR(c.xw)
_, err := IndexMessage{repo, idx}.encodeXDR(c.xw)
if err == nil {
err = c.flush()
}
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
c.hasSentIndex = true
c.Unlock()
c.imut.Unlock()
c.wmut.Lock()
header{0, id, msgType}.encodeXDR(c.xw)
IndexMessage{repo, idx}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()
if err != nil {
c.close(err)
c.close <- err
return
}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
c.Lock()
if c.isClosed() {
c.Unlock()
return nil, ErrClosed
}
c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
rc := make(chan asyncResult)
if _, ok := c.awaiting[c.nextID]; ok {
if _, ok := c.awaiting[id]; ok {
panic("id taken")
}
c.awaiting[c.nextID] = rc
header{0, c.nextID, messageTypeRequest}.encodeXDR(c.xw)
_, err := RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
if err == nil {
err = c.flush()
}
c.awaiting[id] = rc
c.imut.Unlock()
c.wmut.Lock()
header{0, id, messageTypeRequest}.encodeXDR(c.xw)
RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()
if err != nil {
c.Unlock()
c.close(err)
c.close <- err
return nil, err
}
c.nextID = (c.nextID + 1) & 0xfff
c.Unlock()
res, ok := <-rc
if !ok {
@@ -201,46 +215,47 @@ func (c *rawConnection) Request(repo string, name string, offset int64, size int
// ClusterConfig send the cluster configuration message to the peer and returns any error
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
c.Lock()
defer c.Unlock()
if c.isClosed() {
return
}
header{0, c.nextID, messageTypeClusterConfig}.encodeXDR(c.xw)
c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
c.imut.Unlock()
c.wmut.Lock()
header{0, id, messageTypeClusterConfig}.encodeXDR(c.xw)
config.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()
_, err := config.encodeXDR(c.xw)
if err == nil {
err = c.flush()
}
if err != nil {
c.close(err)
c.close <- err
}
}
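
Each outbound call now allocates its message ID and registers any result channel under imut, then does the actual write under wmut, so a slow write no longer holds up ID bookkeeping. Stripped down to the request/response core, the pattern looks roughly like the sketch below; the transport is faked with a goroutine standing in for the reader loop:

package main

import (
    "fmt"
    "sync"
)

type result struct {
    val []byte
    err error
}

type conn struct {
    imut     sync.Mutex // protects nextID and awaiting
    wmut     sync.Mutex // serializes writes to the peer
    nextID   int
    awaiting map[int]chan result
}

func (c *conn) request(name string) ([]byte, error) {
    c.imut.Lock()
    id := c.nextID
    c.nextID = (c.nextID + 1) & 0xfff
    rc := make(chan result, 1)
    c.awaiting[id] = rc
    c.imut.Unlock()

    c.wmut.Lock()
    // The real code encodes a header and a RequestMessage to the peer here;
    // the fake below just answers from another goroutine.
    go c.deliver(id, []byte("contents of "+name))
    c.wmut.Unlock()

    res := <-rc
    return res.val, res.err
}

// deliver plays the role of the reader loop handling a Response message.
func (c *conn) deliver(id int, data []byte) {
    c.imut.Lock()
    rc, ok := c.awaiting[id]
    delete(c.awaiting, id)
    c.imut.Unlock()
    if ok {
        rc <- result{val: data}
    }
}

func main() {
    c := &conn{awaiting: make(map[int]chan result)}
    bs, _ := c.request("foo.txt")
    fmt.Println(string(bs))
}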
func (c *rawConnection) ping() bool {
c.Lock()
if c.isClosed() {
c.Unlock()
return false
}
rc := make(chan asyncResult, 1)
c.awaiting[c.nextID] = rc
header{0, c.nextID, messageTypePing}.encodeXDR(c.xw)
err := c.flush()
if err != nil {
c.Unlock()
c.close(err)
return false
} else if c.xw.Error() != nil {
c.Unlock()
c.close(c.xw.Error())
return false
}
c.imut.Lock()
id := c.nextID
c.nextID = (c.nextID + 1) & 0xfff
c.Unlock()
rc := make(chan asyncResult, 1)
c.awaiting[id] = rc
c.imut.Unlock()
c.wmut.Lock()
header{0, id, messageTypePing}.encodeXDR(c.xw)
err := c.flush()
c.wmut.Unlock()
if err != nil {
c.close <- err
return false
}
res, ok := <-rc
return ok && res.err == nil
@@ -251,21 +266,24 @@ type flusher interface {
}
func (c *rawConnection) flush() error {
c.wb.Flush()
if err := c.xw.Error(); err != nil {
return err
}
if err := c.wb.Flush(); err != nil {
return err
}
if f, ok := c.writer.(flusher); ok {
return f.Flush()
}
return nil
}
func (c *rawConnection) close(err error) {
c.Lock()
select {
case <-c.closed:
c.Unlock()
return
default:
}
func (c *rawConnection) closer() {
err := <-c.close
close(c.closed)
for _, ch := range c.awaiting {
close(ch)
@@ -273,7 +291,6 @@ func (c *rawConnection) close(err error) {
c.awaiting = nil
c.writer.Close()
c.reader.Close()
c.Unlock()
c.receiver.Close(c.id, err)
}
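
Teardown is now funnelled through a single closer goroutine: every error path just reports on the close channel, and closer performs the shutdown exactly once, closing the closed channel that everything else selects on. A reduced sketch of that single-owner shutdown; the report helper is my own addition to keep late reporters from blocking, not a method from the code above:

package main

import (
    "errors"
    "fmt"
)

type conn struct {
    closeCh chan error    // error paths report here
    closed  chan struct{} // closed exactly once by the closer goroutine
}

func newConn() *conn {
    c := &conn{
        closeCh: make(chan error),
        closed:  make(chan struct{}),
    }
    go c.closer()
    return c
}

// closer owns the shutdown: it runs once, for the first reported error.
func (c *conn) closer() {
    err := <-c.closeCh
    close(c.closed) // every isClosed() check flips at the same instant
    fmt.Println("connection torn down:", err)
}

// report hands an error to the closer without blocking forever, even if
// the connection is already going down.
func (c *conn) report(err error) {
    select {
    case c.closeCh <- err:
    case <-c.closed:
    }
}

func (c *conn) isClosed() bool {
    select {
    case <-c.closed:
        return true
    default:
        return false
    }
}

func main() {
    c := newConn()
    c.report(errors.New("read: connection reset"))
    c.report(errors.New("write: broken pipe")) // dropped; shutdown already ran
    <-c.closed
    fmt.Println("isClosed:", c.isClosed())
}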
@@ -292,12 +309,12 @@ loop:
for !c.isClosed() {
var hdr header
hdr.decodeXDR(c.xr)
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
}
if hdr.version != 0 {
c.close(fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version))
c.close <- fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version)
break loop
}
@@ -305,8 +322,8 @@ loop:
case messageTypeIndex:
var im IndexMessage
im.decodeXDR(c.xr)
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {
@@ -319,15 +336,12 @@ loop:
go c.receiver.Index(c.id, im.Repository, im.Files)
}
c.Lock()
c.hasRecvdIndex = true
c.Unlock()
case messageTypeIndexUpdate:
var im IndexMessage
im.decodeXDR(c.xr)
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {
go c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
@@ -336,8 +350,8 @@ loop:
case messageTypeRequest:
var req RequestMessage
req.decodeXDR(c.xr)
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
}
go c.processRequest(hdr.msgID, req)
@@ -345,16 +359,16 @@ loop:
case messageTypeResponse:
data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
}
go func(hdr header, err error) {
c.Lock()
c.imut.Lock()
rc, ok := c.awaiting[hdr.msgID]
delete(c.awaiting, hdr.msgID)
c.Unlock()
c.imut.Unlock()
if ok {
rc <- asyncResult{data, err}
@@ -363,44 +377,41 @@ loop:
}(hdr, c.xr.Error())
case messageTypePing:
c.Lock()
c.wmut.Lock()
header{0, hdr.msgID, messageTypePong}.encodeXDR(c.xw)
err := c.flush()
c.Unlock()
c.wmut.Unlock()
if err != nil {
c.close(err)
break loop
} else if c.xw.Error() != nil {
c.close(c.xw.Error())
c.close <- err
break loop
}
case messageTypePong:
c.RLock()
c.imut.Lock()
rc, ok := c.awaiting[hdr.msgID]
c.RUnlock()
if ok {
rc <- asyncResult{}
close(rc)
go func() {
rc <- asyncResult{}
close(rc)
}()
c.Lock()
delete(c.awaiting, hdr.msgID)
c.Unlock()
}
c.imut.Unlock()
case messageTypeClusterConfig:
var cm ClusterConfigMessage
cm.decodeXDR(c.xr)
if c.xr.Error() != nil {
c.close(c.xr.Error())
if err := c.xr.Error(); err != nil {
c.close <- err
break loop
} else {
go c.receiver.ClusterConfig(c.id, cm)
}
default:
c.close(fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType))
c.close <- fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
break loop
}
}
@@ -409,17 +420,16 @@ loop:
func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
c.Lock()
c.wmut.Lock()
header{0, msgID, messageTypeResponse}.encodeXDR(c.xw)
_, err := c.xw.WriteBytes(data)
if err == nil {
err = c.flush()
}
c.Unlock()
c.xw.WriteBytes(data)
err := c.flush()
c.wmut.Unlock()
buffers.Put(data)
if err != nil {
c.close(err)
c.close <- err
}
}
@@ -429,22 +439,16 @@ func (c *rawConnection) pingerLoop() {
for {
select {
case <-ticker:
c.RLock()
ready := c.hasRecvdIndex && c.hasSentIndex
c.RUnlock()
if ready {
go func() {
rc <- c.ping()
}()
select {
case ok := <-rc:
if !ok {
c.close(fmt.Errorf("ping failure"))
}
case <-time.After(pingTimeout):
c.close(fmt.Errorf("ping timeout"))
go func() {
rc <- c.ping()
}()
select {
case ok := <-rc:
if !ok {
c.close <- fmt.Errorf("ping failure")
}
case <-time.After(pingTimeout):
c.close <- fmt.Errorf("ping timeout")
}
case <-c.closed:
return
@@ -461,7 +465,7 @@ type Statistics struct {
func (c *rawConnection) Statistics() Statistics {
return Statistics{
At: time.Now(),
InBytesTotal: int(c.xr.Tot()),
OutBytesTotal: int(c.xw.Tot()),
InBytesTotal: int(c.cr.Tot()),
OutBytesTotal: int(c.cw.Tot()),
}
}

View File

@@ -5,6 +5,7 @@ import (
"io"
"testing"
"testing/quick"
"time"
)
func TestHeaderFunctions(t *testing.T) {
@@ -172,7 +173,13 @@ func TestClose(t *testing.T) {
c0 := NewConnection("c0", ar, bw, m0).(wireFormatConnection).next.(*rawConnection)
NewConnection("c1", br, aw, m1)
c0.close(nil)
c0.close <- nil
select {
case <-c0.closed:
case <-time.After(1 * time.Second):
t.Fatal("Did not close within a second")
}
if !c0.isClosed() {
t.Fatal("Connection should be closed")

12
upnp/debug.go Normal file
View File

@@ -0,0 +1,12 @@
package upnp
import (
"log"
"os"
"strings"
)
var (
dlog = log.New(os.Stderr, "upnp: ", log.Lmicroseconds|log.Lshortfile)
debug = strings.Contains(os.Getenv("STTRACE"), "upnp")
)

278
upnp/upnp.go Normal file
View File

@@ -0,0 +1,278 @@
// Package upnp implements UPnP Internet Gateway Device (IGD) port mappings
package upnp
// Adapted from https://github.com/jackpal/Taipei-Torrent/blob/dd88a8bfac6431c01d959ce3c745e74b8a911793/IGD.go
// Copyright (c) 2010 Jack Palevich (https://github.com/jackpal/Taipei-Torrent/blob/dd88a8bfac6431c01d959ce3c745e74b8a911793/LICENSE)
// Copyright (c) 2014 Jakob Borg
import (
"bufio"
"bytes"
"encoding/xml"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
type IGD struct {
serviceURL string
ourIP string
}
type Protocol string
const (
TCP Protocol = "TCP"
UDP = "UDP"
)
type upnpService struct {
ServiceType string `xml:"serviceType"`
ControlURL string `xml:"controlURL"`
}
type upnpDevice struct {
DeviceType string `xml:"deviceType"`
Devices []upnpDevice `xml:"deviceList>device"`
Services []upnpService `xml:"serviceList>service"`
}
type upnpRoot struct {
Device upnpDevice `xml:"device"`
}
func Discover() (*IGD, error) {
ssdp := &net.UDPAddr{IP: []byte{239, 255, 255, 250}, Port: 1900}
socket, err := net.ListenUDP("udp4", &net.UDPAddr{})
if err != nil {
return nil, err
}
defer socket.Close()
err = socket.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return nil, err
}
search := []byte(`
M-SEARCH * HTTP/1.1
Host: 239.255.255.250:1900
St: urn:schemas-upnp-org:device:InternetGatewayDevice:1
Man: "ssdp:discover"
Mx: 3
`)
_, err = socket.WriteTo(search, ssdp)
if err != nil {
return nil, err
}
resp := make([]byte, 1500)
n, _, err := socket.ReadFrom(resp)
if err != nil {
return nil, err
}
if debug {
dlog.Println(string(resp[:n]))
}
reader := bufio.NewReader(bytes.NewBuffer(resp[:n]))
request := &http.Request{}
response, err := http.ReadResponse(reader, request)
if response.Header.Get("St") != "urn:schemas-upnp-org:device:InternetGatewayDevice:1" {
return nil, errors.New("no igd")
}
locURL := response.Header.Get("Location")
if locURL == "" {
return nil, errors.New("no location")
}
serviceURL, err := getServiceURL(locURL)
if err != nil {
return nil, err
}
// Figure out our IP address on the network used to reach the IGD. We
// do this in a fairly roundabout way by connecting to the IGD and
// checking the address of the local end of the socket. I'm open to
// suggestions on a better way to do this...
ourIP, err := localIP(locURL)
if err != nil {
return nil, err
}
igd := &IGD{
serviceURL: serviceURL,
ourIP: ourIP,
}
return igd, nil
}
func localIP(tgt string) (string, error) {
url, err := url.Parse(tgt)
if err != nil {
return "", err
}
conn, err := net.Dial("tcp", url.Host)
if err != nil {
return "", err
}
defer conn.Close()
ourIP, _, err := net.SplitHostPort(conn.LocalAddr().String())
if err != nil {
return "", err
}
return ourIP, nil
}
func getChildDevice(d upnpDevice, deviceType string) (upnpDevice, bool) {
for _, dev := range d.Devices {
if dev.DeviceType == deviceType {
return dev, true
}
}
return upnpDevice{}, false
}
func getChildService(d upnpDevice, serviceType string) (upnpService, bool) {
for _, svc := range d.Services {
if svc.ServiceType == serviceType {
return svc, true
}
}
return upnpService{}, false
}
func getServiceURL(rootURL string) (string, error) {
r, err := http.Get(rootURL)
if err != nil {
return "", err
}
defer r.Body.Close()
if r.StatusCode >= 400 {
return "", errors.New(r.Status)
}
var upnpRoot upnpRoot
err = xml.NewDecoder(r.Body).Decode(&upnpRoot)
if err != nil {
return "", err
}
dev := upnpRoot.Device
if dev.DeviceType != "urn:schemas-upnp-org:device:InternetGatewayDevice:1" {
return "", errors.New("No InternetGatewayDevice")
}
dev, ok := getChildDevice(dev, "urn:schemas-upnp-org:device:WANDevice:1")
if !ok {
return "", errors.New("No WANDevice")
}
dev, ok = getChildDevice(dev, "urn:schemas-upnp-org:device:WANConnectionDevice:1")
if !ok {
return "", errors.New("No WANConnectionDevice")
}
svc, ok := getChildService(dev, "urn:schemas-upnp-org:service:WANIPConnection:1")
if !ok {
return "", errors.New("No WANIPConnection")
}
if len(svc.ControlURL) == 0 {
return "", errors.New("no controlURL")
}
u, _ := url.Parse(rootURL)
if svc.ControlURL[0] == '/' {
u.Path = svc.ControlURL
} else {
u.Path += svc.ControlURL
}
return u.String(), nil
}
func soapRequest(url, function, message string) error {
tpl := `<?xml version="1.0" ?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>%s</s:Body>
</s:Envelope>
`
body := fmt.Sprintf(tpl, message)
req, err := http.NewRequest("POST", url, strings.NewReader(body))
if err != nil {
return err
}
req.Header.Set("Content-Type", `text/xml; charset="utf-8"`)
req.Header.Set("User-Agent", "syncthing/1.0")
req.Header.Set("SOAPAction", `"urn:schemas-upnp-org:service:WANIPConnection:1#`+function+`"`)
req.Header.Set("Connection", "Close")
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Pragma", "no-cache")
if debug {
dlog.Println(req.Header.Get("SOAPAction"))
dlog.Println(body)
}
r, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
if debug {
resp, _ := ioutil.ReadAll(r.Body)
dlog.Println(string(resp))
}
r.Body.Close()
if r.StatusCode >= 400 {
return errors.New(function + ": " + r.Status)
}
return nil
}
func (n *IGD) AddPortMapping(protocol Protocol, externalPort, internalPort int, description string, timeout int) error {
tpl := `<u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">
<NewRemoteHost></NewRemoteHost>
<NewExternalPort>%d</NewExternalPort>
<NewProtocol>%s</NewProtocol>
<NewInternalPort>%d</NewInternalPort>
<NewInternalClient>%s</NewInternalClient>
<NewEnabled>1</NewEnabled>
<NewPortMappingDescription>%s</NewPortMappingDescription>
<NewLeaseDuration>%d</NewLeaseDuration>
</u:AddPortMapping>
`
body := fmt.Sprintf(tpl, externalPort, protocol, internalPort, n.ourIP, description, timeout)
return soapRequest(n.serviceURL, "AddPortMapping", body)
}
func (n *IGD) DeletePortMapping(protocol Protocol, externalPort int) (err error) {
tpl := `<u:DeletePortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">
<NewRemoteHost></NewRemoteHost>
<NewExternalPort>%d</NewExternalPort>
<NewProtocol>%s</NewProtocol>
</u:DeletePortMapping>
`
body := fmt.Sprintf(tpl, externalPort, protocol)
return soapRequest(n.serviceURL, "DeletePortMapping", body)
}
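
Putting the new package together: Discover finds the gateway's WANIPConnection control URL via SSDP, and the port mapping calls are thin SOAP wrappers around it. A hedged usage sketch; the import path is assumed to sit next to the other packages, and the port numbers, description, and lease duration are arbitrary:

package main

import (
    "log"

    "github.com/calmh/syncthing/upnp"
)

func main() {
    igd, err := upnp.Discover()
    if err != nil {
        log.Fatal("no UPnP gateway found: ", err)
    }

    // Map external TCP port 22000 to the same port on this machine.
    // A lease duration of 0 asks for a permanent mapping.
    err = igd.AddPortMapping(upnp.TCP, 22000, 22000, "syncthing", 0)
    if err != nil {
        log.Fatal("port mapping failed: ", err)
    }
    log.Println("external port 22000 mapped")

    // Clean up when done.
    if err := igd.DeletePortMapping(upnp.TCP, 22000); err != nil {
        log.Println("removing mapping:", err)
    }
}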