Mirror of https://github.com/syncthing/syncthing.git (synced 2026-01-14 16:59:13 -05:00)
Compare commits
60 Commits
| SHA1 |
|---|
| fe9c2b9857 |
| 2a2177e7fa |
| d1d565e58b |
| 891ff383ec |
| d322ebd0b9 |
| 50190236bb |
| d5a0f91cb4 |
| 467c1b26fb |
| 3cabecda04 |
| 6d3160b0ab |
| d328e0fb75 |
| 5f01afb7ea |
| 6fe2fa5ff0 |
| b371b1fe34 |
| 90c0a39df8 |
| 70c5a5dff1 |
| da0b7cc7f2 |
| 139e9b144e |
| 77c0a19451 |
| 58cbd19742 |
| 9bf6917ae8 |
| 897cca0a82 |
| 6af09c61be |
| c3c7798446 |
| 06dc91fadf |
| 526cab538a |
| 81d19a00aa |
| ca755ec9e0 |
| 4f6206cb2d |
| 7fb53ec954 |
| d8b5070ca8 |
| 5e99d38412 |
| 3990014073 |
| 3e51206a6b |
| 7569b75d61 |
| 8fcabac518 |
| abb0cfde72 |
| 7990ffcc60 |
| 49910a1d85 |
| 46a143e80e |
| 69b7f26e4c |
| 5b37d0356c |
| 1188ebbb7b |
| 76b903b2e0 |
| be38c2111f |
| 1de787fab8 |
| 81f683a61c |
| db6f68d031 |
| d0a1c805e9 |
| 00a654845f |
| 04dad8485a |
| 0b1475169f |
| 6ec4fbc82b |
| 18cc7a663b |
| cf5febad47 |
| 42849af5a8 |
| e6364407a9 |
| 480b78f2c8 |
| fa8f339478 |
| 7776839c82 |
.gitignore (vendored): 1 changed line
@@ -5,6 +5,7 @@ stdiscosrv.exe
*.tar.gz
*.zip
*.asc
*.deb
.jshintrc
coverage.out
files/pidx
AUTHORS: 3 changed lines
@@ -51,7 +51,7 @@ Gilli Sigurdsson (gillisig) <gilli@vx.is>
Jaakko Hannikainen (jgke) <jgke@jgke.fi>
Jacek Szafarkiewicz (hadogenes) <szafar@linux.pl>
Jake Peterson (acogdev) <jake@acogdev.com>
Jakob Borg (calmh) <jakob@nym.se>
Jakob Borg (calmh) <jakob@nym.se> <jakob@kastelo.net>
James Patterson (jpjp) <jamespatterson@operamail.com> <jpjp@users.noreply.github.com>
Jaroslav Malec (dzarda) <dzardacz@gmail.com>
Jens Diemer (jedie) <github.com@jensdiemer.de> <git@jensdiemer.de>
@@ -87,6 +87,7 @@ Sergey Mishin (ralder) <ralder@yandex.ru>
Stefan Kuntz (Stefan-Code) <stefan.github@gmail.com> <Stefan.github@gmail.com>
Stefan Tatschner (rumpelsepp) <stefan@sevenbyte.org> <rumpelsepp@sevenbyte.org>
Tim Abell (timabell) <tim@timwise.co.uk>
Tim Howes (timhowes) <timhowes@berkeley.edu>
Tobias Nygren (tnn2) <tnn@nygren.pp.se>
Tomas Cerveny (kozec) <kozec@kozec.com>
Tully Robinson (tojrobinson) <tully@tojr.org>
NICKS: 2 changed lines
@@ -23,6 +23,7 @@ buinsky <vix_booja@tut.by>
burkemw3 <mburke@amplify.com>
burkemw3 <burkemw3@gmail.com>
calmh <jakob@nym.se>
calmh <jakob@kastelo.net>
canton7 <antony.male@gmail.com>
Cathryne <cathryne.linenweaver@gmail.com>
Cathryne <Cathryne@users.noreply.github.com>
@@ -100,6 +101,7 @@ snnd <dw@risu.io>
Stefan-Code <stefan.github@gmail.com>
Stefan-Code <Stefan.github@gmail.com>
timabell <tim@timwise.co.uk>
timhowes <timhowes@berkeley.edu>
tnn2 <tnn@nygren.pp.se>
tojrobinson <tully@tojr.org>
tpng <benny.tpng@gmail.com>
build.go: 82 changed lines
@@ -27,7 +27,6 @@ import (
"strconv"
"strings"
"syscall"
"text/template"
"time"
)
@@ -43,12 +42,12 @@ var (
)
type target struct {
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
debianFiles []archiveFile
tags []string
name string
buildPkg string
binaryName string
archiveFiles []archiveFile
installationFiles []archiveFile
tags []string
}
type archiveFile struct {
@@ -76,7 +75,7 @@ var targets = map[string]target{
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
// All files from etc/ and extra/ added automatically in init().
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
{src: "LICENSE", dst: "deb/usr/share/doc/syncthing/LICENSE.txt", perm: 0644},
@@ -106,7 +105,7 @@ var targets = map[string]target{
{src: "cmd/stdiscosrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/stdiscosrv/README.md", dst: "deb/usr/share/doc/stdiscosrv/README.txt", perm: 0644},
{src: "cmd/stdiscosrv/LICENSE", dst: "deb/usr/share/doc/stdiscosrv/LICENSE.txt", perm: 0644},
@@ -125,7 +124,7 @@ var targets = map[string]target{
{src: "cmd/strelaysrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/strelaysrv/README.md", dst: "deb/usr/share/doc/strelaysrv/README.txt", perm: 0644},
{src: "cmd/strelaysrv/LICENSE", dst: "deb/usr/share/doc/strelaysrv/LICENSE.txt", perm: 0644},
@@ -143,7 +142,7 @@ var targets = map[string]target{
{src: "cmd/strelaypoolsrv/LICENSE", dst: "LICENSE.txt", perm: 0644},
{src: "AUTHORS", dst: "AUTHORS.txt", perm: 0644},
},
debianFiles: []archiveFile{
installationFiles: []archiveFile{
{src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
{src: "cmd/strelaypoolsrv/README.md", dst: "deb/usr/share/doc/relaysrv/README.txt", perm: 0644},
{src: "cmd/strelaypoolsrv/LICENSE", dst: "deb/usr/share/doc/relaysrv/LICENSE.txt", perm: 0644},
@@ -163,12 +162,12 @@ func init() {
syncthingPkg.archiveFiles = append(syncthingPkg.archiveFiles, archiveFile{src: file, dst: file, perm: 0644})
}
for _, file := range listFiles("extra") {
syncthingPkg.debianFiles = append(syncthingPkg.debianFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
syncthingPkg.installationFiles = append(syncthingPkg.installationFiles, archiveFile{src: file, dst: "deb/usr/share/doc/syncthing/" + filepath.Base(file), perm: 0644})
}
targets["syncthing"] = syncthingPkg
}
const minGoVersion = 1.3
const minGoVersion = 1.5
func main() {
log.SetOutput(os.Stdout)
@@ -298,6 +297,9 @@ func runCommand(cmd string, target target) {
}
}
case "version":
fmt.Println(getVersion())
default:
log.Fatalf("Unknown command %q", cmd)
}
@@ -354,7 +356,6 @@ func setup() {
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
runPrint("go", "get", "-v", "github.com/alecthomas/gometalinter")
runPrint("go", "get", "-v", "github.com/mitchellh/go-wordwrap")
}
@@ -491,46 +492,31 @@ func buildDeb(target target) {
build(target, []string{"noupgrade"})
for i := range target.debianFiles {
target.debianFiles[i].src = strings.Replace(target.debianFiles[i].src, "{{binary}}", target.binaryName, 1)
target.debianFiles[i].dst = strings.Replace(target.debianFiles[i].dst, "{{binary}}", target.binaryName, 1)
for i := range target.installationFiles {
target.installationFiles[i].src = strings.Replace(target.installationFiles[i].src, "{{binary}}", target.binaryName, 1)
target.installationFiles[i].dst = strings.Replace(target.installationFiles[i].dst, "{{binary}}", target.binaryName, 1)
}
for _, af := range target.debianFiles {
for _, af := range target.installationFiles {
if err := copyFile(af.src, af.dst, af.perm); err != nil {
log.Fatal(err)
}
}
os.MkdirAll("deb/DEBIAN", 0755)
data := map[string]string{
"name": target.name,
"arch": debarch,
"version": version[1:],
"date": time.Now().Format(time.RFC1123),
}
debTemplateFiles := append(listFiles("debtpl/common"), listFiles("debtpl/"+target.name)...)
for _, file := range debTemplateFiles {
tpl, err := template.New(filepath.Base(file)).ParseFiles(file)
if err != nil {
log.Fatal(err)
}
outFile := filepath.Join("deb/DEBIAN", filepath.Base(file))
out, err := os.Create(outFile)
if err != nil {
log.Fatal(err)
}
if err := tpl.Execute(out, data); err != nil {
log.Fatal(err)
}
if err := out.Close(); err != nil {
log.Fatal(err)
}
info, _ := os.Lstat(file)
os.Chmod(outFile, info.Mode())
maintainer := "Syncthing Release Management <release@syncthing.net>"
debver := version
if strings.HasPrefix(debver, "v") {
debver = debver[1:]
}
runPrint("fpm", "-t", "deb", "-s", "dir", "-C", "deb",
"-n", "syncthing", "-v", debver, "-a", debarch,
"--vendor", maintainer, "-m", maintainer,
"-d", "libc6",
"-d", "procps", // because postinst script
"--url", "https://syncthing.net/",
"--description", "Open Source Continuous File Synchronization",
"--after-upgrade", "script/post-upgrade",
"--license", "MPL-2")
}
func copyFile(src, dst string, perm os.FileMode) error {
@@ -642,7 +628,9 @@ func ldflags() string {
func rmr(paths ...string) {
for _, path := range paths {
log.Println("rm -r", path)
if debug {
log.Println("rm -r", path)
}
os.RemoveAll(path)
}
}
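The build.go changes above rename the target field debianFiles to installationFiles and drop the hand-rolled debtpl templating in favour of a single fpm invocation. The "{{binary}}" placeholder substitution survives the rewrite. A small, self-contained sketch of that substitution pattern (the type and field names mirror the diff, but this is an illustration, not the actual build script):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    type archiveFile struct {
        src, dst string
        perm     os.FileMode
    }

    func main() {
        binaryName := "syncthing"
        installationFiles := []archiveFile{
            {src: "{{binary}}", dst: "deb/usr/bin/{{binary}}", perm: 0755},
            {src: "README.md", dst: "deb/usr/share/doc/syncthing/README.txt", perm: 0644},
        }
        // Expand the {{binary}} placeholder in place, as buildDeb does above.
        for i := range installationFiles {
            installationFiles[i].src = strings.Replace(installationFiles[i].src, "{{binary}}", binaryName, 1)
            installationFiles[i].dst = strings.Replace(installationFiles[i].dst, "{{binary}}", binaryName, 1)
        }
        fmt.Println(installationFiles)
    }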
@@ -14,6 +14,7 @@ import (
"time"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/thejerf/suture"
)
@@ -86,7 +87,11 @@ func main() {
if !useHTTP {
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
log.Fatalln("Failed to load X509 key pair:", err)
log.Println("Failed to load keypair. Generating one, this might take a while...")
cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 3072)
if err != nil {
log.Fatalln("Failed to generate X509 key pair:", err)
}
}
devID := protocol.NewDeviceID(cert.Certificate[0])
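The stdiscosrv hunk above makes a missing or unreadable key pair non-fatal: instead of exiting, the server generates one via tlsutil.NewCertificate. A rough, self-contained sketch of the same load-or-generate idea using only the standard library (a plain self-signed ECDSA certificate rather than syncthing's tlsutil helper, so the details differ):

    package main

    import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "crypto/tls"
        "crypto/x509"
        "crypto/x509/pkix"
        "encoding/pem"
        "log"
        "math/big"
        "os"
        "time"
    )

    // loadOrGenerate tries to load an existing key pair and, failing that,
    // writes out and returns a freshly generated self-signed one.
    func loadOrGenerate(certFile, keyFile string) (tls.Certificate, error) {
        cert, err := tls.LoadX509KeyPair(certFile, keyFile)
        if err == nil {
            return cert, nil
        }
        log.Println("Failed to load keypair. Generating one...")
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
            return tls.Certificate{}, err
        }
        tmpl := x509.Certificate{
            SerialNumber: big.NewInt(1),
            Subject:      pkix.Name{CommonName: "stdiscosrv"},
            NotBefore:    time.Now(),
            NotAfter:     time.Now().AddDate(10, 0, 0),
        }
        der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
        if err != nil {
            return tls.Certificate{}, err
        }
        keyDER, err := x509.MarshalECPrivateKey(key)
        if err != nil {
            return tls.Certificate{}, err
        }
        certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
        keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
        if err := os.WriteFile(certFile, certPEM, 0644); err != nil {
            return tls.Certificate{}, err
        }
        if err := os.WriteFile(keyFile, keyPEM, 0600); err != nil {
            return tls.Certificate{}, err
        }
        return tls.X509KeyPair(certPEM, keyPEM)
    }

    func main() {
        if _, err := loadOrGenerate("cert.pem", "key.pem"); err != nil {
            log.Fatalln(err)
        }
        log.Println("certificate ready")
    }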
@@ -27,8 +27,9 @@ func postgresSetup(db *sql.DB) error {
return err
}
var tmp string
row := db.QueryRow(`SELECT 'DevicesDeviceIDIndex'::regclass`)
if err = row.Scan(nil); err != nil {
if err = row.Scan(&tmp); err != nil {
_, err = db.Exec(`CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)`)
}
if err != nil {
@@ -36,7 +37,7 @@ func postgresSetup(db *sql.DB) error {
}
row = db.QueryRow(`SELECT 'DevicesSeenIndex'::regclass`)
if err = row.Scan(nil); err != nil {
if err = row.Scan(&tmp); err != nil {
_, err = db.Exec(`CREATE INDEX DevicesSeenIndex ON Devices (Seen)`)
}
if err != nil {
@@ -46,14 +47,14 @@ func postgresSetup(db *sql.DB) error {
_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Addresses (
DeviceID CHAR(63) NOT NULL,
Seen TIMESTAMP NOT NULL,
Address VARCHAR(256) NOT NULL
Address VARCHAR(2048) NOT NULL
)`)
if err != nil {
return err
}
row = db.QueryRow(`SELECT 'AddressesDeviceIDSeenIndex'::regclass`)
if err = row.Scan(nil); err != nil {
if err = row.Scan(&tmp); err != nil {
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDSeenIndex ON Addresses (DeviceID, Seen)`)
}
if err != nil {
@@ -61,7 +62,7 @@ func postgresSetup(db *sql.DB) error {
}
row = db.QueryRow(`SELECT 'AddressesDeviceIDAddressIndex'::regclass`)
if err = row.Scan(nil); err != nil {
if err = row.Scan(&tmp); err != nil {
_, err = db.Exec(`CREATE INDEX AddressesDeviceIDAddressIndex ON Addresses (DeviceID, Address)`)
}
if err != nil {
@@ -451,7 +451,7 @@ func (s *querysrv) getDeviceSeen(device protocol.DeviceID) (time.Time, error) {
if err := row.Scan(&seen); err != nil {
return time.Time{}, err
}
return seen, nil
return seen.In(time.UTC), nil
}
func handlePing(w http.ResponseWriter, r *http.Request) {
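The postgresSetup fix replaces row.Scan(nil) with row.Scan(&tmp). database/sql requires a non-nil pointer destination, so Scan(nil) errors unconditionally and the regclass probe could not distinguish "index exists" from "index missing"; scanning into a throwaway variable restores the intended check. A minimal sketch of the corrected probe-then-create pattern, where the driver import and DSN are placeholders:

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/lib/pq" // assumed Postgres driver
    )

    // ensureIndex runs ddl only when the regclass lookup fails, i.e. when the
    // index does not exist yet, mirroring the corrected Scan(&tmp) pattern above.
    func ensureIndex(db *sql.DB, name, ddl string) error {
        var tmp string
        row := db.QueryRow(`SELECT '` + name + `'::regclass`)
        if err := row.Scan(&tmp); err != nil {
            if _, err := db.Exec(ddl); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        db, err := sql.Open("postgres", "postgres://localhost/discosrv?sslmode=disable") // placeholder DSN
        if err != nil {
            log.Fatal(err)
        }
        if err := ensureIndex(db, "DevicesDeviceIDIndex",
            `CREATE INDEX DevicesDeviceIDIndex ON Devices (DeviceID)`); err != nil {
            log.Fatal(err)
        }
    }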
BIN cmd/stdiscosrv/stdiscosrv (executable file): Binary file not shown.
@@ -66,7 +66,7 @@ func checkServers(deviceID protocol.DeviceID, servers ...string) {
}()
}
for _ = range servers {
for range servers {
res := <-resc
u, _ := url.Parse(res.server)
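This hunk, like several later ones, replaces `for _ = range x` with `for range x`: when neither the index nor the value is used, Go allows dropping the assignment entirely, and current gofmt/vet tooling prefers the shorter form. A tiny runnable sketch of the same collect-one-result-per-server pattern:

    package main

    import "fmt"

    func main() {
        servers := []string{"a", "b", "c"}
        resc := make(chan string, len(servers))
        for _, s := range servers {
            go func(s string) { resc <- s + ": ok" }(s)
        }
        // One receive per launched goroutine; no loop variable is needed.
        for range servers {
            fmt.Println(<-resc)
        }
    }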
@@ -76,6 +76,7 @@ var (
permRelaysFile string
ipHeader string
geoipPath string
proto string
getMut = sync.NewRWMutex()
getLRUCache *lru.Cache
@@ -105,6 +106,7 @@ func main() {
flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
flag.StringVar(&geoipPath, "geoip", "GeoLite2-City.mmdb", "Path to GeoLite2-City database")
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
flag.Parse()
@@ -154,12 +156,12 @@ func main() {
},
}
listener, err = tls.Listen("tcp", listen, tlsCfg)
listener, err = tls.Listen(proto, listen, tlsCfg)
} else {
if debug {
log.Println("Starting plain listener on", listen)
}
listener, err = net.Listen("tcp", listen)
listener, err = net.Listen(proto, listen)
}
if err != nil {
@@ -5,7 +5,7 @@ strelaysrv
This is the relay server for the `syncthing` project.
To get it, run `go get github.com/syncthing/strelaysrv` or download the
To get it, run `go get github.com/syncthing/syncthing/cmd/strelaysrv` or download the
[latest build](http://build.syncthing.net/job/strelaysrv/lastSuccessfulBuild/artifact/)
from the build server.
@@ -23,7 +23,7 @@ var (
numConnections int64
)
func listener(addr string, config *tls.Config) {
func listener(proto, addr string, config *tls.Config) {
tcpListener, err := net.Listen("tcp", addr)
if err != nil {
log.Fatalln(err)
@@ -167,10 +167,16 @@ func protocolConnectionHandler(tcpConn net.Conn, config *tls.Config) {
continue
}
peerOutbox <- serverInvitation
select {
case peerOutbox <- serverInvitation:
if debug {
log.Println("Sent invitation from", id, "to", requestedPeer)
}
default:
if debug {
log.Println("Could not send invitation from", id, "to", requestedPeer, "as peer disconnected")
}
if debug {
log.Println("Sent invitation from", id, "to", requestedPeer)
}
conn.Close()
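The invitation is now sent through a select with a default branch, so a peer whose outbox is no longer being drained cannot block the protocol handler. The general shape of a non-blocking channel send in Go:

    package main

    import "fmt"

    func main() {
        outbox := make(chan string, 1)
        for _, msg := range []string{"first", "second"} {
            select {
            case outbox <- msg:
                fmt.Println("sent", msg)
            default:
                // Receiver gone or buffer full: drop instead of blocking forever.
                fmt.Println("dropped", msg)
            }
        }
    }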
@@ -24,6 +24,11 @@ import (
"github.com/syncthing/syncthing/lib/relay/protocol"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/nat"
_ "github.com/syncthing/syncthing/lib/pmp"
_ "github.com/syncthing/syncthing/lib/upnp"
syncthingprotocol "github.com/syncthing/syncthing/lib/protocol"
)
@@ -48,6 +53,7 @@ func init() {
var (
listen string
debug bool
proto string
sessionAddress []byte
sessionPort uint16
@@ -70,12 +76,17 @@ var (
pools []string
providedBy string
defaultPoolAddrs = "https://relays.syncthing.net/endpoint"
natEnabled bool
natLease int
natRenewal int
natTimeout int
)
func main() {
log.SetFlags(log.Lshortfile | log.LstdFlags)
var dir, extAddress string
var dir, extAddress, proto string
flag.StringVar(&listen, "listen", ":22067", "Protocol listen address")
flag.StringVar(&dir, "keys", ".", "Directory where cert.pem and key.pem is stored")
@@ -89,14 +100,22 @@ func main() {
flag.StringVar(&poolAddrs, "pools", defaultPoolAddrs, "Comma separated list of relay pool addresses to join")
flag.StringVar(&providedBy, "provided-by", "", "An optional description about who provides the relay")
flag.StringVar(&extAddress, "ext-address", "", "An optional address to advertise as being available on.\n\tAllows listening on an unprivileged port with port forwarding from e.g. 443, and be connected to on port 443.")
flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
flag.BoolVar(&natEnabled, "nat", false, "Use UPnP/NAT-PMP to acquire external port mapping")
flag.IntVar(&natLease, "nat-lease", 60, "NAT lease length in minutes")
flag.IntVar(&natRenewal, "nat-renewal", 30, "NAT renewal frequency in minutes")
flag.IntVar(&natTimeout, "nat-timeout", 10, "NAT discovery timeout in seconds")
flag.Parse()
if extAddress == "" {
extAddress = listen
}
addr, err := net.ResolveTCPAddr("tcp", extAddress)
if len(providedBy) > 30 {
log.Fatal("Provided-by cannot be longer than 30 characters")
}
addr, err := net.ResolveTCPAddr(proto, extAddress)
if err != nil {
log.Fatal(err)
}
@@ -149,6 +168,37 @@ func main() {
log.Println("ID:", id)
}
wrapper := config.Wrap("config", config.New(id))
wrapper.SetOptions(config.OptionsConfiguration{
NATLeaseM: natLease,
NATRenewalM: natRenewal,
NATTimeoutS: natTimeout,
})
natSvc := nat.NewService(id, wrapper)
mapping := mapping{natSvc.NewMapping(nat.TCP, addr.IP, addr.Port)}
if natEnabled {
go natSvc.Serve()
found := make(chan struct{})
mapping.OnChanged(func(_ *nat.Mapping, _, _ []nat.Address) {
select {
case found <- struct{}{}:
default:
}
})
// Need to wait a few extra seconds, since NAT library waits exactly natTimeout seconds on all interfaces.
timeout := time.Duration(natTimeout+2) * time.Second
log.Printf("Waiting %s to acquire NAT mapping", timeout)
select {
case <-found:
log.Printf("Found NAT mapping: %s", mapping.ExternalAddresses())
case <-time.After(timeout):
log.Println("Timed out waiting for NAT mapping.")
}
}
if sessionLimitBps > 0 {
sessionLimiter = ratelimit.NewBucketWithRate(float64(sessionLimitBps), int64(2*sessionLimitBps))
}
@@ -160,7 +210,7 @@ func main() {
go statusService(statusAddr)
}
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", extAddress, id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
uri, err := url.Parse(fmt.Sprintf("relay://%s/?id=%s&pingInterval=%s&networkTimeout=%s&sessionLimitBps=%d&globalLimitBps=%d&statusAddr=%s&providedBy=%s", mapping.Address(), id, pingInterval, networkTimeout, sessionLimitBps, globalLimitBps, statusAddr, providedBy))
if err != nil {
log.Fatalln("Failed to construct URI", err)
}
@@ -178,11 +228,11 @@ func main() {
for _, pool := range pools {
pool = strings.TrimSpace(pool)
if len(pool) > 0 {
go poolHandler(pool, uri)
go poolHandler(pool, uri, mapping)
}
}
go listener(listen, tlsCfg)
go listener(proto, listen, tlsCfg)
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
@@ -212,7 +262,7 @@ func main() {
func monitorLimits() {
limitCheckTimer = time.NewTimer(time.Minute)
for _ = range limitCheckTimer.C {
for range limitCheckTimer.C {
if atomic.LoadInt64(&numConnections)+atomic.LoadInt64(&numProxies) > descriptorLimit {
atomic.StoreInt32(&overLimit, 1)
log.Println("Gone past our connection limits. Starting to refuse new/drop idle connections.")
@@ -222,3 +272,15 @@ func monitorLimits() {
limitCheckTimer.Reset(time.Minute)
}
}
type mapping struct {
*nat.Mapping
}
func (m *mapping) Address() nat.Address {
ext := m.ExternalAddresses()
if len(ext) > 0 {
return ext[0]
}
return m.Mapping.Address()
}
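The new mapping type embeds *nat.Mapping and overrides Address() so that a discovered external address, when present, wins over the locally configured one. A self-contained sketch of that embed-and-override pattern with stand-in types (the real nat.Mapping API is richer than this):

    package main

    import "fmt"

    // Stand-ins for nat.Address and nat.Mapping, just to show the embedding pattern.
    type address struct {
        host string
        port int
    }

    func (a address) String() string { return fmt.Sprintf("%s:%d", a.host, a.port) }

    type natMapping struct {
        local    address
        external []address
    }

    func (m *natMapping) Address() address             { return m.local }
    func (m *natMapping) ExternalAddresses() []address { return m.external }

    // mapping embeds *natMapping and overrides Address() to prefer a discovered
    // external address, falling back to the embedded type's answer.
    type mapping struct {
        *natMapping
    }

    func (m *mapping) Address() address {
        if ext := m.ExternalAddresses(); len(ext) > 0 {
            return ext[0]
        }
        return m.natMapping.Address()
    }

    func main() {
        m := mapping{&natMapping{
            local:    address{"192.168.1.10", 22067},
            external: []address{{"203.0.113.7", 22067}},
        }}
        fmt.Println(m.Address()) // prints 203.0.113.7:22067
    }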
@@ -12,16 +12,19 @@ import (
"time"
)
func poolHandler(pool string, uri *url.URL) {
func poolHandler(pool string, uri *url.URL, mapping mapping) {
if debug {
log.Println("Joining", pool)
}
for {
uriCopy := *uri
uriCopy.Host = mapping.Address().String()
var b bytes.Buffer
json.NewEncoder(&b).Encode(struct {
URL string `json:"url"`
}{
uri.String(),
uriCopy.String(),
})
resp, err := http.Post(pool, "application/json", &b)
@@ -39,7 +42,7 @@ func poolHandler(pool string, uri *url.URL) {
log.Println(pool, "under load, will retry in a minute")
time.Sleep(time.Minute)
continue
} else if resp.StatusCode == 403 {
} else if resp.StatusCode == 401 {
log.Println(pool, "failed to join due to IP address not matching external address. Aborting")
return
} else if resp.StatusCode == 200 {
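poolHandler now advertises the address taken from the NAT mapping on every join attempt. Note the `uriCopy := *uri` before the Host rewrite: url.URL is a plain struct, so copying the value keeps the caller's URL untouched while only the copy gets the external host. A compact illustration:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        u, _ := url.Parse("relay://10.0.0.5:22067/?id=ABC")
        uriCopy := *u                    // shallow copy of the struct
        uriCopy.Host = "203.0.113.7:443" // rewrite only the copy
        fmt.Println(u.String())          // relay://10.0.0.5:22067/?id=ABC
        fmt.Println(uriCopy.String())    // relay://203.0.113.7:443/?id=ABC
    }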
@@ -130,7 +130,7 @@ func printProgress(prefix string, count *int64) {
expectedIterations := float64(int(1) << uint(wantBits))
fmt.Printf("Want %d bits for prefix %q, about %.2g certs to test (statistically speaking)\n", wantBits, prefix, expectedIterations)
for _ = range time.NewTicker(15 * time.Second).C {
for range time.NewTicker(15 * time.Second).C {
tried := atomic.LoadInt64(count)
elapsed := time.Since(started)
rate := float64(tried) / elapsed.Seconds()
@@ -53,6 +53,7 @@ type apiService struct {
statics *staticsServer
model modelIntf
eventSub events.BufferedSubscription
diskEventSub events.BufferedSubscription
discoverer discover.CachingMux
connectionsService connectionsIntf
fss *folderSummaryService
@@ -68,10 +69,10 @@ type apiService struct {
type modelIntf interface {
GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{}
Completion(device protocol.DeviceID, folder string) float64
Completion(device protocol.DeviceID, folder string) model.FolderCompletion
Override(folder string)
NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int)
NeedSize(folder string) (nfiles int, bytes int64)
NeedSize(folder string) (nfiles, ndeletes int, bytes int64)
ConnectionStats() map[string]interface{}
DeviceStatistics() map[string]stats.DeviceStatistics
FolderStatistics() map[string]stats.FolderStatistics
@@ -113,7 +114,7 @@ type connectionsIntf interface {
Status() map[string]interface{}
}
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) *apiService {
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, diskEventSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService connectionsIntf, errors, systemLog logger.Recorder) *apiService {
service := &apiService{
id: id,
cfg: cfg,
@@ -122,6 +123,7 @@ func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKey
statics: newStaticsServer(cfg.GUI().Theme, assetDir),
model: m,
eventSub: eventSub,
diskEventSub: diskEventSub,
discoverer: discoverer,
connectionsService: connectionsService,
systemConfigMut: sync.NewMutex(),
@@ -229,7 +231,8 @@ func (s *apiService) Serve() {
getRestMux.HandleFunc("/rest/db/need", s.getDBNeed) // folder [perpage] [page]
getRestMux.HandleFunc("/rest/db/status", s.getDBStatus) // folder
getRestMux.HandleFunc("/rest/db/browse", s.getDBBrowse) // folder [prefix] [dirsonly] [levels]
getRestMux.HandleFunc("/rest/events", s.getEvents) // since [limit]
getRestMux.HandleFunc("/rest/events", s.getIndexEvents) // since [limit]
getRestMux.HandleFunc("/rest/events/disk", s.getDiskEvents) // since [limit]
getRestMux.HandleFunc("/rest/stats/device", s.getDeviceStats) // -
getRestMux.HandleFunc("/rest/stats/folder", s.getFolderStats) // -
getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID) // id
@@ -313,6 +316,11 @@ func (s *apiService) Serve() {
// Add the CORS handling
handler = corsMiddleware(handler)
if addressIsLocalhost(guiCfg.Address()) && !guiCfg.InsecureSkipHostCheck {
// Verify source host
handler = localhostMiddleware(handler)
}
handler = debugMiddleware(handler)
srv := http.Server{
@@ -437,10 +445,12 @@ func corsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Process OPTIONS requests
if r.Method == "OPTIONS" {
// Add a generous access-control-allow-origin header for CORS requests
w.Header().Add("Access-Control-Allow-Origin", "*")
// Only GET/POST Methods are supported
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
// Only this custom header can be set
w.Header().Set("Access-Control-Allow-Headers", "X-API-Key")
// Only these headers can be set
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
// The request is meant to be cached 10 minutes
w.Header().Set("Access-Control-Max-Age", "600")
@@ -495,6 +505,17 @@ func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
})
}
func localhostMiddleware(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if addressIsLocalhost(r.Host) {
h.ServeHTTP(w, r)
return
}
http.Error(w, "Host check error", http.StatusForbidden)
})
}
func (s *apiService) whenDebugging(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.cfg.GUI().Debugging {
@@ -503,7 +524,6 @@ func (s *apiService) whenDebugging(h http.Handler) http.Handler {
}
http.Error(w, "Debugging disabled", http.StatusBadRequest)
return
})
}
@@ -583,8 +603,12 @@ func (s *apiService) getDBCompletion(w http.ResponseWriter, r *http.Request) {
return
}
sendJSON(w, map[string]float64{
"completion": s.model.Completion(device, folder),
comp := s.model.Completion(device, folder)
sendJSON(w, map[string]interface{}{
"completion": comp.CompletionPct,
"needBytes": comp.NeedBytes,
"globalBytes": comp.GlobalBytes,
"needDeletes": comp.NeedDeletes,
})
}
@@ -605,8 +629,8 @@ func folderSummary(cfg configIntf, m modelIntf, folder string) map[string]interf
localFiles, localDeleted, localBytes := m.LocalSize(folder)
res["localFiles"], res["localDeleted"], res["localBytes"] = localFiles, localDeleted, localBytes
needFiles, needBytes := m.NeedSize(folder)
res["needFiles"], res["needBytes"] = needFiles, needBytes
needFiles, needDeletes, needBytes := m.NeedSize(folder)
res["needFiles"], res["needDeletes"], res["needBytes"] = needFiles, needDeletes, needBytes
res["inSyncFiles"], res["inSyncBytes"] = globalFiles-needFiles, globalBytes-needBytes
@@ -744,11 +768,13 @@ func (s *apiService) postSystemConfig(w http.ResponseWriter, r *http.Request) {
// Activate and save
if err := s.cfg.Replace(to); err != nil {
l.Warnln("Replacing config:", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := s.cfg.Save(); err != nil {
l.Warnln("Saving config:", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -970,15 +996,22 @@ func (s *apiService) postDBIgnores(w http.ResponseWriter, r *http.Request) {
s.getDBIgnores(w, r)
}
func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request) {
func (s *apiService) getIndexEvents(w http.ResponseWriter, r *http.Request) {
s.fss.gotEventRequest()
s.getEvents(w, r, s.eventSub)
}
func (s *apiService) getDiskEvents(w http.ResponseWriter, r *http.Request) {
s.getEvents(w, r, s.diskEventSub)
}
func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request, eventSub events.BufferedSubscription) {
qs := r.URL.Query()
sinceStr := qs.Get("since")
limitStr := qs.Get("limit")
since, _ := strconv.Atoi(sinceStr)
limit, _ := strconv.Atoi(limitStr)
s.fss.gotEventRequest()
// Flush before blocking, to indicate that we've received the request and
// that it should not be retried. Must set Content-Type header before
// flushing.
@@ -986,7 +1019,7 @@ func (s *apiService) getEvents(w http.ResponseWriter, r *http.Request) {
f := w.(http.Flusher)
f.Flush()
evs := s.eventSub.Since(since, nil)
evs := eventSub.Since(since, nil)
if 0 < limit && limit < len(evs) {
evs = evs[len(evs)-limit:]
}
@@ -1142,7 +1175,7 @@ func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if s.model.ConnectedTo(device) {
tot[deviceStr] += s.model.Completion(device, folder.ID)
tot[deviceStr] += s.model.Completion(device, folder.ID).CompletionPct
} else {
tot[deviceStr] = 0
}
@@ -1288,3 +1321,17 @@ func dirNames(dir string) []string {
sort.Strings(dirs)
return dirs
}
func addressIsLocalhost(addr string) bool {
host, _, err := net.SplitHostPort(addr)
if err != nil {
// There was no port, so we assume the address was just a hostname
host = addr
}
switch strings.ToLower(host) {
case "127.0.0.1", "::1", "localhost":
return true
default:
return false
}
}
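The API service gains a host check: when the GUI listens on a loopback address and InsecureSkipHostCheck is not set, requests whose Host header is not a localhost name are rejected with 403 Forbidden, which blunts DNS-rebinding attacks against the local GUI. The middleware and helper below are taken almost verbatim from the hunk above, wrapped into a runnable sketch to show the handler-chaining style:

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "strings"
    )

    func addressIsLocalhost(addr string) bool {
        host, _, err := net.SplitHostPort(addr)
        if err != nil {
            host = addr // no port present, treat the whole string as the host
        }
        switch strings.ToLower(host) {
        case "127.0.0.1", "::1", "localhost":
            return true
        default:
            return false
        }
    }

    // localhostMiddleware rejects requests whose Host header is not a localhost name.
    func localhostMiddleware(h http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if addressIsLocalhost(r.Host) {
                h.ServeHTTP(w, r)
                return
            }
            http.Error(w, "Host check error", http.StatusForbidden)
        })
    }

    func main() {
        var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "ok")
        })
        handler = localhostMiddleware(handler) // same wrapping style as corsMiddleware above
        http.ListenAndServe("127.0.0.1:8384", handler)
    }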
@@ -37,6 +37,9 @@ func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, n
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Allow requests carrying a valid API key
if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) {
// Set the access-control-allow-origin header for CORS requests
// since a valid API key has been provided
w.Header().Add("Access-Control-Allow-Origin", "*")
next.ServeHTTP(w, r)
return
}
@@ -78,7 +78,7 @@ func trackCPUUsage() {
var prevTime = time.Now().UnixNano()
var rusage prusage_t
var pid = os.Getpid()
for _ = range time.NewTicker(time.Second).C {
for range time.NewTicker(time.Second).C {
err := solarisPrusage(pid, &rusage)
if err != nil {
l.Warnln("getting prusage:", err)
@@ -16,6 +16,7 @@ import (
"net"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
@@ -69,7 +70,7 @@ func TestStopAfterBrokenConfig(t *testing.T) {
}
w := config.Wrap("/dev/null", cfg)
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil, nil)
srv.started = make(chan string)
sup := suture.NewSimple("test")
@@ -460,7 +461,6 @@ func TestHTTPLogin(t *testing.T) {
if resp.StatusCode != http.StatusOK {
t.Errorf("Unexpected non-200 return code %d for authed request (ISO-8859-1)", resp.StatusCode)
}
}
func startHTTP(cfg *mockedConfig) (string, error) {
@@ -469,6 +469,7 @@ func startHTTP(cfg *mockedConfig) (string, error) {
httpsKeyFile := "../../test/h1/https-key.pem"
assetDir := "../../gui"
eventSub := new(mockedEventSub)
diskEventSub := new(mockedEventSub)
discoverer := new(mockedCachingMux)
connections := new(mockedConnections)
errorLog := new(mockedLoggerRecorder)
@@ -477,7 +478,7 @@ func startHTTP(cfg *mockedConfig) (string, error) {
// Instantiate the API service
svc := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model,
eventSub, discoverer, connections, errorLog, systemLog)
eventSub, diskEventSub, discoverer, connections, errorLog, systemLog)
svc.started = addrChan
// Actually start the API service
@@ -491,7 +492,12 @@ func startHTTP(cfg *mockedConfig) (string, error) {
if err != nil {
return "", fmt.Errorf("Weird address from API service: %v", err)
}
baseURL := fmt.Sprintf("http://127.0.0.1:%d", tcpAddr.Port)
host, _, _ := net.SplitHostPort(cfg.gui.RawAddress)
if host == "" || host == "0.0.0.0" {
host = "127.0.0.1"
}
baseURL := fmt.Sprintf("http://%s", net.JoinHostPort(host, strconv.Itoa(tcpAddr.Port)))
return baseURL, nil
}
@@ -666,3 +672,252 @@ func testConfigPost(data io.Reader) (*http.Response, error) {
req.Header.Set("X-API-Key", testAPIKey)
return cli.Do(req)
}
func TestHostCheck(t *testing.T) {
// An API service bound to localhost should reject non-localhost host Headers
cfg := new(mockedConfig)
cfg.gui.RawAddress = "127.0.0.1:0"
baseURL, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
// A normal HTTP get to the localhost-bound service should succeed
resp, err := http.Get(baseURL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Regular HTTP get: expected 200 OK, not", resp.Status)
}
// A request with a suspicious Host header should fail
req, _ := http.NewRequest("GET", baseURL, nil)
req.Host = "example.com"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusForbidden {
t.Error("Suspicious Host header: expected 403 Forbidden, not", resp.Status)
}
// A request with an explicit "localhost:8384" Host header should pass
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "localhost:8384"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Explicit localhost:8384: expected 200 OK, not", resp.Status)
}
// A request with an explicit "localhost" Host header (no port) should pass
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "localhost"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Explicit localhost: expected 200 OK, not", resp.Status)
}
// A server with InsecureSkipHostCheck set behaves differently
cfg = new(mockedConfig)
cfg.gui.RawAddress = "127.0.0.1:0"
cfg.gui.InsecureSkipHostCheck = true
baseURL, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
// A request with a suspicious Host header should be allowed
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "example.com"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Incorrect host header, check disabled: expected 200 OK, not", resp.Status)
}
// A server bound to a wildcard address also doesn't do the check
cfg = new(mockedConfig)
cfg.gui.RawAddress = "0.0.0.0:0"
cfg.gui.InsecureSkipHostCheck = true
baseURL, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
// A request with a suspicious Host header should be allowed
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "example.com"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Incorrect host header, wildcard bound: expected 200 OK, not", resp.Status)
}
// This should all work over IPv6 as well
cfg = new(mockedConfig)
cfg.gui.RawAddress = "[::1]:0"
baseURL, err = startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
// A normal HTTP get to the localhost-bound service should succeed
resp, err = http.Get(baseURL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Regular HTTP get (IPv6): expected 200 OK, not", resp.Status)
}
// A request with a suspicious Host header should fail
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "example.com"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusForbidden {
t.Error("Suspicious Host header (IPv6): expected 403 Forbidden, not", resp.Status)
}
// A request with an explicit "localhost:8384" Host header should pass
req, _ = http.NewRequest("GET", baseURL, nil)
req.Host = "localhost:8384"
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Error("Explicit localhost:8384 (IPv6): expected 200 OK, not", resp.Status)
}
}
func TestAddressIsLocalhost(t *testing.T) {
testcases := []struct {
address string
result bool
}{
// These are all valid localhost addresses
{"localhost", true},
{"LOCALHOST", true},
{"::1", true},
{"127.0.0.1", true},
{"localhost:8080", true},
{"LOCALHOST:8000", true},
{"[::1]:8080", true},
{"127.0.0.1:8080", true},
// These are all non-localhost addresses
{"example.com", false},
{"example.com:8080", false},
{"192.0.2.10", false},
{"192.0.2.10:8080", false},
{"0.0.0.0", false},
{"0.0.0.0:8080", false},
{"::", false},
{"[::]:8080", false},
{":8080", false},
}
for _, tc := range testcases {
result := addressIsLocalhost(tc.address)
if result != tc.result {
t.Errorf("addressIsLocalhost(%q)=%v, expected %v", tc.address, result, tc.result)
}
}
}
func TestAccessControlAllowOriginHeader(t *testing.T) {
const testAPIKey = "foobarbaz"
cfg := new(mockedConfig)
cfg.gui.APIKey = testAPIKey
baseURL, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
cli := &http.Client{
Timeout: time.Second,
}
req, _ := http.NewRequest("GET", baseURL+"/rest/system/status", nil)
req.Header.Set("X-API-Key", testAPIKey)
resp, err := cli.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatal("GET on /rest/system/status should succeed, not", resp.Status)
}
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatal("GET on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
}
}
func TestOptionsRequest(t *testing.T) {
const testAPIKey = "foobarbaz"
cfg := new(mockedConfig)
cfg.gui.APIKey = testAPIKey
baseURL, err := startHTTP(cfg)
if err != nil {
t.Fatal(err)
}
cli := &http.Client{
Timeout: time.Second,
}
req, _ := http.NewRequest("OPTIONS", baseURL+"/rest/system/status", nil)
resp, err := cli.Do(req)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
t.Fatal("OPTIONS on /rest/system/status should succeed, not", resp.Status)
}
if resp.Header.Get("Access-Control-Allow-Origin") != "*" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Origin: *' header")
}
if resp.Header.Get("Access-Control-Allow-Methods") != "GET, POST" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Methods: GET, POST' header")
}
if resp.Header.Get("Access-Control-Allow-Headers") != "Content-Type, X-API-Key" {
t.Fatal("OPTIONS on /rest/system/status should return a 'Access-Control-Allow-Headers: Content-Type, X-API-KEY' header")
}
}
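startHTTP now derives the test base URL from the configured GUI address instead of hard-coding 127.0.0.1, using net.SplitHostPort and net.JoinHostPort so IPv6 literals come out correctly bracketed. A standalone sketch of that helper logic:

    package main

    import (
        "fmt"
        "net"
        "strconv"
    )

    func baseURL(rawAddress string, actualPort int) string {
        host, _, _ := net.SplitHostPort(rawAddress)
        if host == "" || host == "0.0.0.0" {
            host = "127.0.0.1"
        }
        // JoinHostPort adds brackets around IPv6 literals, so "::1" becomes "[::1]:8384".
        return fmt.Sprintf("http://%s", net.JoinHostPort(host, strconv.Itoa(actualPort)))
    }

    func main() {
        fmt.Println(baseURL("127.0.0.1:0", 8384)) // http://127.0.0.1:8384
        fmt.Println(baseURL("[::1]:0", 8384))     // http://[::1]:8384
        fmt.Println(baseURL("0.0.0.0:0", 8384))   // http://127.0.0.1:8384
    }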
@@ -21,7 +21,7 @@ func trackCPUUsage() {
var prevUsage int64
var prevTime = time.Now().UnixNano()
var rusage syscall.Rusage
for _ = range time.NewTicker(time.Second).C {
for range time.NewTicker(time.Second).C {
syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
curTime := time.Now().UnixNano()
timeDiff := curTime - prevTime
@@ -34,7 +34,7 @@ func trackCPUUsage() {
prevTime := ctime.Nanoseconds()
prevUsage := ktime.Nanoseconds() + utime.Nanoseconds() // Always overflows
for _ = range time.NewTicker(time.Second).C {
for range time.NewTicker(time.Second).C {
err := syscall.GetProcessTimes(handle, &ctime, &etime, &ktime, &utime)
if err != nil {
continue
@@ -23,6 +23,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"runtime/pprof"
"sort"
"strconv"
@@ -41,6 +42,7 @@ import (
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syncthing/syncthing/lib/upgrade"
@@ -166,6 +168,11 @@ are mostly useful for developers. Use with care.
STNOUPGRADE Disable automatic upgrades.
STHASHING Select the SHA256 hashing package to use. Possible values
are "standard" for the Go standard library implementation,
"minio" for the github.com/minio/sha256-simd implementation,
and blank (the default) for auto detection.
GOMAXPROCS Set the maximum number of CPU cores to use. Defaults to all
available CPU cores.
@@ -274,6 +281,9 @@ func parseCommandLineOptions() RuntimeOptions {
}
func main() {
// We want all (our) goroutines in panic traces.
debug.SetTraceback("all")
options := parseCommandLineOptions()
l.SetFlags(options.logFlags)
@@ -542,6 +552,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// events. The LocalChangeDetected event might overwhelm the event
// receiver in some situations so we will not subscribe to it here.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents&^events.LocalChangeDetected), 1000)
diskSub := events.NewBufferedSubscription(events.Default.Subscribe(events.LocalChangeDetected), 1000)
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
@@ -567,7 +578,9 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
l.Infoln(LongVersion)
l.Infoln("My ID:", myID)
printHashRate()
sha256.SelectAlgo()
sha256.Report()
// Emit the Starting event, now that we know who we are.
@@ -740,7 +753,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
// GUI
setupGUI(mainService, cfg, m, apiSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)
setupGUI(mainService, cfg, m, apiSub, diskSub, cachedDiscovery, connectionsService, errors, systemLog, runtimeOptions)
if runtimeOptions.cpuProfile {
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
@@ -840,22 +853,6 @@ func setupSignalHandling() {
}()
}
// printHashRate prints the hashing performance in MB/s, formatting it with
// appropriate precision for the value, i.e. 182 MB/s, 18 MB/s, 1.8 MB/s, 0.18
// MB/s.
func printHashRate() {
hashRate := cpuBench(3, 100*time.Millisecond)
decimals := 0
if hashRate < 1 {
decimals = 2
} else if hashRate < 10 {
decimals = 1
}
l.Infof("Single thread hash performance is ~%.*f MB/s", decimals, hashRate)
}
func loadConfig() (*config.Wrapper, error) {
cfgFile := locations[locConfigFile]
cfg, err := config.Load(cfgFile, myID)
@@ -889,19 +886,32 @@ func loadOrCreateConfig() *config.Wrapper {
}
func archiveAndSaveConfig(cfg *config.Wrapper) error {
// To prevent previous config from being cleaned up, quickly touch it too
now := time.Now()
_ = os.Chtimes(cfg.ConfigPath(), now, now) // May return error on Android etc; no worries
// Copy the existing config to an archive copy
archivePath := cfg.ConfigPath() + fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)
l.Infoln("Archiving a copy of old config file format at:", archivePath)
if err := osutil.Rename(cfg.ConfigPath(), archivePath); err != nil {
if err := copyFile(cfg.ConfigPath(), archivePath); err != nil {
return err
}
// Do a regular atomic config save
return cfg.Save()
}
func copyFile(src, dst string) error {
bs, err := ioutil.ReadFile(src)
if err != nil {
return err
}
if err := ioutil.WriteFile(dst, bs, 0600); err != nil {
// Attempt to clean up
os.Remove(dst)
return err
}
return nil
}
func startAuditing(mainService *suture.Supervisor) {
auditFile := timestampedLoc(locAuditLog)
fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
@@ -919,7 +929,7 @@ func startAuditing(mainService *suture.Supervisor) {
l.Infoln("Audit log in", auditFile)
}
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub events.BufferedSubscription, diskSub events.BufferedSubscription, discoverer discover.CachingMux, connectionsService *connections.Service, errors, systemLog logger.Recorder, runtimeOptions RuntimeOptions) {
guiCfg := cfg.GUI()
if !guiCfg.Enabled {
@@ -930,7 +940,7 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
l.Warnln("Insecure admin access is enabled.")
}
api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, discoverer, connectionsService, errors, systemLog)
api := newAPIService(myID, cfg, locations[locHTTPSCertFile], locations[locHTTPSKeyFile], runtimeOptions.assetDir, m, apiSub, diskSub, discoverer, connectionsService, errors, systemLog)
cfg.Subscribe(api)
mainService.Add(api)
@@ -946,7 +956,7 @@ func defaultConfig(myName string) config.Configuration {
if !noDefaultFolder {
l.Infoln("Default folder created and/or linked to new config")
folderID := rand.String(5) + "-" + rand.String(5)
folderID := strings.ToLower(rand.String(5) + "-" + rand.String(5))
defaultFolder = config.NewFolderConfiguration(folderID, locations[locDefFolder])
defaultFolder.Label = "Default Folder (" + folderID + ")"
defaultFolder.RescanIntervalS = 60
@@ -1124,15 +1134,16 @@ func autoUpgrade(cfg *config.Wrapper) {
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
patterns := map[string]time.Duration{
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.11.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.13.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"panic-*.log": 7 * 24 * time.Hour, // keep panic logs for a week
"audit-*.log": 7 * 24 * time.Hour, // keep audit logs for a week
"index": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.11.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index-v0.13.0.db": 14 * 24 * time.Hour, // keep old index format for two weeks
"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
"config.xml.v*": 30 * 24 * time.Hour, // old config versions for a month
"*.idx.gz": 30 * 24 * time.Hour, // these should for sure no longer exist
"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
"tmp-index-sorter.*": time.Minute, // these should never exist on startup
}
for pat, dur := range patterns {
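The cleanConfigDirectory patterns map gains a tmp-index-sorter.* entry; the loop that applies the map is cut off in this view. A rough sketch of what such a cleanup loop typically does (glob each pattern, remove matches older than the associated duration); the real syncthing implementation may differ in detail:

    package main

    import (
        "log"
        "os"
        "path/filepath"
        "time"
    )

    // cleanDir removes entries in dir that match a pattern and are older than
    // the duration associated with that pattern.
    func cleanDir(dir string, patterns map[string]time.Duration) {
        for pat, dur := range patterns {
            files, err := filepath.Glob(filepath.Join(dir, pat))
            if err != nil {
                continue
            }
            for _, file := range files {
                info, err := os.Lstat(file)
                if err != nil {
                    continue
                }
                if time.Since(info.ModTime()) > dur {
                    if err := os.RemoveAll(file); err != nil {
                        log.Println("cleaning:", err)
                    }
                }
            }
        }
    }

    func main() {
        cleanDir(os.TempDir(), map[string]time.Duration{
            "panic-*.log": 7 * 24 * time.Hour,
        })
    }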
@@ -20,9 +20,8 @@ var (
func memorySize() (int64, error) {
var memoryStatusEx [64]byte
binary.LittleEndian.PutUint32(memoryStatusEx[:], 64)
p := uintptr(unsafe.Pointer(&memoryStatusEx[0]))
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, p, 0, 0)
ret, _, callErr := syscall.Syscall(uintptr(globalMemoryStatusEx), 1, uintptr(unsafe.Pointer(&memoryStatusEx[0])), 0, 0)
if ret == 0 {
return 0, callErr
}
@@ -21,8 +21,8 @@ func (m *mockedModel) GlobalDirectoryTree(folder, prefix string, levels int, dir
return nil
}
func (m *mockedModel) Completion(device protocol.DeviceID, folder string) float64 {
return 0
func (m *mockedModel) Completion(device protocol.DeviceID, folder string) model.FolderCompletion {
return model.FolderCompletion{}
}
func (m *mockedModel) Override(folder string) {}
@@ -31,8 +31,8 @@ func (m *mockedModel) NeedFolderFiles(folder string, page, perpage int) ([]db.Fi
return nil, nil, nil, 0
}
func (m *mockedModel) NeedSize(folder string) (nfiles int, bytes int64) {
return 0, 0
func (m *mockedModel) NeedSize(folder string) (nfiles, ndeletes int, bytes int64) {
return 0, 0, 0
}
func (m *mockedModel) ConnectionStats() map[string]interface{} {
@@ -178,6 +178,22 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
if panicFd == nil {
dst.Write([]byte(line))
if strings.Contains(line, "SIGILL") {
l.Warnln(`
*******************************************************************************
* Crash due to illegal instruction detected. This is most likely due to a CPU *
* incompatibility with the high performance hashing package. Switching to the *
* standard hashing package instead. Please report this issue at: *
* *
* https://github.com/syncthing/syncthing/issues *
* *
* Include the details of your CPU. *
*******************************************************************************
`)
os.Setenv("STHASHING", "standard")
return
}
if strings.HasPrefix(line, "panic:") || strings.HasPrefix(line, "fatal error:") {
panicFd, err = os.Create(timestampedLoc(locPanicLog))
if err != nil {
@@ -207,9 +207,11 @@ func (c *folderSummaryService) sendSummary(folder string) {
// remote device.
comp := c.model.Completion(devCfg.DeviceID, folder)
events.Default.Log(events.FolderCompletion, map[string]interface{}{
"folder": folder,
"device": devCfg.DeviceID.String(),
"completion": comp,
"folder": folder,
"device": devCfg.DeviceID.String(),
"completion": comp.CompletionPct,
"needBytes": comp.NeedBytes,
"globalBytes": comp.GlobalBytes,
})
}
}
@@ -9,7 +9,6 @@ package main
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"encoding/json"
"fmt"
@@ -23,6 +22,7 @@ import (
"github.com/syncthing/syncthing/lib/dialer"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/upgrade"
"github.com/thejerf/suture"
)
@@ -1,5 +0,0 @@
{{.name}} ({{.version}}); urgency=medium
* Packaging of {{.version}}.
-- Syncthing Release Management <release@syncthing.net> {{.date}}
@@ -1 +0,0 @@
9
@@ -1,16 +0,0 @@
Package: syncthing
Version: {{.version}}
Priority: optional
Section: net
Architecture: {{.arch}}
Depends: libc6, procps
Homepage: https://syncthing.net/
Maintainer: Syncthing Release Management <release@syncthing.net>
Description: Open Source Continuous File Synchronization
Syncthing is an application that lets you synchronize your files across
multiple devices. This means the creation, modification or deletion of files
on one machine will automatically be replicated to your other devices. We
believe your data is your data alone and you deserve to choose where it is
stored. Therefore Syncthing does not upload your data to the cloud but
exchanges your data across your machines as soon as they are online at the
same time.
@@ -1,6 +0,0 @@
#!/bin/bash
set -euo pipefail
if [[ ${1:-} == configure ]]; then
pkill -HUP -x syncthing || true
fi
@@ -5,7 +5,7 @@ After=suspend.target
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/usr/bin/pkill -HUP -x syncthing
|
||||
ExecStart=-/usr/bin/pkill -HUP -x syncthing
|
||||
|
||||
[Install]
|
||||
WantedBy=suspend.target
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
[Unit]
|
||||
Description=Syncthing - Open Source Continuous File Synchronization
|
||||
Documentation=man:syncthing(1)
|
||||
After=network.target
|
||||
Wants=syncthing-inotify.service
|
||||
|
||||
[Service]
|
||||
|
||||
@@ -5,11 +5,13 @@ Dark theme
|
||||
Author: alessandro.g89
|
||||
Source: https://userstyles.org/styles/122502/syncthing-dark
|
||||
|
||||
Modified by: fti7
|
||||
|
||||
**/
|
||||
|
||||
body {
|
||||
color: #aaa !important;
|
||||
background-color: black !important;
|
||||
background-color: #272727 !important;
|
||||
}
|
||||
|
||||
a:hover,a:focus,a.focus{
|
||||
@@ -20,8 +22,10 @@ a:hover,a:focus,a.focus{
|
||||
/* navbar */
|
||||
.navbar {
|
||||
background-color: #333 !important;
|
||||
border-color: #333 !important;
|
||||
border-width: 2px !important;
|
||||
border-color: #424242 !important;
|
||||
border-top-width: 1px !important;
|
||||
border-bottom-width: 1px !important;
|
||||
|
||||
}
|
||||
|
||||
.navbar-text, .dropdown>a, .dropdown-menu>li>a, .hidden-xs>a, .navbar-link {
|
||||
@@ -29,7 +33,7 @@ a:hover,a:focus,a.focus{
|
||||
}
|
||||
|
||||
.dropdown-menu {
|
||||
border-color: #333 !important;
|
||||
border-color: #424242 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
@@ -40,7 +44,7 @@ a:hover,a:focus,a.focus{
|
||||
}
|
||||
|
||||
.open>.dropdown-toggle, .dropdown-toggle:hover {
|
||||
border-color: #333 !important;
|
||||
border-color: #424242 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
@@ -51,7 +55,7 @@ a:hover,a:focus,a.focus{
|
||||
|
||||
li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
outline: none !important;
|
||||
border-color: #333 !important;
|
||||
border-color: #424242 !important;
|
||||
background-color: #222 !important;
|
||||
}
|
||||
|
||||
@@ -63,18 +67,18 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
|
||||
/* main panel */
|
||||
.panel {
|
||||
background-color: #111 !important;
|
||||
background-color: #323232 !important;
|
||||
border-width: 2px !important;
|
||||
}
|
||||
|
||||
.panel-default {
|
||||
border-color: #222 !important;
|
||||
border-color: #424242 !important;
|
||||
}
|
||||
|
||||
.panel-default > .panel-heading {
|
||||
color: #aaa !important;
|
||||
border-color: #222 !important;
|
||||
background-color: #222 !important;
|
||||
background-color: #3B3B3B !important;
|
||||
}
|
||||
.panel-warning > .panel-heading {
|
||||
color: #222 !important;
|
||||
@@ -85,16 +89,16 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
}
|
||||
|
||||
.panel-footer {
|
||||
background-color: #111 !important;
|
||||
background-color: #2D2D2D !important;
|
||||
border-width: 0 !important;
|
||||
}
|
||||
|
||||
.table-striped>tbody>tr:nth-of-type(odd) {
|
||||
background-color: #181818 !important;
|
||||
background-color: #2E2E2E !important;
|
||||
}
|
||||
|
||||
.panel-group .panel-heading+.panel-collapse>.panel-body, .panel-group .panel-heading+.panel-collapse>.list-group {
|
||||
border-top: 1px solid #222 !important;
|
||||
border-top: 1px solid #424242 !important;
|
||||
}
|
||||
|
||||
.identicon rect {
|
||||
@@ -156,11 +160,11 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
|
||||
/* modal dialogs */
|
||||
.modal-header {
|
||||
border-bottom-color: #222 !important;
|
||||
border-bottom-color: #424242 !important;
|
||||
}
|
||||
|
||||
.modal-header:not(.alert) {
|
||||
background-color: #222;
|
||||
background-color: #3B3B3B;
|
||||
}
|
||||
|
||||
.alert-info {
|
||||
@@ -179,12 +183,12 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
.modal-content {
|
||||
border-color: #666 !important;
|
||||
border-width: 2px !important;
|
||||
background-color: #111 !important;
|
||||
background-color: #272727 !important;
|
||||
}
|
||||
|
||||
.modal-footer {
|
||||
border-color: #111 !important;
|
||||
background-color: #111 !important;
|
||||
border-color: #303030 !important;
|
||||
background-color: #2D2D2D !important;
|
||||
}
|
||||
|
||||
.help-block {
|
||||
@@ -193,8 +197,8 @@ li.hidden-xs:hover, .navbar-link:hover, .navbar-link:focus {
|
||||
|
||||
.form-control {
|
||||
color: #aaa !important;
|
||||
border-color: #444 !important;
|
||||
background-color: black !important;
|
||||
border-color: #424242 !important;
|
||||
background-color: #3B3B3B !important;
|
||||
}
|
||||
|
||||
code.ng-binding{
|
||||
@@ -204,8 +208,8 @@ code.ng-binding{
|
||||
|
||||
.well, .form-control[readonly="readonly"], .popover { /* read-only fields*/
|
||||
color: #666 !important;
|
||||
border-color: #444 !important;
|
||||
background-color: #111 !important;
|
||||
border-color: #424242 !important;
|
||||
background-color: #3B3B3B !important;
|
||||
}
|
||||
|
||||
/* buttons for pagination */
|
||||
|
||||
@@ -117,7 +117,7 @@
|
||||
"New Folder": "Nouveau partage",
|
||||
"Newest First": "Les plus récents en premier",
|
||||
"No": "Non",
|
||||
"No File Versioning": "Pas de version de fichier",
|
||||
"No File Versioning": "Pas de préservation",
|
||||
"Normal": "Normal",
|
||||
"Notice": "Notification",
|
||||
"OK": "OK",
|
||||
@@ -159,15 +159,15 @@
|
||||
"Save": "Sauver",
|
||||
"Scan Time Remaining": "Temps d'analyse restant",
|
||||
"Scanning": "Analyse en cours",
|
||||
"Select the devices to share this folder with.": "Sélectionner les appareils invités à ce partage.",
|
||||
"Select the devices to share this folder with.": "Synchroniser avec :",
|
||||
"Select the folders to share with this device.": "Sélectionner les partages auxquels participe cet appareil.",
|
||||
"Settings": "Configuration",
|
||||
"Share": "Partager",
|
||||
"Share Folder": "Partager",
|
||||
"Share Folders With Device": "Partages avec cet appareil",
|
||||
"Share With Devices": "Partage avec des appareils",
|
||||
"Share With Devices": "Synchroniser avec des appareils",
|
||||
"Share this folder?": "Acceptez-vous ce partage ?",
|
||||
"Shared With": "Partagé avec",
|
||||
"Shared With": "Synchronisé avec",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Identifiant court du partage. Il sera le même sur l'ensemble des appareils du groupe.",
|
||||
"Show ID": "Afficher mon ID",
|
||||
"Show QR": "Afficher l'image QR",
|
||||
@@ -175,11 +175,11 @@
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Affiché à la place de l'ID de l'appareil dans le groupe. Si laissé vide, il sera mis à jour par le nom proposé par l'appareil distant.",
|
||||
"Shutdown": "Arrêter",
|
||||
"Shutdown Complete": "Arrêté !",
|
||||
"Simple File Versioning": "Suivi simple des versions de fichier",
|
||||
"Simple File Versioning": "Suivi simple des versions de fichiers",
|
||||
"Single level wildcard (matches within a directory only)": "Astérisque à un seul niveau (correspond uniquement à l’intérieur du répertoire)",
|
||||
"Smallest First": "Les plus petits en premier",
|
||||
"Source Code": "Code source",
|
||||
"Staggered File Versioning": "Versions échelonnées de fichier",
|
||||
"Staggered File Versioning": "Versions échelonnées de fichiers",
|
||||
"Start Browser": "Démarrer le navigateur web",
|
||||
"Statistics": "Statistiques",
|
||||
"Stopped": "Arrêté",
|
||||
@@ -222,7 +222,7 @@
|
||||
"This Device": "Cet appareil",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Ceci peut aisément permettre à un intrus de lire et modifier n'importe quel fichier de votre ordinateur. ",
|
||||
"This is a major version upgrade.": "Ceci est une mise à jour majeure.",
|
||||
"Trash Can File Versioning": "Gestion des versions de fichier style poubelle.",
|
||||
"Trash Can File Versioning": "Style poubelle.",
|
||||
"Unknown": "Inconnu",
|
||||
"Unshared": "Non partagé",
|
||||
"Unused": "Non utilisé",
|
||||
|
||||
@@ -93,7 +93,7 @@
|
||||
"Ignore Permissions": "Ignorer les permissions",
|
||||
"Incoming Rate Limit (KiB/s)": "Limite du débit de réception (Ko/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Une configuration incorrecte peut créer des dommages dans vos répertoires et mettre Syncthing hors-service.",
|
||||
"Introducer": "introductrice",
|
||||
"Introducer": "introducteur",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Inverser la condition donnée (i.e. ne pas exclure)",
|
||||
"Keep Versions": "Conserver les versions",
|
||||
"Largest First": "Les plus volumineux en premier",
|
||||
@@ -117,7 +117,7 @@
|
||||
"New Folder": "Nouveau partage",
|
||||
"Newest First": "Les plus récents en premier",
|
||||
"No": "Non",
|
||||
"No File Versioning": "Pas de préservation des fichiers",
|
||||
"No File Versioning": "Pas de préservation",
|
||||
"Normal": "Normal",
|
||||
"Notice": "Notification",
|
||||
"OK": "OK",
|
||||
@@ -129,7 +129,7 @@
|
||||
"Out of Sync Items": "Fichiers non synchronisés",
|
||||
"Outgoing Rate Limit (KiB/s)": "Limite du débit d'émission (Ko/s)",
|
||||
"Override Changes": "Écraser les changements",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin vers le répertoire dans l'appareil local. Il sera créé s'il n'existe pas. Le caractère tilde (~) peut être utilisé comme raccourci vers",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Chemin vers le répertoire dans l'appareil local. Il sera créé s'il n'existe pas. Le caractère tilde (~, ou ~+Espace sous Windows+Azerty) peut être utilisé comme raccourci vers",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Chemin où les copies doivent être conservées (laisser vide pour le chemin par défaut de .stversions dans le répertoire)",
|
||||
"Pause": "Pause",
|
||||
"Paused": "En pause",
|
||||
@@ -159,15 +159,15 @@
|
||||
"Save": "Enregistrer",
|
||||
"Scan Time Remaining": "Temps d'analyse restant",
|
||||
"Scanning": "Analyse en cours",
|
||||
"Select the devices to share this folder with.": "Sélectionner les appareils invités à ce partage.",
|
||||
"Select the folders to share with this device.": "Sélectionner les partages auxquels cet appareil participe.",
|
||||
"Select the devices to share this folder with.": "Synchroniser avec :",
|
||||
"Select the folders to share with this device.": "Sélectionner les partages auxquels participe cet appareil.",
|
||||
"Settings": "Configuration",
|
||||
"Share": "Partager",
|
||||
"Share Folder": "Partager",
|
||||
"Share Folders With Device": "Partages avec cet appareil",
|
||||
"Share With Devices": "Partage avec des appareils",
|
||||
"Share With Devices": "Synchroniser avec des appareils",
|
||||
"Share this folder?": "Acceptez-vous ce partage ?",
|
||||
"Shared With": "Partagé avec",
|
||||
"Shared With": "Synchronisé avec",
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Identifiant court du partage. Il sera le même sur l'ensemble des appareils du groupe.",
|
||||
"Show ID": "Afficher mon ID",
|
||||
"Show QR": "Afficher le QR",
|
||||
@@ -175,11 +175,11 @@
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Affiché à la place de l'ID de l'appareil dans le groupe. Si laissé vide, il sera mis à jour par le nom proposé par l'appareil distant.",
|
||||
"Shutdown": "Arrêter",
|
||||
"Shutdown Complete": "Arrêté !",
|
||||
"Simple File Versioning": "Suivi simple des versions de fichier",
|
||||
"Simple File Versioning": "Suivi simple des versions de fichiers",
|
||||
"Single level wildcard (matches within a directory only)": "Astérisque à un seul niveau (correspond uniquement à l’intérieur du répertoire)",
|
||||
"Smallest First": "Les plus petits en premier",
|
||||
"Source Code": "Code source",
|
||||
"Staggered File Versioning": "Versions échelonnées de fichier",
|
||||
"Staggered File Versioning": "Versions échelonnées de fichiers",
|
||||
"Start Browser": "Démarrer le navigateur web",
|
||||
"Statistics": "Statistiques",
|
||||
"Stopped": "Arrêté",
|
||||
@@ -222,7 +222,7 @@
|
||||
"This Device": "Cet appareil",
|
||||
"This can easily give hackers access to read and change any files on your computer.": "Ceci peut aisément permettre à un intrus de lire et modifier n'importe quel fichier de votre ordinateur.",
|
||||
"This is a major version upgrade.": "Ceci est une mise à jour majeure.",
|
||||
"Trash Can File Versioning": "Préservation style poubelle",
|
||||
"Trash Can File Versioning": "Style poubelle",
|
||||
"Unknown": "Inconnu",
|
||||
"Unshared": "Non partagé",
|
||||
"Unused": "Non utilisé",
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"Connection Type": "Tilkoplingstype",
|
||||
"Copied from elsewhere": "Kopiert frå ein annan stad",
|
||||
"Copied from original": "Kopiert frå originalen",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Copyright © 2014-2016 the following Contributors:",
|
||||
"Copyright © 2014-2016 the following Contributors:": "Opphavsrett © 2014-2016 for følgjande bidragsyterar:",
|
||||
"Copyright © 2015 the following Contributors:": "Opphavsrett © 2015 og desse bidragsytarane:",
|
||||
"Danger!": "Fare!",
|
||||
"Delete": "Slett",
|
||||
@@ -123,7 +123,7 @@
|
||||
"OK": "OK",
|
||||
"Off": "Av",
|
||||
"Oldest First": "Elste fyrst",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Optional descriptive label for the folder. Can be different on each device.",
|
||||
"Optional descriptive label for the folder. Can be different on each device.": "Valfri merkelapp på katalogen. Denne kan være ulik på andre enheter.",
|
||||
"Options": "Val",
|
||||
"Out of Sync": "Ikkje synkronisert",
|
||||
"Out of Sync Items": "Ikkje-synkroniserte element",
|
||||
@@ -147,7 +147,7 @@
|
||||
"Release Notes": "Utgivingsnotat",
|
||||
"Remote Devices": "Eksterne Einingar",
|
||||
"Remove": "Fjern",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Required identifier for the folder. Must be the same on all cluster devices.",
|
||||
"Required identifier for the folder. Must be the same on all cluster devices.": "Påkrevd identifikator for katalogen. Denne må være lik på alle einingar i samme klynge.",
|
||||
"Rescan": "Skann På Ny",
|
||||
"Rescan All": "Skann alle på nytt",
|
||||
"Rescan Interval": "Skanneintervall",
|
||||
@@ -197,7 +197,7 @@
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Samla statistikk er opent tilgjengeleg på {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Instillingane har blitt lagra men ikkje aktivert. Syncthing må starta på ny for å aktivera dei nye instillingane.",
|
||||
"The device ID cannot be blank.": "Eining ID kan ikkje vera tom.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Einings-IDen som skal oppgis her kan hentast fram via \"Rediger > Vis ID\"-dialogboksen på den andre eininga. Mellomrom og bindestrek er valfritt (blir ignorert).",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Einings-ID-en du skal nytta her finn du i meldingsvindauget \"Rediger > Vis ID\" på den andre eininga. Mellomrom og bindestrek er valfrie (vert ignorert).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Den krypterte bruksrapporten vert send dagleg. Han vert nytta til å spora vanlege plattformer, mappestorleikar og programutgåvene. Om datasettet endrar seg, vil dette meldingsvindauget dukka opp att og du vil verta beden om å godkjenna det.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Einings-ID-en er ikkje gyldig. Han må vera på 52 eller 56 teikn og vera samansett av bokstavar og tal med valfrie mellomrom og bindestrekar.",
|
||||
@@ -237,7 +237,7 @@
|
||||
"Version": "Versjon",
|
||||
"Versions Path": "Utgåvebane",
|
||||
"Versions are automatically deleted if they are older than the maximum age or exceed the number of files allowed in an interval.": "Utgåver vert automatisk sletta når maksimal levetid er nådd eller når det høgaste tillate talet på filer innan eit intervall vert overskride.",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Warning, this path is a subdirectory of an existing folder \"{{otherFolder}}\".",
|
||||
"Warning, this path is a subdirectory of an existing folder \"{%otherFolder%}\".": "Åtvaring, denne banen er ei undermappe av den eksisterande mappa \"{{otherFolder}}\".",
|
||||
"When adding a new device, keep in mind that this device must be added on the other side too.": "Hugs at når ei ny eining vert lagt til må ho òg leggjast til på andre sida.",
|
||||
"When adding a new folder, keep in mind that the Folder ID is used to tie folders together between devices. They are case sensitive and must match exactly between all devices.": "Hugs at når ei ny mappe vert lagt til, vert mappe-ID-en brukt til å binda saman mappene mellom einingane. Det er skilnad på store og små bokstavar, så ID-ane må vera identiske på alle einingane.",
|
||||
"Yes": "Ja",
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"Danger!": "Perigo!",
|
||||
"Delete": "Apagar",
|
||||
"Deleted": "Apagado",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Dispositivo \"{{name}}\" ({{device}} em {{address}}) deseja se conectar. Adicionar novo dispositivo?",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Dispositivo \"{{name}}\" ({{device}} em {{address}}) quer se conectar. Adicionar novo dispositivo?",
|
||||
"Device ID": "ID do dispositivo",
|
||||
"Device Identification": "Identificação do dispositivo",
|
||||
"Device Name": "Nome do dispositivo",
|
||||
@@ -246,6 +246,6 @@
|
||||
"full documentation": "documentação completa",
|
||||
"items": "itens",
|
||||
"{%device%} wants to share folder \"{%folder%}\".": "{{device}} quer compartilhar a pasta \"{{folder}}\".",
|
||||
"{%device%} wants to share folder \"{%folderLabel%}\" ({%folder%}).": "{{device}} deseja compartilhar a pasta \"{{folderLabel}}\" ({{folder}}).",
|
||||
"{%device%} wants to share folder \"{%folderLabel%}\" ({%folder%}).": "{{device}} quer compartilhar a pasta \"{{folderLabel}}\" ({{folder}}).",
|
||||
"{%device%} wants to share folder \"{%folderlabel%}\" ({%folder%}).": "{{device}} quer compartilhar a pasta \"{{folderlabel}}\" ({{folder}})."
|
||||
}
|
||||
@@ -42,12 +42,12 @@
|
||||
"Deleted": "Borttaget",
|
||||
"Device \"{%name%}\" ({%device%} at {%address%}) wants to connect. Add new device?": "Enhet \"{{name}}\" ({{device}} på {{address}}) vill ansluta. Lägg till ny enhet?",
|
||||
"Device ID": "Enhets ID",
|
||||
"Device Identification": "Enhetsidentifikation",
|
||||
"Device Name": "Enhetens namn",
|
||||
"Device Identification": "Enhets identifikation",
|
||||
"Device Name": "Enhets namn",
|
||||
"Device {%device%} ({%address%}) wants to connect. Add new device?": "Enheten {{device}} ({{address}}) vill ansluta. Lägg till ny enhet?",
|
||||
"Devices": "Enheter",
|
||||
"Disconnected": "Frånkopplad",
|
||||
"Discovery": "Upptäckt",
|
||||
"Discovery": "Annonsering",
|
||||
"Documentation": "Dokumentation",
|
||||
"Download Rate": "Nedladdningshastighet",
|
||||
"Downloaded": "Hämtat",
|
||||
@@ -80,18 +80,18 @@
|
||||
"GUI": "Grafiskt gränssnitt",
|
||||
"GUI Authentication Password": "Lösenord för GUI",
|
||||
"GUI Authentication User": "Användare för GUI",
|
||||
"GUI Listen Addresses": "Lyssningsadresser för GUI",
|
||||
"GUI Listen Addresses": "Lyssnadresser för GUI",
|
||||
"Generate": "Generera",
|
||||
"Global Discovery": "Global upptäckt",
|
||||
"Global Discovery Server": "Global upptäcktsserver",
|
||||
"Global Discovery Servers": "Globala upptäcktsservrar",
|
||||
"Global Discovery": "Global annonsering",
|
||||
"Global Discovery Server": "Global annonseringsserver",
|
||||
"Global Discovery Servers": "Globala annonseringsservrar",
|
||||
"Global State": "Global status",
|
||||
"Help": "Hjälp",
|
||||
"Home page": "Hemsida",
|
||||
"Ignore": "Ignorera",
|
||||
"Ignore Patterns": "Ignorera mönster",
|
||||
"Ignore Permissions": "Ignorera rättigheter",
|
||||
"Incoming Rate Limit (KiB/s)": "Nedladdningshastighetsgräns (KiB/s)",
|
||||
"Incoming Rate Limit (KiB/s)": "Inkommande hastighetsbegränsning (KiB/s)",
|
||||
"Incorrect configuration may damage your folder contents and render Syncthing inoperable.": "Inkorrekt konfiguration kan skada innehållet i katalogen and få Syncthing att sluta fungera.",
|
||||
"Introducer": "introduktör",
|
||||
"Inversion of the given condition (i.e. do not exclude)": "Vänder på villkoret, d.v.s. exkluderar inte.",
|
||||
@@ -102,7 +102,7 @@
|
||||
"Last seen": "Senast sedd",
|
||||
"Later": "Senare",
|
||||
"Listeners": "Lyssnare",
|
||||
"Local Discovery": "Lokal upptäckt",
|
||||
"Local Discovery": "Lokal annonsering",
|
||||
"Local State": "Lokal status",
|
||||
"Local State (Total)": "Lokal status (totalt)",
|
||||
"Major Upgrade": "Stor uppgradering",
|
||||
@@ -127,7 +127,7 @@
|
||||
"Options": "Alternativ",
|
||||
"Out of Sync": "Osynkroniserad",
|
||||
"Out of Sync Items": "Osynkroniserade objekt",
|
||||
"Outgoing Rate Limit (KiB/s)": "Uppladdningshastighetsgräns (KiB/s)",
|
||||
"Outgoing Rate Limit (KiB/s)": "Utgående hastighetsbegränsning (KiB/s)",
|
||||
"Override Changes": "Åsidosätt förändringar",
|
||||
"Path to the folder on the local computer. Will be created if it does not exist. The tilde character (~) can be used as a shortcut for": "Sökväg till katalogen på din dator. Kommer att skapas om det inte finns. Tecknet tilde (~) kan användas som en genväg för",
|
||||
"Path where versions should be stored (leave empty for the default .stversions folder in the folder).": "Sökväg där versioner sparas (lämna tomt för att använda .stversions i den ordinarie katalogen).",
|
||||
@@ -171,8 +171,8 @@
|
||||
"Short identifier for the folder. Must be the same on all cluster devices.": "Kort identifieringssträng för katalogen. Måste vara samma på alla enheter i klustret.",
|
||||
"Show ID": "Visa ID",
|
||||
"Show QR": "Visa QR",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Visas i stället för enhetens ID. Skickas till andra enheter som namn på denna enhet.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Visas i stället för enhetens ID. Sätts till namnet på den andra enheten vid första anslutning om det lämnas tomt.",
|
||||
"Shown instead of Device ID in the cluster status. Will be advertised to other devices as an optional default name.": "Visas i stället för enhets ID i samlingsstatusen. Skickas till andra enheter som namn på denna enhet.",
|
||||
"Shown instead of Device ID in the cluster status. Will be updated to the name the device advertises if left empty.": "Visas i stället för enhets ID i samlingsstatusen. Sätts till namnet på den andra enheten vid första anslutning om det lämnas tomt.",
|
||||
"Shutdown": "Stäng av",
|
||||
"Shutdown Complete": "Avstängning klar",
|
||||
"Simple File Versioning": "Enkel filversionshantering",
|
||||
@@ -184,7 +184,7 @@
|
||||
"Statistics": "Statistik",
|
||||
"Stopped": "Stoppad",
|
||||
"Support": "Support",
|
||||
"Sync Protocol Listen Addresses": "Synkroniseringsprotokollets lyssningsadresser",
|
||||
"Sync Protocol Listen Addresses": "Synkroniseringsprotokollets lyssnaradresser",
|
||||
"Syncing": "Synkroniserar",
|
||||
"Syncthing has been shut down.": "Syncthing har stängts.",
|
||||
"Syncthing includes the following software or portions thereof:": "Syncthing innehåller följande mjukvarupaket eller delar av dem:",
|
||||
@@ -196,11 +196,11 @@
|
||||
"The aggregated statistics are publicly available at the URL below.": "Den aggregerade statistiken är offentligt tillgängliga på webbadressen nedan.",
|
||||
"The aggregated statistics are publicly available at {%url%}.": "Sammanställd statistik finns publikt tillgänglig på {{url}}.",
|
||||
"The configuration has been saved but not activated. Syncthing must restart to activate the new configuration.": "Konfigurationen har sparats men inte aktiverats. Syncthing måste startas om för att aktivera den nya konfigurationen.",
|
||||
"The device ID cannot be blank.": "Enhetens ID kan inte vara tomt.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Enhetens ID som behövs här kan du hitta i \"Redigera > Visa ID\"-dialogen på den andra enheten. Mellanrum och bindestreck är valfria (ignoreras).",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Enhetens ID som behövs här kan du hitta i \"Redigera > Visa ID\"-dialogen på den andra enheten. Mellanrum och bindestreck är valfria (ignoreras).",
|
||||
"The device ID cannot be blank.": "Enhets ID:t kan inte vara tomt.",
|
||||
"The device ID to enter here can be found in the \"Actions > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Enhets ID:t som behövs här kan du hitta i \"Funktioner > Visa ID\"-dialogrutan på den andra enheten. Mellanrum och bindestreck är valfria (ignoreras).",
|
||||
"The device ID to enter here can be found in the \"Edit > Show ID\" dialog on the other device. Spaces and dashes are optional (ignored).": "Enhets ID:t som behövs här kan du hitta i \"Funktioner > Visa ID\"-dialogrutan på den andra enheten. Mellanrum och bindestreck är valfria (ignoreras).",
|
||||
"The encrypted usage report is sent daily. It is used to track common platforms, folder sizes and app versions. If the reported data set is changed you will be prompted with this dialog again.": "Den krypterade användarstatistiken skickas dagligen. Den används för att spåra vanliga plattformar, katalogstorlekar och versioner. Om datan som rapporteras ändras så kommer du att bli tillfrågad igen.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Det inmatade enhetens ID verkar inte korrekt. Det ska vara en 52 eller 56 teckens sträng bestående av siffror och bokstäver, eventuellt med mellanrum och bindestreck.",
|
||||
"The entered device ID does not look valid. It should be a 52 or 56 character string consisting of letters and numbers, with spaces and dashes being optional.": "Det inmatade enhets ID:t verkar inte korrekt. Det ska vara en 52 eller 56 teckens sträng bestående av siffror och bokstäver, eventuellt med mellanrum och bindestreck.",
|
||||
"The first command line parameter is the folder path and the second parameter is the relative path in the folder.": "Den första kommandoparametern är sökvägen till mappen och den andra parametern är den relativa sökvägen i mappen.",
|
||||
"The folder ID cannot be blank.": "Katalogens ID får inte vara tomt.",
|
||||
"The folder ID must be a short identifier (64 characters or less) consisting of letters, numbers and the dot (.), dash (-) and underscode (_) characters only.": "Katalogens ID måste vara en kort sträng (64 tecken eller mindre), bestående av endast bokstäver, siffror, punkt (.), bindestreck (-) och understreck (_).",
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
<p translate>Copyright © 2014-2016 the following Contributors:</p>
|
||||
<div class="row">
|
||||
<div class="col-md-12" id="contributor-list">
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Laurent Etiemble, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Scott Klupfel, Stefan Kuntz, Tim Abell, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Yannic A.
|
||||
Jakob Borg, Audrius Butkevicius, Alexander Graf, Anderson Mesquita, Antony Male, Ben Schulz, Caleb Callaway, Daniel Harte, Lars K.W. Gohlke, Lode Hoste, Michael Ploujnikov, Philippe Schommers, Ryan Sullivan, Sergey Mishin, Stefan Tatschner, Aaron Bieber, Adam Piggott, Alessandro G., Alexandre Viau, Andrew Dunham, Andrey D, Antoine Lamielle, Arthur Axel fREW Schmidt, Bart De Vries, Ben Curthoys, Ben Sidhom, Benny Ng, Brandon Philips, Brendan Long, Brian R. Becker, Carsten Hagemann, Cathryne Linenweaver, Cedric Staniewski, Chris Howie, Chris Joel, Colin Kennedy, Daniel Bergmann, Daniel Martí, David Rimmer, Denis A., Dennis Wilson, Dominik Heidler, Elias Jarlebring, Emil Hessman, Erik Meitner, Federico Castagnini, Felix Ableitner, Felix Unterpaintner, Francois-Xavier Gsell, Frank Isemann, Gilli Sigurdsson, Jaakko Hannikainen, Jacek Szafarkiewicz, Jake Peterson, James Patterson, Jaroslav Malec, Jens Diemer, Jochen Voss, Johan Vromans, Karol Różycki, Kelong Cong, Ken'ichi Kamada, Kevin Allen, Laurent Etiemble, Lord Landon Agahnim, Majed Abdulaziz, Marc Laporte, Marc Pujol, Marcin Dziadus, Mateusz Naściszewski, Matt Burke, Max Schulze, Michael Jephcote, Michael Tilli, Nate Morrison, Pascal Jungblut, Peter Hoeg, Phill Luby, Piotr Bejda, Scott Klupfel, Stefan Kuntz, Tim Abell, Tim Howes, Tobias Nygren, Tomas Cerveny, Tully Robinson, Tyler Brazier, Veeti Paananen, Victor Buinsky, Vil Brekin, William A. Kennington III, Wulf Weich, Yannic A.
|
||||
</div>
|
||||
</div>
|
||||
<hr/>
|
||||
|
||||
@@ -309,28 +309,8 @@ angular.module('syncthing.core')
|
||||
if (!$scope.completion[data.device]) {
|
||||
$scope.completion[data.device] = {};
|
||||
}
|
||||
$scope.completion[data.device][data.folder] = data.completion;
|
||||
|
||||
var tot = 0,
|
||||
cnt = 0,
|
||||
isComplete = true;
|
||||
for (var cmp in $scope.completion[data.device]) {
|
||||
if (cmp === "_total") {
|
||||
continue;
|
||||
}
|
||||
tot += $scope.completion[data.device][cmp] * $scope.model[cmp].globalBytes;
|
||||
cnt += $scope.model[cmp].globalBytes;
|
||||
if ($scope.completion[data.device][cmp] != 100) {
|
||||
isComplete = false;
|
||||
}
|
||||
}
|
||||
//To be sure that we won't get any rounding errors resulting in non-100% status when it should be
|
||||
if (isComplete) {
|
||||
$scope.completion[data.device]._total = 100;
|
||||
}
|
||||
else {
|
||||
$scope.completion[data.device]._total = tot / cnt;
|
||||
}
|
||||
$scope.completion[data.device][data.folder] = data;
|
||||
recalcCompletion(data.device);
|
||||
});
|
||||
|
||||
$scope.$on(Events.FOLDER_ERRORS, function (event, arg) {
|
||||
@@ -458,6 +438,32 @@ angular.module('syncthing.core')
|
||||
}
|
||||
}
|
||||
|
||||
function recalcCompletion(device) {
|
||||
var total = 0, needed = 0, deletes = 0;
|
||||
for (var folder in $scope.completion[device]) {
|
||||
if (folder === "_total") {
|
||||
continue;
|
||||
}
|
||||
total += $scope.completion[device][folder].globalBytes;
|
||||
needed += $scope.completion[device][folder].needBytes;
|
||||
deletes += $scope.completion[device][folder].needDeletes;
|
||||
}
|
||||
if (total == 0) {
|
||||
$scope.completion[device]._total = 100;
|
||||
} else {
|
||||
$scope.completion[device]._total = 100 * (1 - needed / total);
|
||||
}
|
||||
|
||||
if (needed == 0 && deletes > 0) {
|
||||
// We don't need any data, but we have deletes that we need
|
||||
// to do. Drop down the completion percentage to indicate
|
||||
// that we have stuff to do.
|
||||
$scope.completion[device]._total = 95;
|
||||
}
|
||||
|
||||
console.log("recalcCompletion", device, $scope.completion[device]);
|
||||
}
|
||||
|
||||
function refreshCompletion(device, folder) {
|
||||
if (device === $scope.myID) {
|
||||
return;
|
||||
@@ -467,30 +473,8 @@ angular.module('syncthing.core')
|
||||
if (!$scope.completion[device]) {
|
||||
$scope.completion[device] = {};
|
||||
}
|
||||
$scope.completion[device][folder] = data.completion;
|
||||
|
||||
var tot = 0,
|
||||
cnt = 0,
|
||||
isComplete = true;
|
||||
for (var cmp in $scope.completion[device]) {
|
||||
if (cmp === "_total") {
|
||||
continue;
|
||||
}
|
||||
tot += $scope.completion[device][cmp] * $scope.model[cmp].globalBytes;
|
||||
cnt += $scope.model[cmp].globalBytes;
|
||||
if ($scope.completion[device][cmp] != 100) {
|
||||
isComplete = false;
|
||||
}
|
||||
}
|
||||
//To be sure that we won't get any rounding errors resulting in non-100% status when it should be
|
||||
if (isComplete) {
|
||||
$scope.completion[device]._total = 100;
|
||||
}
|
||||
else {
|
||||
$scope.completion[device]._total = tot / cnt;
|
||||
}
|
||||
|
||||
console.log("refreshCompletion", device, folder, $scope.completion[device]);
|
||||
$scope.completion[device][folder] = data;
|
||||
recalcCompletion(device);
|
||||
}).error($scope.emitHTTPError);
|
||||
}
|
||||
|
||||
@@ -670,7 +654,7 @@ angular.module('syncthing.core')
|
||||
if (state === 'error') {
|
||||
return 'stopped'; // legacy, the state is called "stopped" in the GUI
|
||||
}
|
||||
if (state === 'idle' && $scope.model[folderCfg.id].needFiles > 0) {
|
||||
if (state === 'idle' && $scope.model[folderCfg.id].needFiles + $scope.model[folderCfg.id].needDeletes > 0) {
|
||||
return 'outofsync';
|
||||
}
|
||||
if (state === 'scanning') {
|
||||
|
||||
51
jenkins/build-linux.bash
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Copyright (C) 2016 The Syncthing Authors.
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
# This script should be run by Jenkins as './src/github.com/syncthing/syncthing/jenkins/build-linux.bash',
|
||||
# that is, it should be run from $GOPATH.
|
||||
|
||||
. src/github.com/syncthing/syncthing/jenkins/common.bash
|
||||
|
||||
init
|
||||
|
||||
# after init we are in the source directory
|
||||
|
||||
clean
|
||||
fetchExtra
|
||||
buildSource
|
||||
build
|
||||
test
|
||||
testWithCoverage
|
||||
|
||||
platforms=(
|
||||
dragonfly-amd64
|
||||
freebsd-amd64 freebsd-386
|
||||
linux-amd64 linux-386 linux-arm linux-arm64 linux-ppc64 linux-ppc64le
|
||||
netbsd-amd64 netbsd-386
|
||||
openbsd-amd64 openbsd-386
|
||||
solaris-amd64
|
||||
)
|
||||
|
||||
echo Building
|
||||
for plat in "${platforms[@]}"; do
|
||||
echo Building "$plat"
|
||||
|
||||
goos="${plat%-*}"
|
||||
goarch="${plat#*-}"
|
||||
go run build.go -goos "$goos" -goarch "$goarch" tar
|
||||
mv *.tar.gz "$WORKSPACE"
|
||||
echo
|
||||
done
|
||||
|
||||
go run build.go -goarch amd64 deb
|
||||
go run build.go -goarch i386 deb
|
||||
go run build.go -goarch armel deb
|
||||
go run build.go -goarch armhf deb
|
||||
|
||||
mv *.deb "$WORKSPACE"
|
||||
37
jenkins/build-macos.bash
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Copyright (C) 2016 The Syncthing Authors.
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
# This script should be run by Jenkins as './src/github.com/syncthing/syncthing/jenkins/build-macos.bash',
|
||||
# that is, it should be run from $GOPATH.
|
||||
|
||||
. src/github.com/syncthing/syncthing/jenkins/common.bash
|
||||
|
||||
init
|
||||
|
||||
# after init we are in the source directory
|
||||
|
||||
clean
|
||||
fetchExtra
|
||||
build
|
||||
test
|
||||
|
||||
platforms=(
|
||||
darwin-amd64 darwin-386
|
||||
)
|
||||
|
||||
echo Building
|
||||
for plat in "${platforms[@]}"; do
|
||||
echo Building "$plat"
|
||||
|
||||
goos="${plat%-*}"
|
||||
goarch="${plat#*-}"
|
||||
go run build.go -goos "$goos" -goarch "$goarch" tar
|
||||
mv *.tar.gz "$WORKSPACE"
|
||||
echo
|
||||
done
|
||||
55
jenkins/build-windows.bat
Normal file
@@ -0,0 +1,55 @@
|
||||
@echo off
|
||||
|
||||
rem Copyright (C) 2016 The Syncthing Authors.
|
||||
rem
|
||||
rem This Source Code Form is subject to the terms of the Mozilla Public
|
||||
rem License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
rem You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
rem This batch file should be run from the GOPATH.
|
||||
rem It expects to run on amd64, for windows-amd64 Go to be installed in C:\go
|
||||
rem and for windows-386 Go to be installed in C:\go-386.
|
||||
|
||||
rem cURL should be installed in C:\Program Files\cURL.
|
||||
|
||||
set ORIGPATH="C:\Program Files\cURL\bin";%PATH%
|
||||
set PATH=c:\go\bin;%ORIGPATH%
|
||||
set GOROOT=c:\go
|
||||
|
||||
cd >gopath
|
||||
set /p GOPATH= <gopath
|
||||
|
||||
cd src\github.com\syncthing\syncthing
|
||||
|
||||
echo Initializing ^& cleaning
|
||||
go version
|
||||
git clean -fxd || goto error
|
||||
go run build.go version
|
||||
echo.
|
||||
|
||||
echo Fetching extras
|
||||
mkdir extra
|
||||
curl -s -L -o extra/Getting-Started.pdf https://docs.syncthing.net/pdf/Getting-Started.pdf || goto :error
|
||||
curl -s -L -o extra/FAQ.pdf https://docs.syncthing.net/pdf/FAQ.pdf || goto :error
|
||||
echo.
|
||||
|
||||
echo Testing
|
||||
go run build.go test || goto :error
|
||||
echo.
|
||||
|
||||
echo Building (amd64)
|
||||
go run build.go zip || goto :error
|
||||
echo.
|
||||
|
||||
set PATH=c:\go-386\bin;%ORIGPATH%
|
||||
set GOROOT=c:\go-386
|
||||
|
||||
echo building (386)
|
||||
go run build.go zip || goto :error
|
||||
echo.
|
||||
|
||||
goto :EOF
|
||||
|
||||
:error
|
||||
echo code #%errorlevel%.
|
||||
exit /b %errorlevel%
|
||||
84
jenkins/common.bash
Normal file
@@ -0,0 +1,84 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Copyright (C) 2016 The Syncthing Authors.
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
ulimit -t 600 || true
|
||||
ulimit -d 1024000 || true
|
||||
ulimit -m 1024000 || true
|
||||
|
||||
export CGO_ENABLED=0
|
||||
export GO386=387
|
||||
export GOARM=5
|
||||
|
||||
function init {
|
||||
echo Initializing
|
||||
export GOPATH=$(pwd)
|
||||
export WORKSPACE="${WORKSPACE:-$GOPATH}"
|
||||
go version
|
||||
rm -f *.tar.gz *.zip *.deb
|
||||
cd src/github.com/syncthing/syncthing
|
||||
|
||||
version=$(go run build.go version)
|
||||
echo "Building $version"
|
||||
echo
|
||||
}
|
||||
|
||||
function clean {
|
||||
echo Cleaning
|
||||
rm -rf "$GOPATH/pkg"
|
||||
git clean -fxd
|
||||
echo
|
||||
}
|
||||
|
||||
function fetchExtra {
|
||||
echo Fetching extra resources
|
||||
mkdir extra
|
||||
curl -s -o extra/Getting-Started.pdf http://docs.syncthing.net/pdf/Getting-Started.pdf
|
||||
curl -s -o extra/FAQ.pdf http://docs.syncthing.net/pdf/FAQ.pdf
|
||||
echo
|
||||
}
|
||||
|
||||
function checkAuthorsCopyright {
|
||||
echo Basic metadata checks
|
||||
go run script/check-authors.go
|
||||
go run script/check-copyright.go lib/ cmd/ script/
|
||||
echo
|
||||
}
|
||||
|
||||
function build {
|
||||
echo Build
|
||||
go run build.go
|
||||
echo
|
||||
}
|
||||
|
||||
function test {
|
||||
echo Test with race
|
||||
CGO_ENABLED=1 go run build.go -race test
|
||||
echo
|
||||
}
|
||||
|
||||
function testWithCoverage {
|
||||
echo Test with coverage
|
||||
CGO_ENABLED=1 ./build.sh test-cov
|
||||
|
||||
notCovered=$(egrep -c '\s0$' coverage.out)
|
||||
total=$(wc -l coverage.out | awk '{print $1}')
|
||||
coverPct=$(awk "BEGIN{print (1 - $notCovered / $total) * 100}")
|
||||
echo "$coverPct" > "coverage.txt"
|
||||
echo "Test coverage is $coverPct%%"
|
||||
echo
|
||||
}
|
||||
|
||||
function buildSource {
|
||||
echo Archiving source
|
||||
echo "$version" > RELEASE
|
||||
pushd .. >/dev/null
|
||||
tar c -z -f "$WORKSPACE/syncthing-source-$version.tar.gz" --exclude .git syncthing
|
||||
popd >/dev/null
|
||||
echo
|
||||
}
|
||||
@@ -42,16 +42,16 @@ var (
|
||||
// DefaultDiscoveryServersV4 should be substituted when the configuration
|
||||
// contains <globalAnnounceServer>default-v4</globalAnnounceServer>.
|
||||
DefaultDiscoveryServersV4 = []string{
|
||||
"https://discovery-v4-1.syncthing.net/v2/?id=SR7AARM-TCBUZ5O-VFAXY4D-CECGSDE-3Q6IZ4G-XG7AH75-OBIXJQV-QJ6NLQA", // 194.126.249.5, Sweden
|
||||
"https://discovery-v4-2.syncthing.net/v2/?id=DVU36WY-H3LVZHW-E6LLFRE-YAFN5EL-HILWRYP-OC2M47J-Z4PE62Y-ADIBDQC", // 45.55.230.38, USA
|
||||
"https://discovery-v4-3.syncthing.net/v2/?id=VK6HNJ3-VVMM66S-HRVWSCR-IXEHL2H-U4AQ4MW-UCPQBWX-J2L2UBK-NVZRDQZ", // 128.199.95.124, Singapore
|
||||
"https://discovery-v4-4.syncthing.net/v2/?id=LYXKCHX-VI3NYZR-ALCJBHF-WMZYSPK-QG6QJA3-MPFYMSO-U56GTUK-NA2MIAW", // 95.85.19.244, NL
|
||||
}
|
||||
// DefaultDiscoveryServersV6 should be substituted when the configuration
|
||||
// contains <globalAnnounceServer>default-v6</globalAnnounceServer>.
|
||||
DefaultDiscoveryServersV6 = []string{
|
||||
"https://discovery-v6-1.syncthing.net/v2/?id=SR7AARM-TCBUZ5O-VFAXY4D-CECGSDE-3Q6IZ4G-XG7AH75-OBIXJQV-QJ6NLQA", // 2001:470:28:4d6::5, Sweden
|
||||
"https://discovery-v6-2.syncthing.net/v2/?id=DVU36WY-H3LVZHW-E6LLFRE-YAFN5EL-HILWRYP-OC2M47J-Z4PE62Y-ADIBDQC", // 2604:a880:800:10::182:a001, USA
|
||||
"https://discovery-v6-3.syncthing.net/v2/?id=VK6HNJ3-VVMM66S-HRVWSCR-IXEHL2H-U4AQ4MW-UCPQBWX-J2L2UBK-NVZRDQZ", // 2400:6180:0:d0::d9:d001, Singapore
|
||||
"https://discovery-v6-4.syncthing.net/v2/?id=LYXKCHX-VI3NYZR-ALCJBHF-WMZYSPK-QG6QJA3-MPFYMSO-U56GTUK-NA2MIAW", // 2a03:b0c0:0:1010::4ed:3001, NL
|
||||
}
|
||||
// DefaultDiscoveryServers should be substituted when the configuration
|
||||
// contains <globalAnnounceServer>default</globalAnnounceServer>.
|
||||
|
||||
@@ -64,6 +64,7 @@ func TestDefaultValues(t *testing.T) {
|
||||
AlwaysLocalNets: []string{},
|
||||
OverwriteRemoteDevNames: false,
|
||||
TempIndexMinBlocks: 10,
|
||||
UnackedNotificationIDs: []string{},
|
||||
}
|
||||
|
||||
cfg := New(device1)
|
||||
@@ -103,6 +104,9 @@ func TestDeviceConfig(t *testing.T) {
|
||||
AutoNormalize: true,
|
||||
MinDiskFreePct: 1,
|
||||
MaxConflicts: -1,
|
||||
Versioning: VersioningConfiguration{
|
||||
Params: map[string]string{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -194,6 +198,7 @@ func TestOverriddenValues(t *testing.T) {
|
||||
AlwaysLocalNets: []string{},
|
||||
OverwriteRemoteDevNames: true,
|
||||
TempIndexMinBlocks: 100,
|
||||
UnackedNotificationIDs: []string{},
|
||||
}
|
||||
|
||||
cfg, err := Load("testdata/overridenvalues.xml", device1)
|
||||
|
||||
@@ -13,15 +13,16 @@ import (
|
||||
)
|
||||
|
||||
type GUIConfiguration struct {
|
||||
Enabled bool `xml:"enabled,attr" json:"enabled" default:"true"`
|
||||
RawAddress string `xml:"address" json:"address" default:"127.0.0.1:8384"`
|
||||
User string `xml:"user,omitempty" json:"user"`
|
||||
Password string `xml:"password,omitempty" json:"password"`
|
||||
RawUseTLS bool `xml:"tls,attr" json:"useTLS"`
|
||||
APIKey string `xml:"apikey,omitempty" json:"apiKey"`
|
||||
InsecureAdminAccess bool `xml:"insecureAdminAccess,omitempty" json:"insecureAdminAccess"`
|
||||
Theme string `xml:"theme" json:"theme" default:"default"`
|
||||
Debugging bool `xml:"debugging,attr" json:"debugging"`
|
||||
Enabled bool `xml:"enabled,attr" json:"enabled" default:"true"`
|
||||
RawAddress string `xml:"address" json:"address" default:"127.0.0.1:8384"`
|
||||
User string `xml:"user,omitempty" json:"user"`
|
||||
Password string `xml:"password,omitempty" json:"password"`
|
||||
RawUseTLS bool `xml:"tls,attr" json:"useTLS"`
|
||||
APIKey string `xml:"apikey,omitempty" json:"apiKey"`
|
||||
InsecureAdminAccess bool `xml:"insecureAdminAccess,omitempty" json:"insecureAdminAccess"`
|
||||
Theme string `xml:"theme" json:"theme" default:"default"`
|
||||
Debugging bool `xml:"debugging,attr" json:"debugging"`
|
||||
InsecureSkipHostCheck bool `xml:"insecureSkipHostcheck,omitempty" json:"insecureSkipHostcheck"`
|
||||
}
|
||||
|
||||
func (c GUIConfiguration) Address() string {
|
||||
|
||||
@@ -304,15 +304,18 @@ func (w *Wrapper) Device(id protocol.DeviceID) (DeviceConfiguration, bool) {
|
||||
func (w *Wrapper) Save() error {
|
||||
fd, err := osutil.CreateAtomic(w.path, 0600)
|
||||
if err != nil {
|
||||
l.Debugln("CreateAtomic:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.cfg.WriteXML(fd); err != nil {
|
||||
l.Debugln("WriteXML:", err)
|
||||
fd.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fd.Close(); err != nil {
|
||||
l.Debugln("Close:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -47,8 +47,11 @@ func (f FileInfoTruncated) HasPermissionBits() bool {
|
||||
}
|
||||
|
||||
func (f FileInfoTruncated) FileSize() int64 {
|
||||
if f.IsDirectory() || f.IsDeleted() {
|
||||
return 128
|
||||
if f.Deleted {
|
||||
return 0
|
||||
}
|
||||
if f.IsDirectory() {
|
||||
return protocol.SyntheticDirectorySize
|
||||
}
|
||||
return f.Size
|
||||
}
|
||||
|
||||
@@ -12,13 +12,11 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
)
|
||||
|
||||
func TestMtimeFS(t *testing.T) {
|
||||
osutil.RemoveAll("testdata")
|
||||
defer osutil.RemoveAll("testdata")
|
||||
os.RemoveAll("testdata")
|
||||
defer os.RemoveAll("testdata")
|
||||
os.Mkdir("testdata", 0755)
|
||||
ioutil.WriteFile("testdata/exists0", []byte("hello"), 0644)
|
||||
ioutil.WriteFile("testdata/exists1", []byte("hello"), 0644)
|
||||
|
||||
@@ -345,7 +345,7 @@ func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) ([]
|
||||
case strings.HasSuffix(line, "/**"):
|
||||
err = addPattern(line)
|
||||
case strings.HasSuffix(line, "/"):
|
||||
err = addPattern(line)
|
||||
err = addPattern(line + "**")
|
||||
default:
|
||||
err = addPattern(line)
|
||||
if err == nil {
|
||||
|
||||
@@ -718,3 +718,22 @@ func TestIssue3174(t *testing.T) {
|
||||
t.Error("Should match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue3639(t *testing.T) {
|
||||
stignore := `
|
||||
foo/
|
||||
`
|
||||
pats := New(true)
|
||||
err := pats.Parse(bytes.NewBufferString(stignore), ".stignore")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !pats.Match("foo/bar").IsIgnored() {
|
||||
t.Error("Should match 'foo/bar'")
|
||||
}
|
||||
|
||||
if pats.Match("foo").IsIgnored() {
|
||||
t.Error("Should not match 'foo'")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -396,8 +396,8 @@ func (info ConnectionInfo) MarshalJSON() ([]byte, error) {
|
||||
|
||||
// ConnectionStats returns a map with connection statistics for each device.
|
||||
func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
m.pmut.RLock()
|
||||
m.fmut.RLock()
|
||||
m.pmut.RLock()
|
||||
|
||||
res := make(map[string]interface{})
|
||||
devs := m.cfg.Devices()
|
||||
@@ -426,8 +426,8 @@ func (m *Model) ConnectionStats() map[string]interface{} {
|
||||
|
||||
res["connections"] = conns
|
||||
|
||||
m.fmut.RUnlock()
|
||||
m.pmut.RUnlock()
|
||||
m.fmut.RUnlock()
|
||||
|
||||
in, out := protocol.TotalInOut()
|
||||
res["total"] = ConnectionInfo{
|
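The two hunks above swap the lock acquisition in ConnectionStats so fmut is always taken before pmut and released in the reverse order, matching the order used elsewhere in the model. A minimal sketch of why a single consistent order matters (the mutex names are reused from the hunk; the function body is illustrative):

package main

import (
	"fmt"
	"sync"
)

var fmut, pmut sync.RWMutex

// stats takes fmut before pmut, the convention the hunk enforces. If some
// other code path took pmut first and then waited for fmut, two goroutines
// could deadlock; a fixed order across all call sites rules that out.
func stats() string {
	fmut.RLock()
	defer fmut.RUnlock()
	pmut.RLock()
	defer pmut.RUnlock()
	return "folder and connection state read under both read locks"
}

func main() {
	fmt.Println(stats())
}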
||||
@@ -459,33 +459,49 @@ func (m *Model) FolderStatistics() map[string]stats.FolderStatistics {
|
||||
return res
|
||||
}
|
||||
|
||||
type FolderCompletion struct {
|
||||
CompletionPct float64
|
||||
NeedBytes int64
|
||||
GlobalBytes int64
|
||||
NeedDeletes int64
|
||||
}
|
||||
|
||||
// Completion returns the completion status, in percent, for the given device
|
||||
// and folder.
|
||||
func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
|
||||
func (m *Model) Completion(device protocol.DeviceID, folder string) FolderCompletion {
|
||||
m.fmut.RLock()
|
||||
rf, ok := m.folderFiles[folder]
|
||||
m.fmut.RUnlock()
|
||||
if !ok {
|
||||
return 0 // Folder doesn't exist, so we hardly have any of it
|
||||
return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
|
||||
}
|
||||
|
||||
_, _, tot := rf.GlobalSize()
|
||||
if tot == 0 {
|
||||
return 100 // Folder is empty, so we have all of it
|
||||
// Folder is empty, so we have all of it
|
||||
return FolderCompletion{
|
||||
CompletionPct: 100,
|
||||
}
|
||||
}
|
||||
|
||||
m.pmut.RLock()
|
||||
counts := m.deviceDownloads[device].GetBlockCounts(folder)
|
||||
m.pmut.RUnlock()
|
||||
|
||||
var need, fileNeed, downloaded int64
|
||||
var need, fileNeed, downloaded, deletes int64
|
||||
rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
|
||||
ft := f.(db.FileInfoTruncated)
|
||||
|
||||
// If the file is deleted, we account it only in the deleted column.
|
||||
if ft.Deleted {
|
||||
deletes++
|
||||
return true
|
||||
}
|
||||
|
||||
// This might be more than it really is, because some blocks can be of a smaller size.
|
||||
downloaded = int64(counts[ft.Name] * protocol.BlockSize)
|
||||
|
||||
fileNeed = ft.Size - downloaded
|
||||
fileNeed = ft.FileSize() - downloaded
|
||||
if fileNeed < 0 {
|
||||
fileNeed = 0
|
||||
}
|
||||
@@ -496,9 +512,23 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
|
||||
|
||||
needRatio := float64(need) / float64(tot)
|
||||
completionPct := 100 * (1 - needRatio)
|
||||
|
||||
// If the completion is 100% but there are deletes we need to handle,
|
||||
// drop it down a notch. Hack for consumers that look only at the
|
||||
// percentage (our own GUI does the same calculation as here on its own
|
||||
// and needs the same fixup).
|
||||
if need == 0 && deletes > 0 {
|
||||
completionPct = 95 // chosen by fair dice roll
|
||||
}
|
||||
|
||||
l.Debugf("%v Completion(%s, %q): %f (%d / %d = %f)", m, device, folder, completionPct, need, tot, needRatio)
|
||||
|
||||
return completionPct
|
||||
return FolderCompletion{
|
||||
CompletionPct: completionPct,
|
||||
NeedBytes: need,
|
||||
GlobalBytes: tot,
|
||||
NeedDeletes: deletes,
|
||||
}
|
||||
}
|
||||
|
||||
func sizeOfFile(f db.FileIntf) (files, deleted int, bytes int64) {
|
||||
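The rewritten Completion above returns a FolderCompletion struct rather than a bare percentage, but the percentage itself is still 100 * (1 - need/tot), pinned to 95 when only deletes remain and to 100 for an empty folder. A small, self-contained sketch of that arithmetic with made-up numbers (only the formula and the two special cases are taken from the hunk):

package main

import "fmt"

// completionPct mirrors the calculation in the hunk: the share of needed
// bytes over global bytes, with an empty folder counting as fully synced
// and a deletes-only backlog pinned at 95% so percentage-only consumers
// still see pending work.
func completionPct(needBytes, globalBytes, needDeletes int64) float64 {
	if globalBytes == 0 {
		return 100
	}
	pct := 100 * (1 - float64(needBytes)/float64(globalBytes))
	if needBytes == 0 && needDeletes > 0 {
		pct = 95
	}
	return pct
}

func main() {
	fmt.Println(completionPct(250, 1000, 0)) // 75: a quarter of the data is still needed
	fmt.Println(completionPct(0, 1000, 3))   // 95: nothing to download, but deletes remain
	fmt.Println(completionPct(0, 0, 0))      // 100: empty folder
}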
@@ -534,7 +564,7 @@ func (m *Model) LocalSize(folder string) (nfiles, deleted int, bytes int64) {
|
||||
}
|
||||
|
||||
// NeedSize returns the number and total size of currently needed files.
|
||||
func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
|
||||
func (m *Model) NeedSize(folder string) (nfiles, ndeletes int, bytes int64) {
|
||||
m.fmut.RLock()
|
||||
defer m.fmut.RUnlock()
|
||||
if rf, ok := m.folderFiles[folder]; ok {
|
||||
@@ -546,7 +576,8 @@ func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
|
||||
}
|
||||
|
||||
fs, de, by := sizeOfFile(f)
|
||||
nfiles += fs + de
|
||||
nfiles += fs
|
||||
ndeletes += de
|
||||
bytes += by
|
||||
return true
|
||||
})
|
||||
@@ -1434,12 +1465,12 @@ func sendIndexTo(minSequence int64, conn protocol.Connection, folder string, fs
|
||||
func (m *Model) updateLocalsFromScanning(folder string, fs []protocol.FileInfo) {
|
||||
m.updateLocals(folder, fs)
|
||||
|
||||
// Fire the LocalChangeDetected event to notify listeners about local
|
||||
// updates.
|
||||
m.fmut.RLock()
|
||||
path := m.folderCfgs[folder].Path()
|
||||
folderCfg := m.folderCfgs[folder]
|
||||
m.fmut.RUnlock()
|
||||
m.localChangeDetected(folder, path, fs)
|
||||
|
||||
// Fire the LocalChangeDetected event to notify listeners about local updates.
|
||||
m.localChangeDetected(folderCfg, fs)
|
||||
}
|
||||
|
||||
func (m *Model) updateLocalsFromPulling(folder string, fs []protocol.FileInfo) {
|
||||
@@ -1469,9 +1500,8 @@ func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Model) localChangeDetected(folder, path string, files []protocol.FileInfo) {
|
||||
// For windows paths, strip unwanted chars from the front
|
||||
path = strings.Replace(path, `\\?\`, "", 1)
|
||||
func (m *Model) localChangeDetected(folderCfg config.FolderConfiguration, files []protocol.FileInfo) {
|
||||
path := strings.Replace(folderCfg.Path(), `\\?\`, "", 1)
|
||||
|
||||
for _, file := range files {
|
||||
objType := "file"
|
||||
@@ -1495,14 +1525,16 @@ func (m *Model) localChangeDetected(folder, path string, files []protocol.FileIn
|
||||
action = "deleted"
|
||||
}
|
||||
|
||||
// The full file path, adjusted to the local path separator character.
|
||||
// The full file path, adjusted to the local path separator character. Also
|
||||
// for windows paths, strip unwanted chars from the front.
|
||||
path := filepath.Join(path, filepath.FromSlash(file.Name))
|
||||
|
||||
events.Default.Log(events.LocalChangeDetected, map[string]string{
|
||||
"folder": folder,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
"folderID": folderCfg.ID,
|
||||
"label": folderCfg.Label,
|
||||
"action": action,
|
||||
"type": objType,
|
||||
"path": path,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1704,41 +1736,46 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
subDirs = []string{""}
|
||||
}
|
||||
|
||||
// Do a scan of the database for each prefix, to check for deleted files.
|
||||
// Do a scan of the database for each prefix, to check for deleted and
|
||||
// ignored files.
|
||||
batch = batch[:0]
|
||||
for _, sub := range subDirs {
|
||||
var iterError error
|
||||
|
||||
fs.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
|
||||
f := fi.(db.FileInfoTruncated)
|
||||
if !f.IsDeleted() {
|
||||
if len(batch) == batchSizeFiles {
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
iterError = err
|
||||
return false
|
||||
}
|
||||
m.updateLocalsFromScanning(folder, batch)
|
||||
batch = batch[:0]
|
||||
if len(batch) == batchSizeFiles {
|
||||
if err := m.CheckFolderHealth(folder); err != nil {
|
||||
iterError = err
|
||||
return false
|
||||
}
|
||||
m.updateLocalsFromScanning(folder, batch)
|
||||
batch = batch[:0]
|
||||
}
|
||||
|
||||
if !f.IsInvalid() && (ignores.Match(f.Name).IsIgnored() || symlinkInvalid(folder, f)) {
|
||||
// File has been ignored or an unsupported symlink. Set invalid bit.
|
||||
l.Debugln("setting invalid bit on ignored", f)
|
||||
nf := protocol.FileInfo{
|
||||
Name: f.Name,
|
||||
Type: f.Type,
|
||||
Size: f.Size,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
Permissions: f.Permissions,
|
||||
NoPermissions: f.NoPermissions,
|
||||
Invalid: true,
|
||||
Version: f.Version, // The file is still the same, so don't bump version
|
||||
}
|
||||
batch = append(batch, nf)
|
||||
} else if _, err := mtimefs.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
|
||||
// File has been deleted.
|
||||
switch {
|
||||
case !f.IsInvalid() && (ignores.Match(f.Name).IsIgnored() || symlinkInvalid(folder, f)):
|
||||
// File was valid at last pass but has been ignored or is an
|
||||
// unsupported symlink. Set invalid bit.
|
||||
l.Debugln("setting invalid bit on ignored", f)
|
||||
nf := protocol.FileInfo{
|
||||
Name: f.Name,
|
||||
Type: f.Type,
|
||||
Size: f.Size,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
Permissions: f.Permissions,
|
||||
NoPermissions: f.NoPermissions,
|
||||
Invalid: true,
|
||||
Version: f.Version, // The file is still the same, so don't bump version
|
||||
}
|
||||
batch = append(batch, nf)
|
||||
|
||||
case !f.IsInvalid() && !f.IsDeleted():
|
||||
// The file is valid and not deleted. Let's check if it's
|
||||
// still here.
|
||||
|
||||
if _, err := mtimefs.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
|
||||
// We don't specifically verify that the error is
|
||||
// os.IsNotExist because there is a corner case when a
|
||||
// directory is suddenly transformed into a file. When that
|
||||
@@ -1749,7 +1786,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
|
||||
nf := protocol.FileInfo{
|
||||
Name: f.Name,
|
||||
Type: f.Type,
|
||||
Size: f.Size,
|
||||
Size: 0,
|
||||
ModifiedS: f.ModifiedS,
|
||||
ModifiedNs: f.ModifiedNs,
|
||||
Deleted: true,
|
||||
@@ -1914,6 +1951,7 @@ func (m *Model) Override(folder string) {
|
||||
need.Deleted = true
|
||||
need.Blocks = nil
|
||||
need.Version = need.Version.Update(m.shortID)
|
||||
need.Size = 0
|
||||
} else {
|
||||
// We have the file, replace with our version
|
||||
have.Version = have.Version.Merge(need.Version).Update(m.shortID)
|
||||
|
||||
@@ -93,6 +93,7 @@ func TestRequest(t *testing.T) {
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
m.ScanFolder("default")
|
||||
|
||||
bs := make([]byte, protocol.BlockSize)
|
||||
@@ -168,6 +169,7 @@ func benchmarkIndex(b *testing.B, nfiles int) {
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
files := genFiles(nfiles)
|
||||
m.Index(device1, "default", files)
|
||||
@@ -197,6 +199,7 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
files := genFiles(nfiles)
|
||||
ufiles := genFiles(nufiles)
|
||||
@@ -278,6 +281,7 @@ func BenchmarkRequest(b *testing.B) {
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
m.ScanFolder("default")
|
||||
|
||||
const n = 1000
|
||||
@@ -346,6 +350,7 @@ func TestDeviceRename(t *testing.T) {
|
||||
m.AddConnection(conn, hello)
|
||||
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
if cfg.Devices()[device1].Name != "" {
|
||||
t.Errorf("Device already has a name")
|
||||
@@ -424,6 +429,7 @@ func TestClusterConfig(t *testing.T) {
|
||||
m.AddFolder(cfg.Folders[0])
|
||||
m.AddFolder(cfg.Folders[1])
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
cm := m.generateClusterConfig(device2)
|
||||
|
||||
@@ -495,6 +501,7 @@ func TestIgnores(t *testing.T) {
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
expected := []string{
|
||||
".*",
|
||||
@@ -590,6 +597,7 @@ func TestROScanRecovery(t *testing.T) {
|
||||
m.AddFolder(fcfg)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
waitFor := func(status string) error {
|
||||
timeout := time.Now().Add(2 * time.Second)
|
||||
@@ -676,6 +684,7 @@ func TestRWScanRecovery(t *testing.T) {
|
||||
m.AddFolder(fcfg)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
waitFor := func(status string) error {
|
||||
timeout := time.Now().Add(2 * time.Second)
|
||||
@@ -739,6 +748,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
b := func(isfile bool, path ...string) protocol.FileInfo {
|
||||
typ := protocol.FileInfoTypeDirectory
|
||||
@@ -1343,8 +1353,8 @@ func TestIssue3028(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIssue3164(t *testing.T) {
|
||||
osutil.RemoveAll("testdata/issue3164")
|
||||
defer osutil.RemoveAll("testdata/issue3164")
|
||||
os.RemoveAll("testdata/issue3164")
|
||||
defer os.RemoveAll("testdata/issue3164")
|
||||
|
||||
if err := os.MkdirAll("testdata/issue3164/oktodelete/foobar", 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -1445,7 +1455,7 @@ func TestIssue2782(t *testing.T) {
|
||||
|
||||
testName := ".syncthing-test." + srand.String(16)
|
||||
testDir := filepath.Join(home, testName)
|
||||
if err := osutil.RemoveAll(testDir); err != nil {
|
||||
if err := os.RemoveAll(testDir); err != nil {
|
||||
t.Skip(err)
|
||||
}
|
||||
if err := osutil.MkdirAll(testDir+"/syncdir", 0755); err != nil {
|
||||
@@ -1457,7 +1467,7 @@ func TestIssue2782(t *testing.T) {
|
||||
if err := os.Symlink("syncdir", testDir+"/synclink"); err != nil {
|
||||
t.Skip(err)
|
||||
}
|
||||
defer osutil.RemoveAll(testDir)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
db := db.OpenMemory()
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
|
||||
@@ -1646,6 +1656,109 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue3496(t *testing.T) {
|
||||
// It seems like lots of deleted files can cause negative completion
|
||||
// percentages. Let's make sure that doesn't happen. Also do some general
|
||||
// checks on the completion calculation stuff.
|
||||
|
||||
dbi := db.OpenMemory()
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", dbi, nil)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.StartFolder("default")
|
||||
m.ServeBackground()
|
||||
defer m.Stop()
|
||||
|
||||
m.ScanFolder("default")
|
||||
|
||||
addFakeConn(m, device1)
|
||||
addFakeConn(m, device2)
|
||||
|
||||
// Reach into the model and grab the current file list...
|
||||
|
||||
m.fmut.RLock()
|
||||
fs := m.folderFiles["default"]
|
||||
m.fmut.RUnlock()
|
||||
var localFiles []protocol.FileInfo
|
||||
fs.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
|
||||
localFiles = append(localFiles, i.(protocol.FileInfo))
|
||||
return true
|
||||
})
|
||||
|
||||
// Mark all files as deleted and fake it as update from device1
|
||||
|
||||
for i := range localFiles {
|
||||
localFiles[i].Deleted = true
|
||||
localFiles[i].Version = localFiles[i].Version.Update(device1.Short())
|
||||
localFiles[i].Blocks = nil
|
||||
}
|
||||
|
||||
// Also add a small file that we're supposed to need, or the global size
|
||||
// stuff will bail out early due to the entire folder being zero size.
|
||||
|
||||
localFiles = append(localFiles, protocol.FileInfo{
|
||||
Name: "fake",
|
||||
Size: 1234,
|
||||
Type: protocol.FileInfoTypeFile,
|
||||
Version: protocol.Vector{Counters: []protocol.Counter{{ID: device1.Short(), Value: 42}}},
|
||||
})
|
||||
|
||||
m.IndexUpdate(device1, "default", localFiles)
|
||||
|
||||
// Check that the completion percentage for us makes sense
|
||||
|
||||
comp := m.Completion(protocol.LocalDeviceID, "default")
|
||||
if comp.NeedBytes > comp.GlobalBytes {
|
||||
t.Errorf("Need more bytes than exist, not possible: %d > %d", comp.NeedBytes, comp.GlobalBytes)
|
||||
}
|
||||
if comp.CompletionPct < 0 {
|
||||
t.Errorf("Less than zero percent complete, not possible: %.02f%%", comp.CompletionPct)
|
||||
}
|
||||
if comp.NeedBytes == 0 {
|
||||
t.Error("Need no bytes even though some files are deleted")
|
||||
}
|
||||
if comp.CompletionPct == 100 {
|
||||
t.Errorf("Fully complete, not possible: %.02f%%", comp.CompletionPct)
|
||||
}
|
||||
t.Log(comp)
|
||||
|
||||
// Check that NeedSize does the correct thing
|
||||
files, deletes, bytes := m.NeedSize("default")
|
||||
if files != 1 || bytes != 1234 {
|
||||
// The one we added synthetically above
|
||||
t.Errorf("Incorrect need size; %d, %d != 1, 1234", files, bytes)
|
||||
}
|
||||
if deletes != len(localFiles)-1 {
|
||||
// The rest
|
||||
t.Errorf("Incorrect need deletes; %d != %d", deletes, len(localFiles)-1)
|
||||
}
|
||||
}
|
||||
|
||||
func addFakeConn(m *Model, dev protocol.DeviceID) {
|
||||
conn1 := connections.Connection{
|
||||
IntermediateConnection: connections.IntermediateConnection{
|
||||
Conn: tls.Client(&fakeConn{}, nil),
|
||||
Type: "foo",
|
||||
Priority: 10,
|
||||
},
|
||||
Connection: &FakeConnection{
|
||||
id: dev,
|
||||
},
|
||||
}
|
||||
m.AddConnection(conn1, protocol.HelloResult{})
|
||||
|
||||
m.ClusterConfig(device1, protocol.ClusterConfig{
|
||||
Folders: []protocol.Folder{
|
||||
{
|
||||
ID: "default",
|
||||
Devices: []protocol.Device{
|
||||
{ID: device1[:]},
|
||||
{ID: device2[:]},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type fakeAddr struct{}
|
||||
|
||||
func (fakeAddr) Network() string {
|
||||
|
||||
@@ -190,6 +190,9 @@ func (t *ProgressEmitter) CommitConfiguration(from, to config.Configuration) boo
|
||||
defer t.mut.Unlock()
|
||||
|
||||
t.interval = time.Duration(to.Options.ProgressUpdateIntervalS) * time.Second
|
||||
if t.interval < time.Second {
|
||||
t.interval = time.Second
|
||||
}
|
||||
t.minBlocks = to.Options.TempIndexMinBlocks
|
||||
l.Debugln("progress emitter: updated interval", t.interval)
|
||||
|
||||
|
||||
@@ -60,6 +60,7 @@ func TestProgressEmitter(t *testing.T) {
|
||||
|
||||
p := NewProgressEmitter(c)
|
||||
go p.Serve()
|
||||
p.interval = 0
|
||||
|
||||
expectTimeout(w, t)
|
||||
|
||||
|
||||
@@ -273,7 +273,7 @@ func BenchmarkJobQueuePushPopDone10k(b *testing.B) {
|
||||
for _, f := range files {
|
||||
q.Push(f.Name, 0, time.Time{})
|
||||
}
|
||||
for _ = range files {
|
||||
for range files {
|
||||
n, _ := q.Pop()
|
||||
q.Done(n)
|
||||
}
|
||||
|
||||
@@ -602,7 +602,7 @@ func (f *rwFolder) handleDir(file protocol.FileInfo) {
|
||||
// Most likely a file/link is getting replaced with a directory.
|
||||
// Remove the file/link and fall through to directory creation.
|
||||
case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
|
||||
err = osutil.InWritableDir(osutil.Remove, realName)
|
||||
err = osutil.InWritableDir(os.Remove, realName)
|
||||
if err != nil {
|
||||
l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
|
||||
f.newError(file.Name, err)
|
||||
@@ -687,13 +687,13 @@ func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
|
||||
for _, dirFile := range files {
|
||||
fullDirFile := filepath.Join(file.Name, dirFile)
|
||||
if defTempNamer.IsTemporary(dirFile) || (matcher != nil && matcher.Match(fullDirFile).IsDeletable()) {
|
||||
osutil.RemoveAll(filepath.Join(f.dir, fullDirFile))
|
||||
os.RemoveAll(filepath.Join(f.dir, fullDirFile))
|
||||
}
|
||||
}
|
||||
dir.Close()
|
||||
}
|
||||
|
||||
err = osutil.InWritableDir(osutil.Remove, realName)
|
||||
err = osutil.InWritableDir(os.Remove, realName)
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
// It was removed or it doesn't exist to start with
|
||||
f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
|
||||
@@ -740,7 +740,7 @@ func (f *rwFolder) deleteFile(file protocol.FileInfo) {
|
||||
} else if f.versioner != nil {
|
||||
err = osutil.InWritableDir(f.versioner.Archive, realName)
|
||||
} else {
|
||||
err = osutil.InWritableDir(osutil.Remove, realName)
|
||||
err = osutil.InWritableDir(os.Remove, realName)
|
||||
}
|
||||
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
@@ -825,7 +825,7 @@ func (f *rwFolder) renameFile(source, target protocol.FileInfo) {
|
||||
// get rid of. Attempt to delete it instead so that we make *some*
|
||||
// progress. The target is unhandled.
|
||||
|
||||
err = osutil.InWritableDir(osutil.Remove, from)
|
||||
err = osutil.InWritableDir(os.Remove, from)
|
||||
if err != nil {
|
||||
l.Infof("Puller (folder %q, file %q): delete %q after failed rename: %v", f.folderID, target.Name, source.Name, err)
|
||||
f.newError(target.Name, err)
|
||||
@@ -976,7 +976,7 @@ func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
|
||||
// Otherwise, discard the file ourselves in order for the
|
||||
// sharedpuller not to panic when it fails to exclusively create a
|
||||
// file which already exists
|
||||
osutil.InWritableDir(osutil.Remove, tempName)
|
||||
osutil.InWritableDir(os.Remove, tempName)
|
||||
}
|
||||
} else {
|
||||
// Copy the blocks, as we don't want to shuffle them on the FileInfo
|
||||
@@ -1245,9 +1245,6 @@ func (f *rwFolder) performFinish(state *sharedPullerState) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Set the correct timestamp on the new file
|
||||
f.mtimeFS.Chtimes(state.tempName, state.file.ModTime(), state.file.ModTime()) // never fails
|
||||
|
||||
if stat, err := f.mtimeFS.Lstat(state.realName); err == nil {
|
||||
// There is an old file or directory already in place. We need to
|
||||
// handle that.
|
||||
@@ -1262,7 +1259,7 @@ func (f *rwFolder) performFinish(state *sharedPullerState) error {
|
||||
// and future hard ignores before attempting a directory delete.
|
||||
// Should share code with f.deleteDir().
|
||||
|
||||
if err = osutil.InWritableDir(osutil.Remove, state.realName); err != nil {
|
||||
if err = osutil.InWritableDir(os.Remove, state.realName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1294,6 +1291,9 @@ func (f *rwFolder) performFinish(state *sharedPullerState) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the correct timestamp on the new file
|
||||
f.mtimeFS.Chtimes(state.realName, state.file.ModTime(), state.file.ModTime()) // never fails
|
||||
|
||||
// If it's a symlink, the target of the symlink is inside the file.
|
||||
if state.file.IsSymlink() {
|
||||
content, err := ioutil.ReadFile(state.realName)
|
||||
@@ -1458,14 +1458,14 @@ func removeAvailability(availabilities []Availability, availability Availability
|
||||
func (f *rwFolder) moveForConflict(name string) error {
|
||||
if strings.Contains(filepath.Base(name), ".sync-conflict-") {
|
||||
l.Infoln("Conflict for", name, "which is already a conflict copy; not copying again.")
|
||||
if err := osutil.Remove(name); err != nil && !os.IsNotExist(err) {
|
||||
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.maxConflicts == 0 {
|
||||
if err := osutil.Remove(name); err != nil && !os.IsNotExist(err) {
|
||||
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -1487,7 +1487,7 @@ func (f *rwFolder) moveForConflict(name string) error {
|
||||
if gerr == nil && len(matches) > f.maxConflicts {
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
|
||||
for _, match := range matches[f.maxConflicts:] {
|
||||
gerr = osutil.Remove(match)
|
||||
gerr = os.Remove(match)
|
||||
if gerr != nil {
|
||||
l.Debugln(f, "removing extra conflict", gerr)
|
||||
}
|
||||
|
||||
@@ -9,9 +9,9 @@ package model
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/osutil"
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@@ -186,7 +186,7 @@ func (s *onDiskIndexSorter) Sorted(fn func(protocol.FileInfo) bool) {
|
||||
func (s *onDiskIndexSorter) Close() {
|
||||
l.Debugf("onDiskIndexSorter %p closes", s)
|
||||
s.db.Close()
|
||||
osutil.RemoveAll(s.dir)
|
||||
os.RemoveAll(s.dir)
|
||||
}
|
||||
|
||||
func (s *onDiskIndexSorter) full() bool {
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/calmh/du"
|
||||
"github.com/syncthing/syncthing/lib/sync"
|
||||
@@ -93,93 +92,6 @@ func InWritableDir(fn func(string) error, path string) error {
|
||||
return fn(path)
|
||||
}
|
||||
|
||||
// Remove removes the given path. On Windows, removes the read-only attribute
|
||||
// from the target prior to deletion.
|
||||
func Remove(path string) error {
|
||||
if runtime.GOOS == "windows" {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Mode()&0200 == 0 {
|
||||
os.Chmod(path, 0700)
|
||||
}
|
||||
}
|
||||
return os.Remove(path)
|
||||
}
|
||||
|
||||
// RemoveAll is a copy of os.RemoveAll, but uses osutil.Remove.
|
||||
// RemoveAll removes path and any children it contains.
|
||||
// It removes everything it can but returns the first error
|
||||
// it encounters. If the path does not exist, RemoveAll
|
||||
// returns nil (no error).
|
||||
func RemoveAll(path string) error {
|
||||
// Simple case: if Remove works, we're done.
|
||||
err := Remove(path)
|
||||
if err == nil || os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, is this a directory we need to recurse into?
|
||||
dir, serr := os.Lstat(path)
|
||||
if serr != nil {
|
||||
if serr, ok := serr.(*os.PathError); ok && (os.IsNotExist(serr.Err) || serr.Err == syscall.ENOTDIR) {
|
||||
return nil
|
||||
}
|
||||
return serr
|
||||
}
|
||||
if !dir.IsDir() {
|
||||
// Not a directory; return the error from Remove.
|
||||
return err
|
||||
}
|
||||
|
||||
// Directory.
|
||||
fd, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Race. It was deleted between the Lstat and Open.
|
||||
// Return nil per RemoveAll's docs.
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove contents & return first error.
|
||||
err = nil
|
||||
for {
|
||||
names, err1 := fd.Readdirnames(100)
|
||||
for _, name := range names {
|
||||
err1 := RemoveAll(path + string(os.PathSeparator) + name)
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
}
|
||||
if err1 == io.EOF {
|
||||
break
|
||||
}
|
||||
// If Readdirnames returned an error, use it.
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
if len(names) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Close directory, because windows won't remove opened directory.
|
||||
fd.Close()
|
||||
|
||||
// Remove directory.
|
||||
err1 := Remove(path)
|
||||
if err1 == nil || os.IsNotExist(err1) {
|
||||
return nil
|
||||
}
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func ExpandTilde(path string) (string, error) {
|
||||
if path == "~" {
|
||||
return getHomeDir()
|
||||
|
||||
@@ -71,6 +71,8 @@ func TestInWriteableDir(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInWritableDirWindowsRemove(t *testing.T) {
|
||||
// os.Remove should remove read only things on windows
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skipf("Tests not required")
|
||||
return
|
||||
@@ -100,20 +102,49 @@ func TestInWritableDirWindowsRemove(t *testing.T) {
|
||||
os.Chmod("testdata/windows/ro/readonly", 0500)
|
||||
|
||||
for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
|
||||
err := os.Remove(path)
|
||||
if err == nil {
|
||||
t.Errorf("Expected error %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
|
||||
err := osutil.InWritableDir(osutil.Remove, path)
|
||||
err := osutil.InWritableDir(os.Remove, path)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %s: %s", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInWritableDirWindowsRemoveAll(t *testing.T) {
|
||||
// os.RemoveAll should remove read only things on windows
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skipf("Tests not required")
|
||||
return
|
||||
}
|
||||
|
||||
err := os.RemoveAll("testdata")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Chmod("testdata/windows/ro/readonlynew", 0700)
|
||||
defer os.RemoveAll("testdata")
|
||||
|
||||
create := func(name string) error {
|
||||
fd, err := os.Create(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
os.Mkdir("testdata", 0700)
|
||||
|
||||
os.Mkdir("testdata/windows", 0500)
|
||||
os.Mkdir("testdata/windows/ro", 0500)
|
||||
create("testdata/windows/ro/readonly")
|
||||
os.Chmod("testdata/windows/ro/readonly", 0500)
|
||||
|
||||
if err := os.RemoveAll("testdata/windows"); err != nil {
|
||||
t.Errorf("Unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInWritableDirWindowsRename(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skipf("Tests not required")
|
||||
|
||||
@@ -7,13 +7,17 @@ package protocol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/rand"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
const (
|
||||
SyntheticDirectorySize = 128
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -56,8 +60,11 @@ func (f FileInfo) HasPermissionBits() bool {
|
||||
}
|
||||
|
||||
func (f FileInfo) FileSize() int64 {
|
||||
if f.IsDirectory() || f.IsDeleted() {
|
||||
return 128
|
||||
if f.Deleted {
|
||||
return 0
|
||||
}
|
||||
if f.IsDirectory() {
|
||||
return SyntheticDirectorySize
|
||||
}
|
||||
return f.Size
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ package protocol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base32"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
@@ -12,6 +11,8 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
|
||||
"github.com/calmh/luhn"
|
||||
)
|
||||
|
||||
|
||||
@@ -144,7 +144,7 @@ func TestCompare(t *testing.T) {
|
||||
// Empty vectors are identical
|
||||
{Vector{}, Vector{}, Equal},
|
||||
{Vector{}, Vector{[]Counter{{42, 0}}}, Equal},
|
||||
{Vector{[]Counter{Counter{42, 0}}}, Vector{}, Equal},
|
||||
{Vector{[]Counter{{42, 0}}}, Vector{}, Equal},
|
||||
|
||||
// Zero is the implied value for a missing Counter
|
||||
{
|
||||
|
||||
@@ -8,11 +8,11 @@ package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
|
||||
|
||||
@@ -289,8 +289,8 @@ func TestWalkSymlink(t *testing.T) {
|
||||
|
||||
// Create a folder with a symlink in it
|
||||
|
||||
osutil.RemoveAll("_symlinks")
|
||||
defer osutil.RemoveAll("_symlinks")
|
||||
os.RemoveAll("_symlinks")
|
||||
defer os.RemoveAll("_symlinks")
|
||||
|
||||
os.Mkdir("_symlinks", 0755)
|
||||
symlinks.Create("_symlinks/link", "destination", symlinks.TargetUnknown)
|
||||
|
||||
136
lib/sha256/sha256.go
Normal file
@@ -0,0 +1,136 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package sha256
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
cryptoSha256 "crypto/sha256"
|
||||
"fmt"
|
||||
"hash"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
minioSha256 "github.com/minio/sha256-simd"
|
||||
"github.com/syncthing/syncthing/lib/logger"
|
||||
)
|
||||
|
||||
var l = logger.DefaultLogger.NewFacility("sha256", "SHA256 hashing package")
|
||||
|
||||
const (
|
||||
benchmarkingIterations = 3
|
||||
benchmarkingDuration = 150 * time.Millisecond
|
||||
defaultImpl = "crypto/sha256"
|
||||
minioImpl = "minio/sha256-simd"
|
||||
)
|
||||
|
||||
const (
|
||||
BlockSize = cryptoSha256.BlockSize
|
||||
Size = cryptoSha256.Size
|
||||
)
|
||||
|
||||
// May be switched out for another implementation
|
||||
var (
|
||||
New = cryptoSha256.New
|
||||
Sum256 = cryptoSha256.Sum256
|
||||
)
|
||||
|
||||
var (
|
||||
selectedImpl = defaultImpl
|
||||
cryptoPerf float64
|
||||
minioPerf float64
|
||||
)
|
||||
|
||||
func SelectAlgo() {
|
||||
switch os.Getenv("STHASHING") {
|
||||
case "":
|
||||
// When unset, probe for the fastest implementation.
|
||||
benchmark()
|
||||
if minioPerf > cryptoPerf {
|
||||
selectMinio()
|
||||
}
|
||||
|
||||
case "minio":
|
||||
// When set to "minio", use that. Benchmark anyway to be able to
|
||||
// present the difference.
|
||||
benchmark()
|
||||
selectMinio()
|
||||
|
||||
default:
|
||||
// When set to anything else, such as "standard", use the default Go
|
||||
// implementation. Benchmark that anyway, so we can report something
|
||||
// useful in Report(). Make sure not to touch the minio
|
||||
// implementation as it may be disabled for incompatibility reasons.
|
||||
cryptoPerf = cpuBenchOnce(benchmarkingIterations*benchmarkingDuration, cryptoSha256.New)
|
||||
}
|
||||
}
|
||||
|
||||
// Report prints a line with the measured hash performance rates for the
|
||||
// selected and alternate implementation.
|
||||
func Report() {
|
||||
var otherImpl string
|
||||
var selectedRate, otherRate float64
|
||||
|
||||
switch selectedImpl {
|
||||
case defaultImpl:
|
||||
selectedRate = cryptoPerf
|
||||
otherRate = minioPerf
|
||||
otherImpl = minioImpl
|
||||
|
||||
case minioImpl:
|
||||
selectedRate = minioPerf
|
||||
otherRate = cryptoPerf
|
||||
otherImpl = defaultImpl
|
||||
}
|
||||
|
||||
l.Infof("Single thread hash performance is %s using %s (%s using %s).", formatRate(selectedRate), selectedImpl, formatRate(otherRate), otherImpl)
|
||||
}
|
||||
|
||||
func selectMinio() {
|
||||
New = minioSha256.New
|
||||
Sum256 = minioSha256.Sum256
|
||||
selectedImpl = minioImpl
|
||||
}
|
||||
|
||||
func benchmark() {
|
||||
// Interleave the tests to achieve some sort of fairness if the CPU is
|
||||
// just in the process of spinning up to full speed.
|
||||
for i := 0; i < benchmarkingIterations; i++ {
|
||||
if perf := cpuBenchOnce(benchmarkingDuration, cryptoSha256.New); perf > cryptoPerf {
|
||||
cryptoPerf = perf
|
||||
}
|
||||
if perf := cpuBenchOnce(benchmarkingDuration, minioSha256.New); perf > minioPerf {
|
||||
minioPerf = perf
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func cpuBenchOnce(duration time.Duration, newFn func() hash.Hash) float64 {
|
||||
chunkSize := 100 * 1 << 10
|
||||
h := newFn()
|
||||
bs := make([]byte, chunkSize)
|
||||
rand.Reader.Read(bs)
|
||||
|
||||
t0 := time.Now()
|
||||
b := 0
|
||||
for time.Since(t0) < duration {
|
||||
h.Write(bs)
|
||||
b += chunkSize
|
||||
}
|
||||
h.Sum(nil)
|
||||
d := time.Since(t0)
|
||||
return float64(int(float64(b)/d.Seconds()/(1<<20)*100)) / 100
|
||||
}
|
||||
|
||||
func formatRate(rate float64) string {
|
||||
decimals := 0
|
||||
if rate < 1 {
|
||||
decimals = 2
|
||||
} else if rate < 10 {
|
||||
decimals = 1
|
||||
}
|
||||
return fmt.Sprintf("%.*f MB/s", decimals, rate)
|
||||
}
|
||||
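As a rough usage sketch (not part of the diff; import path as introduced above), a program would call SelectAlgo once at startup, optionally Report the measured rates, and then use the package exactly like crypto/sha256:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/sha256"
)

func main() {
	sha256.SelectAlgo() // honour STHASHING, or benchmark and pick the faster implementation
	sha256.Report()     // log the measured single-thread hash rates

	sum := sha256.Sum256([]byte("syncthing"))
	fmt.Printf("%x\n", sum)
}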
@@ -12,7 +12,6 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
@@ -20,6 +19,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/sha256"
|
||||
)
|
||||
|
||||
// GenerateKeys returns a new key pair, with the private and public key
|
||||
|
||||
@@ -198,6 +198,7 @@ func upgradeToURL(archiveName, binary string, url string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(fname)
|
||||
|
||||
old := binary + ".old"
|
||||
os.Remove(old)
|
||||
@@ -205,7 +206,11 @@ func upgradeToURL(archiveName, binary string, url string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(fname, binary)
|
||||
if err := os.Rename(fname, binary); err != nil {
|
||||
os.Rename(old, binary)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readRelease(archiveName, dir, url string) (string, error) {
|
||||
|
||||
@@ -108,19 +108,21 @@ func Discover(renewal, timeout time.Duration) []nat.Device {
|
||||
close(resultChan)
|
||||
}()
|
||||
|
||||
seenResults := make(map[string]bool)
|
||||
nextResult:
|
||||
for result := range resultChan {
|
||||
for _, existingResult := range results {
|
||||
if existingResult.ID() == result.ID() {
|
||||
l.Debugf("Skipping duplicate result %s with services:", result.uuid)
|
||||
for _, service := range result.services {
|
||||
l.Debugf("* [%s] %s", service.ID, service.URL)
|
||||
}
|
||||
continue nextResult
|
||||
if seenResults[result.ID()] {
|
||||
l.Debugf("Skipping duplicate result %s with services:", result.uuid)
|
||||
for _, service := range result.services {
|
||||
l.Debugf("* [%s] %s", service.ID, service.URL)
|
||||
}
|
||||
continue nextResult
|
||||
}
|
||||
|
||||
result := result // Reallocate as we need to keep a pointer
|
||||
results = append(results, &result)
|
||||
seenResults[result.ID()] = true
|
||||
|
||||
l.Debugf("UPnP discovery result %s with services:", result.uuid)
|
||||
for _, service := range result.services {
|
||||
l.Debugf("* [%s] %s", service.ID, service.URL)
|
||||
|
||||
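The hunk above replaces a quadratic scan of previously collected results with a map keyed by the device ID. The same pattern in isolation (names hypothetical, not the Syncthing API):

package main

import "fmt"

type result struct{ id string }

// dedupe keeps the first occurrence of each ID, mirroring the seenResults map above.
func dedupe(in []result) []result {
	seen := make(map[string]bool)
	var out []result
	for _, r := range in {
		if seen[r.id] {
			continue // duplicate, skip
		}
		seen[r.id] = true
		out = append(out, r)
	}
	return out
}

func main() {
	fmt.Println(dedupe([]result{{"a"}, {"b"}, {"a"}})) // [{a} {b}]
}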
5
lib/versioner/_external_test/external.bat
Normal file
@@ -0,0 +1,5 @@
set "FOLDER_PATH=%~1"
set "FILE_PATH=%~2"
echo "1--%FOLDER_PATH%--"
echo "2--%FILE_PATH%--"
del "%FOLDER_PATH%\%FILE_PATH%"
5
lib/versioner/_external_test/external.sh
Executable file
@@ -0,0 +1,5 @@
#!/bin/sh

echo "1--$1--"
echo "2--$2--"
rm -f "$1/$2"
89
lib/versioner/external_test.go
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright (C) 2016 The Syncthing Authors.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package versioner
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExternalNoCommand(t *testing.T) {
|
||||
file := "testdata/folder path/long filename.txt"
|
||||
prepForRemoval(t, file)
|
||||
defer os.RemoveAll("testdata")
|
||||
|
||||
// The file should exist before the versioner run.
|
||||
|
||||
if _, err := os.Lstat(file); err != nil {
|
||||
t.Fatal("File should exist")
|
||||
}
|
||||
|
||||
// The versioner should fail due to missing command.
|
||||
|
||||
e := External{
|
||||
command: "nonexistant command",
|
||||
folderPath: "testdata/folder path",
|
||||
}
|
||||
if err := e.Archive(file); err == nil {
|
||||
t.Error("Command should have failed")
|
||||
}
|
||||
|
||||
// The file should not have been removed.
|
||||
|
||||
if _, err := os.Lstat(file); err != nil {
|
||||
t.Fatal("File should still exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExternal(t *testing.T) {
|
||||
cmd := "./_external_test/external.sh"
|
||||
if runtime.GOOS == "windows" {
|
||||
cmd = `.\_external_test\external.bat`
|
||||
}
|
||||
|
||||
file := "testdata/folder path/dir (parens)/long filename (parens).txt"
|
||||
prepForRemoval(t, file)
|
||||
defer os.RemoveAll("testdata")
|
||||
|
||||
// The file should exist before the versioner run.
|
||||
|
||||
if _, err := os.Lstat(file); err != nil {
|
||||
t.Fatal("File should exist")
|
||||
}
|
||||
|
||||
// The versioner should run successfully.
|
||||
|
||||
e := External{
|
||||
command: cmd,
|
||||
folderPath: "testdata/folder path",
|
||||
}
|
||||
if err := e.Archive(file); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// The file should no longer exist.
|
||||
|
||||
if _, err := os.Lstat(file); !os.IsNotExist(err) {
|
||||
t.Error("File should no longer exist")
|
||||
}
|
||||
}
|
||||
|
||||
func prepForRemoval(t *testing.T, file string) {
|
||||
if err := os.RemoveAll("testdata"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(file, []byte("hello\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -77,7 +77,7 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
|
||||
if testCleanDone != nil {
|
||||
close(testCleanDone)
|
||||
}
|
||||
for _ = range time.Tick(time.Duration(cleanInterval) * time.Second) {
|
||||
for range time.Tick(time.Duration(cleanInterval) * time.Second) {
|
||||
s.clean()
|
||||
}
|
||||
}()
|
||||
@@ -165,7 +165,7 @@ func (v Staggered) expire(versions []string) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := osutil.Remove(file); err != nil {
|
||||
if err := os.Remove(file); err != nil {
|
||||
l.Warnf("Versioner: can't remove %q: %v", file, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,7 +144,7 @@ func (t *Trashcan) cleanoutArchive() error {
|
||||
// directory was empty and try to remove it. We ignore failure for
|
||||
// the time being.
|
||||
if currentDir != "" && filesInDir == 0 {
|
||||
osutil.Remove(currentDir)
|
||||
os.Remove(currentDir)
|
||||
}
|
||||
currentDir = path
|
||||
filesInDir = 0
|
||||
@@ -153,7 +153,7 @@ func (t *Trashcan) cleanoutArchive() error {
|
||||
|
||||
if info.ModTime().Before(cutoff) {
|
||||
// The file is too old; remove it.
|
||||
osutil.Remove(path)
|
||||
os.Remove(path)
|
||||
} else {
|
||||
// Keep this file, and remember it so we don't unnecessarily try
|
||||
// to remove this directory.
|
||||
@@ -169,7 +169,7 @@ func (t *Trashcan) cleanoutArchive() error {
|
||||
// The last directory seen by the walkFn may not have been removed as it
|
||||
// should be.
|
||||
if currentDir != "" && filesInDir == 0 {
|
||||
osutil.Remove(currentDir)
|
||||
os.Remove(currentDir)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STDISCOSRV" "1" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "STDISCOSRV" "1" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
stdiscosrv \- Syncthing Discovery Server
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "STRELAYSRV" "1" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "STRELAYSRV" "1" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
strelaysrv \- Syncthing Relay Server
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-BEP" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-BEP" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-bep \- Block Exchange Protocol v1
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-CONFIG" "5" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-CONFIG" "5" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-config \- Syncthing Configuration
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-DEVICE-IDS" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-device-ids \- Understanding Device IDs
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-EVENT-API" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-EVENT-API" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-event-api \- Event API
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-FAQ" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-FAQ" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-faq \- Frequently Asked Questions
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-GLOBALDISCO" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-globaldisco \- Global Discovery Protocol v3
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-LOCALDISCO" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-localdisco \- Local Discovery Protocol v4
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-NETWORKING" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-NETWORKING" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-networking \- Firewall Setup
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-RELAY" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-RELAY" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-relay \- Relay Protocol v1
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-REST-API" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-REST-API" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-rest-api \- REST API
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-SECURITY" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-SECURITY" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-security \- Security Principles
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING-STIGNORE" "5" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING-STIGNORE" "5" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing-stignore \- Prevent files from being synchronized to other nodes
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "TODO" "7" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "TODO" "7" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
Todo \- Keep automatic backups of deleted files by other nodes
|
||||
.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
.\" Man page generated from reStructuredText.
|
||||
.
|
||||
.TH "SYNCTHING" "1" "August 08, 2016" "v0.14" "Syncthing"
|
||||
.TH "SYNCTHING" "1" "August 22, 2016" "v0.14" "Syncthing"
|
||||
.SH NAME
|
||||
syncthing \- Syncthing
|
||||
.
|
||||
|
||||
3
script/post-upgrade
Normal file
@@ -0,0 +1,3 @@
#!/bin/sh

pkill -HUP -x syncthing || true
@@ -27,6 +27,7 @@ var jsonEndpoints = []string{
"/rest/db/status?folder=default",
"/rest/db/browse?folder=default",
"/rest/events?since=-1&limit=5",
"/rest/events/disk?since=-1&limit=5",
"/rest/stats/device",
"/rest/stats/folder",
"/rest/svc/deviceid?id=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU",
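For a quick manual check of the new disk events endpoint, something along these lines should work, assuming the default GUI address and the usual X-API-Key header:

curl -s -H "X-API-Key: $APIKEY" "http://localhost:8384/rest/events/disk?since=0&limit=5"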
@@ -208,7 +208,7 @@ func alterFiles(dir string) error {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err := osutil.Remove(path)
|
||||
err := os.Remove(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
44
vendor/github.com/cznic/b/doc.go
generated
vendored
@@ -6,10 +6,30 @@
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2016-07-16: Update benchmark results to newer Go version. Add a note on
|
||||
// concurrency.
|
||||
//
|
||||
// 2014-06-26: Lower GC presure by recycling things.
|
||||
//
|
||||
// 2014-04-18: Added new method Put.
|
||||
//
|
||||
// Concurrency considerations
|
||||
//
|
||||
// Tree.{Clear,Delete,Put,Set} mutate the tree. One can use eg. a
|
||||
// sync.Mutex.Lock/Unlock (or sync.RWMutex.Lock/Unlock) to wrap those calls if
|
||||
// they are to be invoked concurrently.
|
||||
//
|
||||
// Tree.{First,Get,Last,Len,Seek,SeekFirst,SekLast} read but do not mutate the
|
||||
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
|
||||
// they are to be invoked concurrently with any of the tree mutating methods.
|
||||
//
|
||||
// Enumerator.{Next,Prev} mutate the enumerator and read but not mutate the
|
||||
// tree. One can use eg. a sync.RWMutex.RLock/RUnlock to wrap those calls if
|
||||
// they are to be invoked concurrently with any of the tree mutating methods. A
|
||||
// separate mutex for the enumerator, or the whole tree in a simplified
|
||||
// variant, is necessary if the enumerator's Next/Prev methods per se are to
|
||||
// be invoked concurrently.
|
||||
//
|
||||
// Generic types
|
||||
//
|
||||
// Keys and their associated values are interface{} typed, similar to all of
|
||||
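A minimal sketch of the locking pattern described in the concurrency notes above, assuming the package's interface{}-typed API (TreeNew/Set/Get):

package main

import (
	"fmt"
	"sync"

	"github.com/cznic/b"
)

func main() {
	var mu sync.RWMutex
	t := b.TreeNew(func(x, y interface{}) int { return x.(int) - y.(int) })

	mu.Lock()
	t.Set(42, "answer") // mutating call: exclusive lock
	mu.Unlock()

	mu.RLock()
	v, ok := t.Get(42) // read-only call: a shared lock is enough
	mu.RUnlock()

	fmt.Println(v, ok) // answer true
}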
@@ -34,20 +54,20 @@
|
||||
// No other changes to int.go are necessary, it compiles just fine.
|
||||
//
|
||||
// Running the benchmarks for 1000 keys on a machine with Intel i5-4670 CPU @
|
||||
// 3.4GHz, Go release 1.4.2.
|
||||
// 3.4GHz, Go 1.7rc1.
|
||||
//
|
||||
// $ go test -bench 1e3 example/all_test.go example/int.go
|
||||
// BenchmarkSetSeq1e3-4 20000 78265 ns/op
|
||||
// BenchmarkGetSeq1e3-4 20000 67980 ns/op
|
||||
// BenchmarkSetRnd1e3-4 10000 172720 ns/op
|
||||
// BenchmarkGetRnd1e3-4 20000 89539 ns/op
|
||||
// BenchmarkDelSeq1e3-4 20000 87863 ns/op
|
||||
// BenchmarkDelRnd1e3-4 10000 130891 ns/op
|
||||
// BenchmarkSeekSeq1e3-4 10000 100118 ns/op
|
||||
// BenchmarkSeekRnd1e3-4 10000 121684 ns/op
|
||||
// BenchmarkNext1e3-4 200000 6330 ns/op
|
||||
// BenchmarkPrev1e3-4 200000 9066 ns/op
|
||||
// PASS
|
||||
// BenchmarkSetSeq1e3 10000 151620 ns/op
|
||||
// BenchmarkGetSeq1e3 10000 115354 ns/op
|
||||
// BenchmarkSetRnd1e3 5000 255865 ns/op
|
||||
// BenchmarkGetRnd1e3 10000 140466 ns/op
|
||||
// BenchmarkDelSeq1e3 10000 143860 ns/op
|
||||
// BenchmarkDelRnd1e3 10000 188228 ns/op
|
||||
// BenchmarkSeekSeq1e3 10000 156448 ns/op
|
||||
// BenchmarkSeekRnd1e3 10000 190587 ns/op
|
||||
// BenchmarkNext1e3 200000 9407 ns/op
|
||||
// BenchmarkPrev1e3 200000 9306 ns/op
|
||||
// ok command-line-arguments 26.369s
|
||||
// ok command-line-arguments 42.531s
|
||||
// $
|
||||
package b
|
||||
|
||||
27
vendor/github.com/cznic/internal/buffer/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2016 The Internal Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
55
vendor/github.com/cznic/internal/buffer/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2016 The Internal Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package buffer implements a pool of pointers to byte slices.
|
||||
//
|
||||
// Example usage pattern
|
||||
//
|
||||
// p := buffer.Get(size)
|
||||
// b := *p // Now you can use b in any way you need.
|
||||
// ...
|
||||
// // When b will not be used anymore
|
||||
// buffer.Put(p)
|
||||
// ...
|
||||
// // If b or p are not going out of scope soon, optionally
|
||||
// b = nil
|
||||
// p = nil
|
||||
//
|
||||
// Otherwise the pool cannot release the buffer on garbage collection.
|
||||
//
|
||||
// Do not do
|
||||
//
|
||||
// p := buffer.Get(size)
|
||||
// b := *p
|
||||
// ...
|
||||
// buffer.Put(&b)
|
||||
//
|
||||
// or
|
||||
//
|
||||
// b := *buffer.Get(size)
|
||||
// ...
|
||||
// buffer.Put(&b)
|
||||
package buffer
|
||||
|
||||
import (
|
||||
"github.com/cznic/internal/slice"
|
||||
)
|
||||
|
||||
// CGet returns a pointer to a byte slice of len size. The pointed to byte
|
||||
// slice is zeroed up to its cap. CGet panics for size < 0.
|
||||
//
|
||||
// CGet is safe for concurrent use by multiple goroutines.
|
||||
func CGet(size int) *[]byte { return slice.Bytes.CGet(size).(*[]byte) }
|
||||
|
||||
// Get returns a pointer to a byte slice of len size. The pointed to byte slice
|
||||
// is not zeroed. Get panics for size < 0.
|
||||
//
|
||||
// Get is safe for concurrent use by multiple goroutines.
|
||||
func Get(size int) *[]byte { return slice.Bytes.Get(size).(*[]byte) }
|
||||
|
||||
// Put puts a pointer to a byte slice into a pool for possible later reuse by
|
||||
// CGet or Get.
|
||||
//
|
||||
// Put is safe for concurrent use by multiple goroutines.
|
||||
func Put(p *[]byte) { slice.Bytes.Put(p) }
|
||||
27
vendor/github.com/cznic/internal/file/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2016 The Internal Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
Some files were not shown because too many files have changed in this diff.