upload test

This commit is contained in:
Jarek Kowalski
2016-04-14 18:57:45 -07:00
parent 72f28ed2b2
commit cb0b230e90
5 changed files with 46 additions and 47 deletions

View File

@@ -29,3 +29,6 @@ vtest:
doc:
godoc -http=:33333
coverage:
./coverage.sh

8
coverage.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/sh
# Build a combined coverage profile for every package in the repository
# and open the HTML report in a browser.
#
# A Go cover profile must contain exactly one "mode:" header line, so we
# emit it once up front and strip it from each per-package profile before
# concatenating.
set -e

echo mode: set > combined.cov
for p in $(go list github.com/kopia/kopia/...); do
	go test --coverprofile tmp.cov "$p"
	# Drop the per-package "mode:" header; "|| true" keeps set -e from
	# aborting when a profile has no non-header lines to contribute.
	grep -v "mode: " tmp.cov >> combined.cov || true
	rm tmp.cov
done
go tool cover -html=combined.cov
rm combined.cov

View File

@@ -14,11 +14,12 @@ func (hcr *hashcacheReader) Open(dr *directoryReader) {
hcr.readahead()
}
func (hcr *hashcacheReader) GetEntry(relativeName string) *Entry {
func (hcr *hashcacheReader) GetEntry(relativeName string) (*Entry, int) {
skipCount := 0
//log.Printf("looking for %v", relativeName)
for hcr.nextEntry != nil && isLess(hcr.nextEntry.Name, relativeName) {
hcr.skippedCount++
hcr.readahead()
skipCount++
}
if hcr.nextEntry != nil && relativeName == hcr.nextEntry.Name {
@@ -26,14 +27,14 @@ func (hcr *hashcacheReader) GetEntry(relativeName string) *Entry {
e := hcr.nextEntry
hcr.nextEntry = nil
hcr.readahead()
return e
return e, skipCount
}
// if hcr.reader != nil {
// log.Printf("*** not found hashcache entry: %v", relativeName)
// }
return nil
return nil, skipCount
}
func (hcr *hashcacheReader) SkippedCount() int {

View File

@@ -4,7 +4,6 @@
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sync/atomic"
@@ -22,7 +21,7 @@
// Uploader supports efficient uploading of files and directories to CAS.
type Uploader interface {
UploadFile(path string) (cas.ObjectID, error)
UploadDir(path string, previousObjectID cas.ObjectID) (cas.ObjectID, cas.ObjectID, error)
UploadDir(path string, previousObjectID cas.ObjectID) (objectID cas.ObjectID, manifestObjectID cas.ObjectID, err error)
Cancel()
}
@@ -93,9 +92,6 @@ func (u *uploader) UploadDir(path string, previous cas.ObjectID) (cas.ObjectID,
}
func (u *uploader) uploadDirInternal(path string, relativePath string, hcw *directoryWriter, hcr *hashcacheReader) (cas.ObjectID, bool, error) {
//log.Printf("entering %v", path)
//defer log.Printf("exiting %v", path)
dir, err := u.lister.List(path)
if err != nil {
return cas.NullObjectID, false, err
@@ -111,37 +107,37 @@ func (u *uploader) uploadDirInternal(path string, relativePath string, hcw *dire
allCached := true
s0 := hcr.SkippedCount()
for _, e := range dir {
fullPath := filepath.Join(path, e.Name)
entryRelativePath := relativePath + "/" + e.Name
var oid cas.ObjectID
var cached bool
if e.IsDir() {
oid, cached, err = u.uploadDirInternal(fullPath, entryRelativePath, hcw, hcr)
oid, wasCached, err := u.uploadDirInternal(fullPath, entryRelativePath, hcw, hcr)
if err != nil {
return cas.NullObjectID, false, err
}
allCached = allCached && wasCached
e.ObjectID = oid
} else {
// See if we had this name during previous pass.
cachedEntry := hcr.GetEntry(entryRelativePath)
// ... and whether file metadata is identical to the previous one.
cached = metadataEquals(e, cachedEntry)
cachedEntry, numSkipped := hcr.GetEntry(entryRelativePath)
if cached {
// ... and whether file metadata is identical to the previous one.
cacheMatches := metadataEquals(e, cachedEntry)
allCached = allCached && cacheMatches && numSkipped == 0
if cacheMatches {
// Avoid hashing by reusing previous object ID.
oid = cachedEntry.ObjectID
e.ObjectID = cachedEntry.ObjectID
} else {
oid, err = u.UploadFile(fullPath)
e.ObjectID, err = u.UploadFile(fullPath)
if err != nil {
return cas.NullObjectID, false, fmt.Errorf("unable to hash file: %s", err)
}
}
}
allCached = allCached && cached
e.ObjectID = oid
if err := dw.WriteEntry(e); err != nil {
return cas.NullObjectID, false, err
@@ -157,21 +153,17 @@ func (u *uploader) uploadDirInternal(path string, relativePath string, hcw *dire
}
}
dirEntry := hcr.GetEntry(relativePath + "/")
s1 := hcr.SkippedCount()
if s0 != s1 {
allCached = false
}
var directoryOID cas.ObjectID
log.Printf("allCached: %v %v", relativePath, allCached)
dirEntry, numSkipped := hcr.GetEntry(relativePath + "/")
allCached = allCached && dirEntry != nil && numSkipped == 0
var oid cas.ObjectID
if allCached && dirEntry != nil {
// Avoid hashing directory listing if every entry matched the previous (possibly ignoring ordering).
if allCached {
// Avoid hashing directory listing if every entry matched the cache.
return dirEntry.ObjectID, true, nil
} else {
oid, err = writer.Result(true)
return oid, false, err
directoryOID, err = writer.Result(true)
return directoryOID, false, err
}
}

View File

@@ -2,7 +2,6 @@
import (
"io/ioutil"
"log"
"os"
"path/filepath"
@@ -21,6 +20,7 @@ func TestUpload(t *testing.T) {
return
}
// Prepare directory contents.
os.MkdirAll(filepath.Join(sourceDir, "d1/d1"), 0777)
os.MkdirAll(filepath.Join(sourceDir, "d1/d2"), 0777)
os.MkdirAll(filepath.Join(sourceDir, "d2/d1"), 0777)
@@ -77,16 +77,11 @@ func TestUpload(t *testing.T) {
if err != nil {
t.Errorf("upload failed: %v", err)
}
log.Printf("v = %#v", objectManager.Stats())
log.Printf("oid: %v metadataOID: %v", oid, metadataOID)
oid2, metadataOID2, err := u.UploadDir(sourceDir, oid)
if err != nil {
t.Errorf("upload failed: %v", err)
}
log.Printf("v = %#v", objectManager.Stats())
log.Printf("oid2: %v metadataOID2: %v", oid2, metadataOID2)
if oid2 != oid {
t.Errorf("expected oid==oid2, got %v and %v", oid, oid2)
@@ -96,13 +91,12 @@ func TestUpload(t *testing.T) {
t.Errorf("expected metadataOID2==metadataOID, got %v and %v", metadataOID2, metadataOID)
}
// Add one more file, the oid should change.
ioutil.WriteFile(filepath.Join(sourceDir, "d2/d1/f3"), []byte{1, 2, 3, 4, 5}, 0777)
oid3, metadataOID3, err := u.UploadDir(sourceDir, oid)
log.Printf("v = %#v", objectManager.Stats())
if err != nil {
t.Errorf("upload failed: %v", err)
}
log.Printf("oid3: %v metadataOID3: %v", oid3, metadataOID3)
if oid2 == oid3 {
t.Errorf("expected oid3!=oid2, got %v", oid3)
@@ -112,17 +106,18 @@ func TestUpload(t *testing.T) {
t.Errorf("expected metadataOID3!=metadataOID2, got %v", metadataOID3)
}
// Now remove the added file, OID should be identical to the original before the file got added.
os.Remove(filepath.Join(sourceDir, "d2/d1/f3"))
oid4, metadataOID4, err := u.UploadDir(sourceDir, "")
log.Printf("v = %#v", objectManager.Stats())
if err != nil {
t.Errorf("upload failed: %v", err)
}
log.Printf("oid4: %v", oid4)
if oid3 != oid4 {
t.Errorf("expected oid3!=oid4, got %v and %v", oid3, oid4)
if oid4 != oid {
t.Errorf("expected oid3==oid, got %v and %v", oid4, oid)
}
if metadataOID3 != metadataOID4 {
t.Errorf("expected metadataOID3!=metadataOID4, got %v and %v", metadataOID3, metadataOID4)
if metadataOID4 != metadataOID {
t.Errorf("expected metadataOID3==metadataOID4, got %v and %v", metadataOID4, metadataOID)
}
}