Removed support for legacy bundles, now that packs are supported everywhere

This commit is contained in:
Jarek Kowalski
2017-08-19 18:06:15 -07:00
parent fa7c558aaf
commit 484db94e23
8 changed files with 4 additions and 303 deletions

View File

@@ -1,58 +0,0 @@
package dir
import "github.com/kopia/kopia/fs"
// EntryTypeBundle is the identifier of filesystem bundle.
const EntryTypeBundle fs.EntryType = "b"

// Bundle represents a collection of files stored together to minimize the number of storage objects.
type Bundle struct {
	// metadata holds aggregate information for the bundle: FileSize is the
	// sum of member file sizes and ModTime is the newest member time
	// (both maintained by Append).
	metadata *fs.EntryMetadata
	// Files are the member files, in the order they were appended.
	Files []fs.File
}

// Parent returns the parent directory of the bundle.
// Bundles do not record a parent, so this is always nil.
func (b *Bundle) Parent() fs.Directory {
	return nil
}

// Metadata returns the bundle metadata.
func (b *Bundle) Metadata() *fs.EntryMetadata {
	return b.metadata
}
// Append adds a given file to the bundle.
//
// The bundle's aggregate FileSize grows by the file's size, and its ModTime
// advances to the file's modification time when that is newer (or when the
// bundle has no modification time yet).
func (b *Bundle) Append(e fs.File) {
	b.Files = append(b.Files, e)
	b.metadata.FileSize += e.Metadata().FileSize

	fileModTime := e.Metadata().ModTime
	switch {
	case b.metadata.ModTime.IsZero(), b.metadata.ModTime.Before(fileModTime):
		b.metadata.ModTime = fileModTime
	}
}
// NewBundle creates a new bundle with given metadata and no member files.
func NewBundle(metadata *fs.EntryMetadata) *Bundle {
	b := &Bundle{metadata: metadata}
	return b
}
// bundledFile is a metadata-only fs.File describing a file stored inside a
// bundle; its contents are never read through this interface.
type bundledFile struct {
	metadata *fs.EntryMetadata
}

// Parent implements fs.Entry; bundled files do not track their parent.
func (f *bundledFile) Parent() fs.Directory {
	return nil
}

// Metadata implements fs.Entry.
func (f *bundledFile) Metadata() *fs.EntryMetadata {
	return f.metadata
}

// Open implements fs.File. Bundled files are placeholders, so calling Open
// is a programming error.
func (f *bundledFile) Open() (fs.Reader, error) {
	panic("Open() is not meant to be called")
}

// NewBundledFile returns new bundled file.
func NewBundledFile(metadata *fs.EntryMetadata) fs.File {
	f := &bundledFile{metadata: metadata}
	return f
}

View File

@@ -2,11 +2,9 @@
import (
"bufio"
"fmt"
"io"
"github.com/kopia/kopia/internal/jsonstream"
"github.com/kopia/kopia/repo"
)
// directoryStreamType identifies the JSON stream format used for directory entries.
var directoryStreamType = "kopia:directory"
@@ -32,76 +30,5 @@ func ReadEntries(r io.Reader) ([]*Entry, error) {
entries = append(entries, e)
}
return flattenBundles(entries)
}
// flattenBundles expands every bundle entry in source into its bundled
// children (assigning each child a section ObjectID within the bundle
// object) and returns all entries merged together in name order.
// It fails if a bundle's child sizes do not add up to the bundle size.
func flattenBundles(source []*Entry) ([]*Entry, error) {
	var plain []*Entry
	var bundles [][]*Entry

	for _, e := range source {
		if e.Type != EntryTypeBundle {
			plain = append(plain, e)
			continue
		}

		children := e.BundledChildren
		e.BundledChildren = nil

		// Each child occupies a consecutive section of the bundle object.
		var off int64
		for _, c := range children {
			c.ObjectID = repo.SectionObjectID(off, c.FileSize, e.ObjectID)
			off += c.FileSize
		}
		if off != e.FileSize {
			return nil, fmt.Errorf("inconsistent size of '%v': %v (got %v)", e.Name, e.FileSize, off)
		}

		bundles = append(bundles, children)
	}

	if len(bundles) == 0 {
		return plain, nil
	}
	if plain != nil {
		bundles = append(bundles, plain)
	}
	return mergeSortN(bundles), nil
}
// mergeSort2 merges two name-ordered entry slices into a single ordered
// slice. On equal names, entries from b2 come first (matching the original
// merge behavior).
func mergeSort2(b1, b2 []*Entry) []*Entry {
	merged := make([]*Entry, 0, len(b1)+len(b2))

	i, j := 0, 0
	for i < len(b1) && j < len(b2) {
		if b1[i].Name < b2[j].Name {
			merged = append(merged, b1[i])
			i++
		} else {
			merged = append(merged, b2[j])
			j++
		}
	}

	merged = append(merged, b1[i:]...)
	merged = append(merged, b2[j:]...)
	return merged
}
// mergeSortN merges N name-ordered entry slices into one ordered slice by
// recursively splitting the set of slices in half and 2-way merging.
func mergeSortN(slices [][]*Entry) []*Entry {
	switch len(slices) {
	case 0:
		// Guard: without this, the default branch computes mid == 0 and
		// recurses on the same empty slice forever (stack overflow).
		return nil
	case 1:
		return slices[0]
	case 2:
		return mergeSort2(slices[0], slices[1])
	default:
		mid := len(slices) / 2
		return mergeSort2(
			mergeSortN(slices[:mid]),
			mergeSortN(slices[mid:]))
	}
}

View File

@@ -1,154 +0,0 @@
package dir
import (
"reflect"
"strings"
"testing"
"time"
"github.com/kopia/kopia/fs"
"github.com/kopia/kopia/repo"
)
var (
	// Fixture timestamps for tests; mustParseTimestamp panics on bad input,
	// so these are known-good RFC3339Nano strings.
	// NOTE(review): not referenced in the tests visible here — confirm usage elsewhere in this file.
	time1 = mustParseTimestamp("2016-04-06T02:34:10Z")
	time2 = mustParseTimestamp("2016-04-02T02:39:44.123456789Z")
	time3 = mustParseTimestamp("2016-04-02T02:36:19Z")
)
// bundledFileEntry builds a plain file Entry (no ObjectID) with the given
// name and size, for use as a bundled child in tests.
func bundledFileEntry(n string, l int64) *Entry {
	md := fs.EntryMetadata{
		Name:     n,
		Type:     fs.EntryTypeFile,
		FileSize: l,
	}
	return &Entry{EntryMetadata: md}
}
// entryWithSection builds a file Entry whose ObjectID is the section
// [start, start+length) of the given base object.
func entryWithSection(n string, l int64, start int64, length int64, baseID repo.ObjectID) *Entry {
	md := fs.EntryMetadata{
		Name:     n,
		Type:     fs.EntryTypeFile,
		FileSize: l,
	}
	return &Entry{
		EntryMetadata: md,
		ObjectID:      repo.SectionObjectID(start, length, baseID),
	}
}
// bundleEntry builds a bundle Entry with the given name, total size,
// object ID, and bundled children.
func bundleEntry(n string, l int64, oid repo.ObjectID, children []*Entry) *Entry {
	e := &Entry{
		ObjectID:        oid,
		BundledChildren: children,
	}
	e.Name = n
	e.FileSize = l
	e.Type = EntryTypeBundle
	return e
}
// TestFlattenBundles verifies that a single bundle is expanded into its
// children with correct section ObjectIDs.
func TestFlattenBundles(t *testing.T) {
	base := repo.ObjectID{StorageBlock: "5555"}
	input := []*Entry{
		bundleEntry("bundle1", 170, base, []*Entry{
			bundledFileEntry("a1", 50),
			bundledFileEntry("z1", 120),
		}),
	}

	got, err := flattenBundles(input)
	if err != nil {
		t.Errorf("can't read directory entries: %v", err)
		return
	}

	want := []*Entry{
		entryWithSection("a1", 50, 0, 50, base),
		entryWithSection("z1", 120, 50, 120, base),
	}
	verifyDirectory(t, got, want)
}
// TestFlattenBundlesInconsistentBundleSize verifies that a bundle whose
// declared size disagrees with the sum of its children is rejected.
func TestFlattenBundlesInconsistentBundleSize(t *testing.T) {
	input := []*Entry{
		bundleEntry("bundle1", 171, repo.ObjectID{StorageBlock: "5555"}, []*Entry{
			bundledFileEntry("a1", 50),
			bundledFileEntry("z1", 120),
		}),
	}

	_, err := flattenBundles(input)
	if err == nil {
		t.Errorf("expected error")
		return
	}

	if !strings.Contains(err.Error(), "inconsistent size of 'bundle1'") {
		t.Errorf("invalid error: %v", err)
	}
}
// TestFlattenThreeBundles verifies that children of multiple bundles are
// merged into a single name-ordered list with per-bundle section offsets.
func TestFlattenThreeBundles(t *testing.T) {
	base1 := repo.ObjectID{StorageBlock: "5555"}
	base2 := repo.ObjectID{StorageBlock: "6666"}
	base3 := repo.ObjectID{StorageBlock: "7777"}

	input := []*Entry{
		bundleEntry("bundle1", 170, base1, []*Entry{
			bundledFileEntry("a1", 50),
			bundledFileEntry("z1", 120),
		}),
		bundleEntry("bundle3", 7, base3, []*Entry{
			bundledFileEntry("a3", 5),
			bundledFileEntry("z3", 2),
		}),
		bundleEntry("bundle2", 300, base2, []*Entry{
			bundledFileEntry("a2", 100),
			bundledFileEntry("z2", 200),
		}),
	}

	got, err := flattenBundles(input)
	if err != nil {
		t.Errorf("can't read directory entries: %v", err)
		return
	}

	want := []*Entry{
		entryWithSection("a1", 50, 0, 50, base1),
		entryWithSection("a2", 100, 0, 100, base2),
		entryWithSection("a3", 5, 0, 5, base3),
		entryWithSection("z1", 120, 50, 120, base1),
		entryWithSection("z2", 200, 100, 200, base2),
		entryWithSection("z3", 2, 5, 2, base3),
	}
	verifyDirectory(t, got, want)
}
// verifyDirectory compares actual directory entries against expected ones,
// reporting a count mismatch and any entry that differs (deep equality).
func verifyDirectory(t *testing.T, actual []*Entry, expected []*Entry) {
	if len(actual) != len(expected) {
		t.Errorf("expected %v entries, got %v", len(expected), len(actual))
	}

	for i := range expected {
		if i >= len(actual) {
			break
		}
		if !reflect.DeepEqual(expected[i], actual[i]) {
			t.Errorf("invalid entry at index %v:\nexpected: %#v\nactual: %#v", i,
				expected[i], actual[i])
		}
	}
}
func mustParseTimestamp(s string) time.Time {
t, err := time.Parse(time.RFC3339Nano, s)
if err != nil {
panic("cannot parse timestamp: " + s)
}
return t
}

View File

@@ -17,10 +17,6 @@ func (dw *Writer) WriteEntry(e *Entry) error {
panic("invalid object ID: " + err.Error())
}
if e.Type == EntryTypeBundle && len(e.BundledChildren) == 0 {
panic("empty bundle!")
}
return dw.w.Write(e)
}

View File

@@ -8,6 +8,5 @@
// Entry represents a directory entry as stored in JSON stream.
type Entry struct {
fs.EntryMetadata
ObjectID repo.ObjectID `json:"obj,omitempty"`
BundledChildren []*Entry `json:"bundled,omitempty"`
ObjectID repo.ObjectID `json:"obj,omitempty"`
}

View File

@@ -17,21 +17,14 @@
//
// 2. In a series of storage blocks with an indirect block pointing at them (multiple indirections are allowed). This is used for larger files.
//
// 3. Inline as part of the ObjectID (typically for very small or empty files).
//
// 4. As sections of other objects (bundles).
//
// 5. As sections of other objects (bundles).
// 3. Packed into larger objects (packs).
//
// ObjectIDs have standard string representation (returned by String() and accepted as input to ParseObjectID()) suitable for using
// in user interfaces, such as command-line tools:
//
// Examples:
//
// "B" // empty object
// "BcXVpY2sgYnJvd24gZm94Cg==" // inline content "quick brown fox" (base64-encoded)
// "D295754edeb35c17911b1fdf853f572fe" // storage block
// "I1,2c33acbcba3569f943d9e8aaea7817c5" // level-1 indirection block (legacy)
// "ID2c33acbcba3569f943d9e8aaea7817c5" // level-1 indirection block
// "IID2c33acbcba3569f943d9e8aaea7817c5" // level-2 indirection block
// "S30,50,D295754edeb35c17911b1fdf853f572fe" // section of "D295754edeb35c17911b1fdf853f572fe" between [30,80)

View File

@@ -9,8 +9,6 @@ type Stats struct {
TotalDirectoryCount int `json:"dirCount"`
TotalFileCount int `json:"fileCount"`
TotalFileSize int64 `json:"totalSize"`
TotalBundleCount int `json:"bundleCount"`
TotalBundleSize int64 `json:"totalBundleSize"`
CachedFiles int `json:"cachedFiles"`
NonCachedFiles int `json:"nonCachedFiles"`

View File

@@ -70,8 +70,6 @@ func (u *Uploader) cancelReason() string {
}
func (u *Uploader) uploadFileInternal(f fs.File, relativePath string) (*dir.Entry, uint64, error) {
u.Progress.Started(relativePath, f.Metadata().FileSize)
file, err := f.Open()
if err != nil {
return nil, 0, fmt.Errorf("unable to open file: %v", err)
@@ -83,6 +81,7 @@ func (u *Uploader) uploadFileInternal(f fs.File, relativePath string) (*dir.Entr
})
defer writer.Close()
u.Progress.Started(relativePath, f.Metadata().FileSize)
written, err := u.copyWithProgress(relativePath, writer, file, 0, f.Metadata().FileSize)
if err != nil {
u.Progress.Finished(relativePath, f.Metadata().FileSize, err)
@@ -139,6 +138,7 @@ func (u *Uploader) uploadSymlinkInternal(f fs.Symlink, relativePath string) (*di
u.Progress.Finished(relativePath, 1, nil)
return de, metadataHash(&de.EntryMetadata), nil
}
func (u *Uploader) copyWithProgress(path string, dst io.Writer, src io.Reader, completed int64, length int64) (int64, error) {
if u.uploadBuf == nil {
u.uploadBuf = make([]byte, 128*1024) // 128 KB buffer