packindex: merged package into /block, removed unnecessary visibility and dead code

This commit is contained in:
Jarek Kowalski
2018-10-31 21:14:19 -07:00
parent 849817c2bf
commit 82653d3736
18 changed files with 82 additions and 191 deletions

View File

@@ -7,8 +7,6 @@
"fmt"
"hash/crc32"
"reflect"
"github.com/kopia/repo/internal/packindex"
)
// RecoverIndexFromPackFile attempts to recover index block entries from a given pack file.
@@ -19,10 +17,11 @@ func (bm *Manager) RecoverIndexFromPackFile(ctx context.Context, packFile string
return nil, err
}
ndx, err := packindex.Open(bytes.NewReader(localIndexBytes))
ndx, err := openPackIndex(bytes.NewReader(localIndexBytes))
if err != nil {
return nil, fmt.Errorf("unable to open index in file %v", packFile)
}
defer ndx.Close()
var recovered []Info
@@ -147,7 +146,7 @@ func decodePostamble(payload []byte) *packBlockPostamble {
}
}
func (bm *Manager) buildLocalIndex(pending packindex.Builder) ([]byte, error) {
func (bm *Manager) buildLocalIndex(pending packIndexBuilder) ([]byte, error) {
var buf bytes.Buffer
if err := pending.Build(&buf); err != nil {
return nil, fmt.Errorf("unable to build local index: %v", err)
@@ -157,7 +156,7 @@ func (bm *Manager) buildLocalIndex(pending packindex.Builder) ([]byte, error) {
}
// appendPackFileIndexRecoveryData appends data designed to help with recovery of pack index in case it gets damaged or lost.
func (bm *Manager) appendPackFileIndexRecoveryData(blockData []byte, pending packindex.Builder) ([]byte, error) {
func (bm *Manager) appendPackFileIndexRecoveryData(blockData []byte, pending packIndexBuilder) ([]byte, error) {
// build, encrypt and append local index
localIndexOffset := len(blockData)
localIndex, err := bm.buildLocalIndex(pending)

View File

@@ -18,7 +18,6 @@
"sync/atomic"
"time"
"github.com/kopia/repo/internal/packindex"
"github.com/kopia/repo/internal/repologging"
"github.com/kopia/repo/storage"
)
@@ -44,9 +43,6 @@
indexLoadAttempts = 10
)
// Info is information about a single block managed by Manager.
type Info = packindex.Info
// IndexInfo is information about a single index block managed by Manager.
type IndexInfo struct {
FileName string
@@ -67,9 +63,9 @@ type Manager struct {
locked bool
checkInvariantsOnUnlock bool
currentPackItems map[string]Info // blocks that are in the pack block currently being built (all inline)
currentPackDataLength int // total length of all items in the current pack block
packIndexBuilder packindex.Builder // blocks that are in index currently being built (current pack and all packs saved but not committed)
currentPackItems map[string]Info // blocks that are in the pack block currently being built (all inline)
currentPackDataLength int // total length of all items in the current pack block
packIndexBuilder packIndexBuilder // blocks that are in index currently being built (current pack and all packs saved but not committed)
committedBlocks *committedBlockIndex
disableIndexFlushCount int
@@ -300,7 +296,7 @@ func (bm *Manager) flushPackIndexesLocked(ctx context.Context) error {
if err := bm.committedBlocks.addBlock(indexBlockID, dataCopy, true); err != nil {
return fmt.Errorf("unable to add committed block: %v", err)
}
bm.packIndexBuilder = packindex.NewBuilder()
bm.packIndexBuilder = make(packIndexBuilder)
}
bm.flushPackIndexesAfter = bm.timeNow().Add(flushPackIndexTimeout)
@@ -352,7 +348,7 @@ func (bm *Manager) writePackBlockLocked(ctx context.Context) error {
return nil
}
func (bm *Manager) preparePackDataBlock(packFile string) ([]byte, packindex.Builder, error) {
func (bm *Manager) preparePackDataBlock(packFile string) ([]byte, packIndexBuilder, error) {
formatLog.Debugf("preparing block data with %v items", len(bm.currentPackItems))
blockData, err := appendRandomBytes(nil, rand.Intn(bm.maxPreambleLength-bm.minPreambleLength+1)+bm.minPreambleLength)
@@ -360,7 +356,7 @@ func (bm *Manager) preparePackDataBlock(packFile string) ([]byte, packindex.Buil
return nil, nil, fmt.Errorf("unable to prepare block preamble: %v", err)
}
packFileIndex := packindex.Builder{}
packFileIndex := packIndexBuilder{}
for blockID, info := range bm.currentPackItems {
if info.Payload == nil {
continue
@@ -985,7 +981,7 @@ func newManagerWithOptions(ctx context.Context, st storage.Storage, f Formatting
maxPackSize: f.MaxPackSize,
formatter: formatter,
currentPackItems: make(map[string]Info),
packIndexBuilder: packindex.NewBuilder(),
packIndexBuilder: make(packIndexBuilder),
committedBlocks: blockIndex,
minPreambleLength: defaultMinPreambleLength,
maxPreambleLength: defaultMaxPreambleLength,

View File

@@ -5,8 +5,6 @@
"context"
"fmt"
"time"
"github.com/kopia/repo/internal/packindex"
)
var autoCompactionOptions = CompactOptions{
@@ -91,7 +89,7 @@ func (bm *Manager) compactAndDeleteIndexBlocks(ctx context.Context, indexBlocks
formatLog.Debugf("compacting %v blocks", len(indexBlocks))
t0 := time.Now()
bld := packindex.NewBuilder()
bld := make(packIndexBuilder)
for _, indexBlock := range indexBlocks {
if err := bm.addIndexBlocksToBuilder(ctx, bld, indexBlock, opt); err != nil {
return err
@@ -124,13 +122,13 @@ func (bm *Manager) compactAndDeleteIndexBlocks(ctx context.Context, indexBlocks
return nil
}
func (bm *Manager) addIndexBlocksToBuilder(ctx context.Context, bld packindex.Builder, indexBlock IndexInfo, opt CompactOptions) error {
func (bm *Manager) addIndexBlocksToBuilder(ctx context.Context, bld packIndexBuilder, indexBlock IndexInfo, opt CompactOptions) error {
data, err := bm.getPhysicalBlockInternal(ctx, indexBlock.FileName)
if err != nil {
return err
}
index, err := packindex.Open(bytes.NewReader(data))
index, err := openPackIndex(bytes.NewReader(data))
if err != nil {
return fmt.Errorf("unable to open index block %q: %v", indexBlock, err)
}

View File

@@ -15,7 +15,6 @@
"testing"
"time"
"github.com/kopia/repo/internal/packindex"
"github.com/kopia/repo/internal/storagetesting"
"github.com/kopia/repo/storage"
logging "github.com/op/go-logging"
@@ -803,10 +802,10 @@ func dumpBlockManagerData(t *testing.T, data map[string][]byte) {
t.Helper()
for k, v := range data {
if k[0] == 'n' {
ndx, err := packindex.Open(bytes.NewReader(v))
ndx, err := openPackIndex(bytes.NewReader(v))
if err == nil {
t.Logf("index %v (%v bytes)", k, len(v))
ndx.Iterate("", func(i packindex.Info) error {
ndx.Iterate("", func(i Info) error {
t.Logf(" %+v\n", i)
return nil
})

View File

@@ -1,4 +1,4 @@
package packindex
package block
import (
"bufio"
@@ -8,18 +8,18 @@
"sort"
)
// Builder prepares and writes block index for writing.
type Builder map[string]*Info
// packIndexBuilder prepares and writes block index for writing.
type packIndexBuilder map[string]*Info
// Add adds a new entry to the builder or conditionally replaces it if the timestamp is greater.
func (b Builder) Add(i Info) {
func (b packIndexBuilder) Add(i Info) {
old, ok := b[i.BlockID]
if !ok || i.TimestampSeconds >= old.TimestampSeconds {
b[i.BlockID] = &i
}
}
func (b Builder) sortedBlocks() []*Info {
func (b packIndexBuilder) sortedBlocks() []*Info {
var allBlocks []*Info
for _, v := range b {
@@ -42,7 +42,7 @@ type indexLayout struct {
}
// Build writes the pack index to the provided output.
func (b Builder) Build(output io.Writer) error {
func (b packIndexBuilder) Build(output io.Writer) error {
allBlocks := b.sortedBlocks()
layout := &indexLayout{
packFileOffsets: map[string]uint32{},
@@ -145,8 +145,3 @@ func formatEntry(entry []byte, it *Info, layout *indexLayout) error {
binary.BigEndian.PutUint64(entryTimestampAndFlags, timestampAndFlags)
return nil
}
// NewBuilder creates a new Builder.
func NewBuilder() Builder {
return make(map[string]*Info)
}

View File

@@ -5,7 +5,6 @@
"path/filepath"
"sync"
"github.com/kopia/repo/internal/packindex"
"github.com/kopia/repo/storage"
)
@@ -13,14 +12,14 @@ type committedBlockIndex struct {
cache committedBlockIndexCache
mu sync.Mutex
inUse map[string]packindex.Index
merged packindex.Merged
inUse map[string]packIndex
merged mergedIndex
}
type committedBlockIndexCache interface {
hasIndexBlockID(indexBlockID string) (bool, error)
addBlockToCache(indexBlockID string, data []byte) error
openIndex(indexBlockID string) (packindex.Index, error)
openIndex(indexBlockID string) (packIndex, error)
expireUnused(used []string) error
}
@@ -65,7 +64,7 @@ func (b *committedBlockIndex) addBlock(indexBlockID string, data []byte, use boo
func (b *committedBlockIndex) listBlocks(prefix string, cb func(i Info) error) error {
b.mu.Lock()
m := append(packindex.Merged(nil), b.merged...)
m := append(mergedIndex(nil), b.merged...)
b.mu.Unlock()
return m.Iterate(prefix, cb)
@@ -94,8 +93,8 @@ func (b *committedBlockIndex) use(packFiles []string) (bool, error) {
}
log.Debugf("set of index files has changed (had %v, now %v)", len(b.inUse), len(packFiles))
var newMerged packindex.Merged
newInUse := map[string]packindex.Index{}
var newMerged mergedIndex
newInUse := map[string]packIndex{}
defer func() {
newMerged.Close() //nolint:errcheck
}()
@@ -128,12 +127,12 @@ func newCommittedBlockIndex(caching CachingOptions) (*committedBlockIndex, error
cache = &diskCommittedBlockIndexCache{dirname}
} else {
cache = &memoryCommittedBlockIndexCache{
blocks: map[string]packindex.Index{},
blocks: map[string]packIndex{},
}
}
return &committedBlockIndex{
cache: cache,
inUse: map[string]packindex.Index{},
inUse: map[string]packIndex{},
}, nil
}

View File

@@ -8,7 +8,6 @@
"strings"
"time"
"github.com/kopia/repo/internal/packindex"
"golang.org/x/exp/mmap"
)
@@ -25,7 +24,7 @@ func (c *diskCommittedBlockIndexCache) indexBlockPath(indexBlockID string) strin
return filepath.Join(c.dirname, indexBlockID+simpleIndexSuffix)
}
func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID string) (packindex.Index, error) {
func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID string) (packIndex, error) {
fullpath := c.indexBlockPath(indexBlockID)
f, err := mmap.Open(fullpath)
@@ -33,7 +32,7 @@ func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID string) (packindex
return nil, err
}
return packindex.Open(f)
return openPackIndex(f)
}
func (c *diskCommittedBlockIndexCache) hasIndexBlockID(indexBlockID string) (bool, error) {

View File

@@ -4,13 +4,11 @@
"bytes"
"fmt"
"sync"
"github.com/kopia/repo/internal/packindex"
)
type memoryCommittedBlockIndexCache struct {
mu sync.Mutex
blocks map[string]packindex.Index
blocks map[string]packIndex
}
func (m *memoryCommittedBlockIndexCache) hasIndexBlockID(indexBlockID string) (bool, error) {
@@ -24,7 +22,7 @@ func (m *memoryCommittedBlockIndexCache) addBlockToCache(indexBlockID string, da
m.mu.Lock()
defer m.mu.Unlock()
ndx, err := packindex.Open(bytes.NewReader(data))
ndx, err := openPackIndex(bytes.NewReader(data))
if err != nil {
return err
}
@@ -33,7 +31,7 @@ func (m *memoryCommittedBlockIndexCache) addBlockToCache(indexBlockID string, da
return nil
}
func (m *memoryCommittedBlockIndexCache) openIndex(indexBlockID string) (packindex.Index, error) {
func (m *memoryCommittedBlockIndexCache) openIndex(indexBlockID string) (packIndex, error) {
m.mu.Lock()
defer m.mu.Unlock()

View File

@@ -1,4 +1,4 @@
package packindex
package block
import (
"encoding/hex"

View File

@@ -1,4 +1,4 @@
package packindex
package block
import (
"encoding/binary"

View File

@@ -1,4 +1,4 @@
package packindex
package block
import (
"bytes"
@@ -9,8 +9,8 @@
"strings"
)
// Index is a read-only index of packed blocks.
type Index interface {
// packIndex is a read-only index of packed blocks.
type packIndex interface {
io.Closer
GetInfo(blockID string) (*Info, error)
@@ -186,8 +186,8 @@ func (b *index) Close() error {
return nil
}
// Open reads an Index from a given reader. The caller must call Close() when the index is no longer used.
func Open(readerAt io.ReaderAt) (Index, error) {
// openPackIndex reads an Index from a given reader. The caller must call Close() when the index is no longer used.
func openPackIndex(readerAt io.ReaderAt) (packIndex, error) {
h, err := readHeader(readerAt)
if err != nil {
return nil, fmt.Errorf("invalid header: %v", err)

View File

@@ -1,4 +1,4 @@
package packindex
package block
import (
"time"

View File

@@ -1,15 +1,15 @@
package packindex
package block
import (
"container/heap"
"errors"
)
// Merged is an implementation of Index that transparently merges returns from underlying Indexes.
type Merged []Index
// mergedIndex is an implementation of Index that transparently merges returns from underlying Indexes.
type mergedIndex []packIndex
// Close closes all underlying indexes.
func (m Merged) Close() error {
func (m mergedIndex) Close() error {
for _, ndx := range m {
if err := ndx.Close(); err != nil {
return err
@@ -20,7 +20,7 @@ func (m Merged) Close() error {
}
// GetInfo returns information about a single block. If a block is not found, returns (nil,nil)
func (m Merged) GetInfo(contentID string) (*Info, error) {
func (m mergedIndex) GetInfo(contentID string) (*Info, error) {
var best *Info
for _, ndx := range m {
i, err := ndx.GetInfo(contentID)
@@ -68,7 +68,7 @@ func (h *nextInfoHeap) Pop() interface{} {
return x
}
func iterateChan(prefix string, ndx Index, done chan bool) <-chan Info {
func iterateChan(prefix string, ndx packIndex, done chan bool) <-chan Info {
ch := make(chan Info)
go func() {
defer close(ch)
@@ -87,7 +87,7 @@ func iterateChan(prefix string, ndx Index, done chan bool) <-chan Info {
// Iterate invokes the provided callback for all unique block IDs in the underlying sources until either
// all blocks have been visited or until an error is returned by the callback.
func (m Merged) Iterate(prefix string, cb func(i Info) error) error {
func (m mergedIndex) Iterate(prefix string, cb func(i Info) error) error {
var minHeap nextInfoHeap
done := make(chan bool)
defer close(done)
@@ -129,4 +129,4 @@ func (m Merged) Iterate(prefix string, cb func(i Info) error) error {
return nil
}
var _ Index = (*Merged)(nil)
var _ packIndex = (*mergedIndex)(nil)

View File

@@ -1,43 +1,41 @@
package packindex_test
package block
import (
"bytes"
"fmt"
"reflect"
"testing"
"github.com/kopia/repo/internal/packindex"
)
func TestMerged(t *testing.T) {
i1, err := indexWithItems(
packindex.Info{BlockID: "aabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 11},
packindex.Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
packindex.Info{BlockID: "z010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
packindex.Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 111},
Info{BlockID: "aabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 11},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "z010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 111},
)
if err != nil {
t.Fatalf("can't create index: %v", err)
}
i2, err := indexWithItems(
packindex.Info{BlockID: "aabbcc", TimestampSeconds: 3, PackFile: "yy", PackOffset: 33},
packindex.Info{BlockID: "xaabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
packindex.Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 222, Deleted: true},
Info{BlockID: "aabbcc", TimestampSeconds: 3, PackFile: "yy", PackOffset: 33},
Info{BlockID: "xaabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 222, Deleted: true},
)
if err != nil {
t.Fatalf("can't create index: %v", err)
}
i3, err := indexWithItems(
packindex.Info{BlockID: "aabbcc", TimestampSeconds: 2, PackFile: "zz", PackOffset: 22},
packindex.Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "zz", PackOffset: 222},
packindex.Info{BlockID: "k010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
packindex.Info{BlockID: "k020304", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "aabbcc", TimestampSeconds: 2, PackFile: "zz", PackOffset: 22},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "zz", PackOffset: 222},
Info{BlockID: "k010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "k020304", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
)
if err != nil {
t.Fatalf("can't create index: %v", err)
}
m := packindex.Merged{i1, i2, i3}
m := mergedIndex{i1, i2, i3}
i, err := m.GetInfo("aabbcc")
if err != nil || i == nil {
t.Fatalf("unable to get info: %v", err)
@@ -47,7 +45,7 @@ func TestMerged(t *testing.T) {
}
var inOrder []string
m.Iterate("", func(i packindex.Info) error {
m.Iterate("", func(i Info) error {
inOrder = append(inOrder, i.BlockID)
if i.BlockID == "de1e1e" {
if i.Deleted {
@@ -81,8 +79,8 @@ func TestMerged(t *testing.T) {
}
}
func indexWithItems(items ...packindex.Info) (packindex.Index, error) {
b := packindex.NewBuilder()
func indexWithItems(items ...Info) (packIndex, error) {
b := make(packIndexBuilder)
for _, it := range items {
b.Add(it)
}
@@ -90,5 +88,5 @@ func indexWithItems(items ...packindex.Info) (packindex.Index, error) {
if err := b.Build(&buf); err != nil {
return nil, fmt.Errorf("build error: %v", err)
}
return packindex.Open(bytes.NewReader(buf.Bytes()))
return openPackIndex(bytes.NewReader(buf.Bytes()))
}

View File

@@ -1,4 +1,4 @@
package packindex
package block
import "testing"

View File

@@ -1,4 +1,4 @@
package packindex_test
package block
import (
"bytes"
@@ -9,8 +9,6 @@
"reflect"
"strings"
"testing"
"github.com/kopia/repo/internal/packindex"
)
func TestPackIndex(t *testing.T) {
@@ -58,11 +56,11 @@ func TestPackIndex(t *testing.T) {
return int64(rand.Int31())
}
var infos []packindex.Info
var infos []Info
// deleted blocks with all information
for i := 0; i < 100; i++ {
infos = append(infos, packindex.Info{
infos = append(infos, Info{
TimestampSeconds: randomUnixTime(),
Deleted: true,
BlockID: deterministicBlockID("deleted-packed", i),
@@ -74,7 +72,7 @@ func TestPackIndex(t *testing.T) {
}
// non-deleted block
for i := 0; i < 100; i++ {
infos = append(infos, packindex.Info{
infos = append(infos, Info{
TimestampSeconds: randomUnixTime(),
BlockID: deterministicBlockID("packed", i),
PackFile: deterministicPackFile(i),
@@ -84,10 +82,10 @@ func TestPackIndex(t *testing.T) {
})
}
infoMap := map[string]packindex.Info{}
b1 := packindex.NewBuilder()
b2 := packindex.NewBuilder()
b3 := packindex.NewBuilder()
infoMap := map[string]Info{}
b1 := make(packIndexBuilder)
b2 := make(packIndexBuilder)
b3 := make(packIndexBuilder)
for _, info := range infos {
infoMap[info.BlockID] = info
@@ -123,7 +121,7 @@ func TestPackIndex(t *testing.T) {
fuzzTestIndexOpen(t, data1)
})
ndx, err := packindex.Open(bytes.NewReader(data1))
ndx, err := openPackIndex(bytes.NewReader(data1))
if err != nil {
t.Fatalf("can't open index: %v", err)
}
@@ -141,7 +139,7 @@ func TestPackIndex(t *testing.T) {
}
cnt := 0
ndx.Iterate("", func(info2 packindex.Info) error {
ndx.Iterate("", func(info2 Info) error {
info := infoMap[info2.BlockID]
if !reflect.DeepEqual(info, info2) {
t.Errorf("invalid value retrieved: %+v, wanted %+v", info2, info)
@@ -168,7 +166,7 @@ func TestPackIndex(t *testing.T) {
for _, prefix := range prefixes {
cnt2 := 0
ndx.Iterate(string(prefix), func(info2 packindex.Info) error {
ndx.Iterate(string(prefix), func(info2 Info) error {
cnt2++
if !strings.HasPrefix(string(info2.BlockID), string(prefix)) {
t.Errorf("unexpected item %v when iterating prefix %v", info2.BlockID, prefix)
@@ -184,13 +182,13 @@ func fuzzTestIndexOpen(t *testing.T, originalData []byte) {
rnd := rand.New(rand.NewSource(12345))
fuzzTest(rnd, originalData, 50000, func(d []byte) {
ndx, err := packindex.Open(bytes.NewReader(d))
ndx, err := openPackIndex(bytes.NewReader(d))
if err != nil {
return
}
defer ndx.Close()
cnt := 0
ndx.Iterate("", func(cb packindex.Info) error {
ndx.Iterate("", func(cb Info) error {
if cnt < 10 {
ndx.GetInfo(cb.BlockID)
}

View File

@@ -1,28 +0,0 @@
package packindex
// IsSubset returns true if all entries in index 'a' are contained in index 'b'.
func IsSubset(a, b Index) bool {
done := make(chan bool)
defer close(done)
ach := iterateChan("", a, done)
bch := iterateChan("", b, done)
for ait := range ach {
bit, ok := <-bch
if !ok {
return false
}
for bit.BlockID < ait.BlockID {
bit, ok = <-bch
if !ok {
return false
}
}
if bit.BlockID != ait.BlockID {
return false
}
}
return true
}

View File

@@ -1,60 +0,0 @@
package packindex_test
import (
"bytes"
"fmt"
"testing"
"github.com/kopia/repo/internal/packindex"
)
func TestSubset(t *testing.T) {
cases := []struct {
aBlocks, bBlocks []string
want bool
}{
{[]string{}, []string{"aa"}, true},
{[]string{}, []string{"aa", "bb"}, true},
{[]string{"aa"}, []string{"aa"}, true},
{[]string{"aa"}, []string{"bb"}, false},
{[]string{"aa"}, []string{"aa", "bb"}, true},
{[]string{"aa"}, []string{"aa", "bb", "cc"}, true},
{[]string{"aa", "bb"}, []string{"bb", "cc"}, false},
{[]string{"aa", "bb"}, []string{"aa"}, false},
{[]string{"aa", "bb"}, []string{}, false},
{[]string{"aa", "bb", "cc", "dd", "ee", "ff"}, []string{"aa", "bb", "cc", "dd", "ee", "ff"}, true},
{[]string{"aa", "bb", "cc", "dd", "ee", "ff"}, []string{"aa", "bb", "cc", "dd", "ef", "ff"}, false},
{[]string{"aa", "bb", "cc", "dd", "ee", "ff"}, []string{"aa", "bb", "cc", "dd", "ee", "ef", "ff"}, true},
}
for _, tc := range cases {
a, err := indexWithBlockIDs(tc.aBlocks)
if err != nil {
t.Fatalf("error building index: %v", err)
}
b, err := indexWithBlockIDs(tc.bBlocks)
if err != nil {
t.Fatalf("error building index: %v", err)
}
if got, want := packindex.IsSubset(a, b), tc.want; got != want {
t.Errorf("invalid value of IsSubset(%v,%v): %v, wanted %v", tc.aBlocks, tc.bBlocks, got, want)
}
}
}
func indexWithBlockIDs(items []string) (packindex.Index, error) {
b := packindex.NewBuilder()
for _, it := range items {
b.Add(packindex.Info{
BlockID: it,
PackFile: "x",
PackOffset: 1,
Length: 1,
})
}
var buf bytes.Buffer
if err := b.Build(&buf); err != nil {
return nil, fmt.Errorf("build error: %v", err)
}
return packindex.Open(bytes.NewReader(buf.Bytes()))
}