refactoring: renamed storage.Storage to blob.Storage

This updates the terminology everywhere - blocks become blobs and
`storage.Storage` becomes `blob.Storage`.

Also introduced blob.ID which is a specialized string type, that's
different from CABS block ID.

Also renamed CLI subcommands from `kopia storage` to `kopia blob`.

While at it introduced `block.ErrBlockNotFound` and
`object.ErrObjectNotFound` that do not leak from lower layers.
This commit is contained in:
Jarek Kowalski
2019-06-01 11:10:29 -07:00
parent 1a7a02ddbe
commit 9e5d0beccd
92 changed files with 1228 additions and 1190 deletions

View File

@@ -12,8 +12,8 @@
"github.com/kopia/kopia/internal/kopialogging"
"github.com/kopia/kopia/internal/serverapi"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
@@ -32,7 +32,7 @@
serverCommands = app.Command("server", "Commands to control HTTP API server.")
manifestCommands = app.Command("manifest", "Low-level commands to manipulate manifest items.").Hidden()
blockCommands = app.Command("block", "Commands to manipulate virtual blocks in repository.").Alias("blk").Hidden()
storageCommands = app.Command("storage", "Commands to manipulate raw storage blocks.").Hidden()
blobCommands = app.Command("blob", "Commands to manipulate BLOBs.").Hidden()
blockIndexCommands = app.Command("blockindex", "Commands to manipulate block index.").Hidden()
benchmarkCommands = app.Command("benchmark", "Commands to test performance of algorithms.").Hidden()
)
@@ -60,7 +60,7 @@ func repositoryAction(act func(ctx context.Context, rep *repo.Repository) error)
ctx := context.Background()
ctx = block.UsingBlockCache(ctx, *enableCaching)
ctx = block.UsingListCache(ctx, *enableListCaching)
ctx = storage.WithUploadProgressCallback(ctx, func(desc string, progress, total int64) {
ctx = blob.WithUploadProgressCallback(ctx, func(desc string, progress, total int64) {
cliProgress.Report("upload '"+desc+"'", progress, total)
})
@@ -68,7 +68,7 @@ func repositoryAction(act func(ctx context.Context, rep *repo.Repository) error)
rep := mustOpenRepository(ctx, nil)
repositoryOpenTime := time.Since(t0)
storageType := rep.Storage.ConnectionInfo().Type
storageType := rep.Blobs.ConnectionInfo().Type
reportStartupTime(storageType, rep.Blocks.Format.Version, repositoryOpenTime)

View File

@@ -0,0 +1,30 @@
package cli
import (
"context"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
)
// Flags for the "blob delete" command, which removes raw blobs from storage.
var (
	// NOTE(review): description was "Show contents of blobs" — copy-pasted
	// from the "show" command; corrected to describe deletion.
	blobDeleteCommand = blobCommands.Command("delete", "Delete blobs").Alias("rm")
	blobDeleteBlobIDs = blobDeleteCommand.Arg("blobIDs", "Blob IDs").Required().Strings()
)
// runDeleteStorageBlocks deletes each blob listed on the command line,
// stopping at the first failure.
func runDeleteStorageBlocks(ctx context.Context, rep *repo.Repository) error {
	for _, blobID := range *blobDeleteBlobIDs {
		if err := rep.Blobs.DeleteBlob(ctx, blob.ID(blobID)); err != nil {
			return errors.Wrapf(err, "error deleting %v", blobID)
		}
	}

	return nil
}
// Register the "blob delete" subcommand action with the CLI framework.
func init() {
	blobDeleteCommand.Action(repositoryAction(runDeleteStorageBlocks))
}

35
cli/command_blob_list.go Normal file
View File

@@ -0,0 +1,35 @@
package cli
import (
"context"
"fmt"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
)
// Flags for the "blob list" command. Min/max size of 0 means "no limit".
var (
	blobListCommand = blobCommands.Command("list", "List BLOBs").Alias("ls")
	blobListPrefix  = blobListCommand.Flag("prefix", "Blob ID prefix").String()
	blobListMinSize = blobListCommand.Flag("min-size", "Minimum size").Int64()
	blobListMaxSize = blobListCommand.Flag("max-size", "Maximum size").Int64()
)
// runBlobList prints one line per blob matching the prefix and optional
// size filters: ID, length, and timestamp.
func runBlobList(ctx context.Context, rep *repo.Repository) error {
	return rep.Blobs.ListBlobs(ctx, blob.ID(*blobListPrefix), func(m blob.Metadata) error {
		tooBig := *blobListMaxSize != 0 && m.Length > *blobListMaxSize
		tooSmall := *blobListMinSize != 0 && m.Length < *blobListMinSize
		if tooBig || tooSmall {
			// Filtered out by size; keep listing.
			return nil
		}

		fmt.Printf("%-70v %10v %v\n", m.BlobID, m.Length, formatTimestamp(m.Timestamp))
		return nil
	})
}
// Register the "blob list" subcommand action with the CLI framework.
func init() {
	blobListCommand.Action(repositoryAction(runBlobList))
}

36
cli/command_blob_show.go Normal file
View File

@@ -0,0 +1,36 @@
package cli
import (
"bytes"
"context"
"io"
"os"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
)
// Flags for the "blob show" command.
var (
	blobShowCommand = blobCommands.Command("show", "Show contents of BLOBs").Alias("cat")
	// Arg placeholder renamed "blobID" -> "blobIDs": the argument accepts
	// multiple IDs (Strings()), matching the "delete" command's naming.
	blobShowIDs = blobShowCommand.Arg("blobIDs", "Blob IDs").Required().Strings()
)
// runBlobShow streams the raw contents of each requested blob to stdout.
func runBlobShow(ctx context.Context, rep *repo.Repository) error {
	for _, id := range *blobShowIDs {
		data, err := rep.Blobs.GetBlob(ctx, blob.ID(id), 0, -1)
		if err != nil {
			return errors.Wrapf(err, "error getting %v", id)
		}

		if _, err = io.Copy(os.Stdout, bytes.NewReader(data)); err != nil {
			return err
		}
	}

	return nil
}
// Register the "blob show" subcommand action with the CLI framework.
func init() {
	blobShowCommand.Action(repositoryAction(runBlobShow))
}

View File

@@ -14,9 +14,9 @@
)
func runBlockGarbageCollectAction(ctx context.Context, rep *repo.Repository) error {
unused, err := rep.Blocks.FindUnreferencedStorageFiles(ctx)
unused, err := rep.Blocks.FindUnreferencedBlobs(ctx)
if err != nil {
return errors.Wrap(err, "error looking for unreferenced storage files")
return errors.Wrap(err, "error looking for unreferenced blobs")
}
if len(unused) == 0 {
@@ -27,7 +27,7 @@ func runBlockGarbageCollectAction(ctx context.Context, rep *repo.Repository) err
if *blockGarbageCollectCommandDelete != "yes" {
var totalBytes int64
for _, u := range unused {
printStderr("unused %v (%v bytes)\n", u.BlockID, u.Length)
printStderr("unused %v (%v bytes)\n", u.BlobID, u.Length)
totalBytes += u.Length
}
printStderr("Would delete %v unused blocks (%v bytes), pass '--delete=yes' to actually delete.\n", len(unused), totalBytes)
@@ -36,9 +36,9 @@ func runBlockGarbageCollectAction(ctx context.Context, rep *repo.Repository) err
}
for _, u := range unused {
printStderr("Deleting unused block %q (%v bytes)...\n", u.BlockID, u.Length)
if err := rep.Storage.DeleteBlock(ctx, u.BlockID); err != nil {
return errors.Wrapf(err, "unable to delete block %q", u.BlockID)
printStderr("Deleting unused block %q (%v bytes)...\n", u.BlobID, u.Length)
if err := rep.Blobs.DeleteBlob(ctx, u.BlobID); err != nil {
return errors.Wrapf(err, "unable to delete block %q", u.BlobID)
}
}

View File

@@ -31,12 +31,12 @@ func runListBlockIndexesAction(ctx context.Context, rep *repo.Repository) error
})
case "name":
sort.Slice(blks, func(i, j int) bool {
return blks[i].FileName < blks[j].FileName
return blks[i].BlobID < blks[j].BlobID
})
}
for _, b := range blks {
fmt.Printf("%-70v %10v %v\n", b.FileName, b.Length, formatTimestampPrecise(b.Timestamp))
fmt.Printf("%-70v %10v %v\n", b.BlobID, b.Length, formatTimestampPrecise(b.Timestamp))
}
if *blockIndexListSummary {

View File

@@ -4,14 +4,14 @@
"context"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
)
var (
blockIndexRecoverCommand = blockIndexCommands.Command("recover", "Recover block indexes from pack blocks")
blockIndexRecoverPackFile = blockIndexRecoverCommand.Flag("file", "Names of pack files to recover (default=all packs)").Strings()
blockIndexRecoverCommit = blockIndexRecoverCommand.Flag("commit", "Commit recovered blocks").Bool()
blockIndexRecoverCommand = blockIndexCommands.Command("recover", "Recover block indexes from pack blocks")
blockIndexRecoverBlobIDs = blockIndexRecoverCommand.Flag("blobs", "Names of pack blobs to recover (default=all packs)").Strings()
blockIndexRecoverCommit = blockIndexRecoverCommand.Flag("commit", "Commit recovered blocks").Bool()
)
func runRecoverBlockIndexesAction(ctx context.Context, rep *repo.Repository) error {
@@ -30,29 +30,29 @@ func runRecoverBlockIndexesAction(ctx context.Context, rep *repo.Repository) err
}
}()
if len(*blockIndexRecoverPackFile) == 0 {
return rep.Storage.ListBlocks(ctx, block.PackBlockPrefix, func(bm storage.BlockMetadata) error {
recoverIndexFromSinglePackFile(ctx, rep, bm.BlockID, bm.Length, &totalCount)
if len(*blockIndexRecoverBlobIDs) == 0 {
return rep.Blobs.ListBlobs(ctx, block.PackBlobIDPrefix, func(bm blob.Metadata) error {
recoverIndexFromSinglePackFile(ctx, rep, bm.BlobID, bm.Length, &totalCount)
return nil
})
}
for _, packFile := range *blockIndexRecoverPackFile {
recoverIndexFromSinglePackFile(ctx, rep, packFile, 0, &totalCount)
for _, packFile := range *blockIndexRecoverBlobIDs {
recoverIndexFromSinglePackFile(ctx, rep, blob.ID(packFile), 0, &totalCount)
}
return nil
}
func recoverIndexFromSinglePackFile(ctx context.Context, rep *repo.Repository, packFileName string, length int64, totalCount *int) {
recovered, err := rep.Blocks.RecoverIndexFromPackFile(ctx, packFileName, length, *blockIndexRecoverCommit)
func recoverIndexFromSinglePackFile(ctx context.Context, rep *repo.Repository, blobID blob.ID, length int64, totalCount *int) {
recovered, err := rep.Blocks.RecoverIndexFromPackBlob(ctx, blobID, length, *blockIndexRecoverCommit)
if err != nil {
log.Warningf("unable to recover index from %v: %v", packFileName, err)
log.Warningf("unable to recover index from %v: %v", blobID, err)
return
}
*totalCount += len(recovered)
log.Infof("Recovered %v entries from %v (commit=%v)", len(recovered), packFileName, *blockIndexRecoverCommit)
log.Infof("Recovered %v entries from %v (commit=%v)", len(recovered), blobID, *blockIndexRecoverCommit)
}
func init() {

View File

@@ -6,6 +6,7 @@
"sort"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
)
@@ -31,15 +32,15 @@ func runListBlocksAction(ctx context.Context, rep *repo.Repository) error {
var count int
var totalSize int64
uniquePacks := map[string]bool{}
uniquePacks := map[blob.ID]bool{}
for _, b := range blocks {
if *blockListDeletedOnly && !b.Deleted {
continue
}
totalSize += int64(b.Length)
count++
if b.PackFile != "" {
uniquePacks[b.PackFile] = true
if b.PackBlobID != "" {
uniquePacks[b.PackBlobID] = true
}
if *blockListLong {
optionalDeleted := ""
@@ -49,7 +50,7 @@ func runListBlocksAction(ctx context.Context, rep *repo.Repository) error {
fmt.Printf("%v %v %v %v+%v%v\n",
b.BlockID,
formatTimestamp(b.Timestamp()),
b.PackFile,
b.PackBlobID,
b.PackOffset,
maybeHumanReadableBytes(*blockListHuman, int64(b.Length)),
optionalDeleted)
@@ -88,7 +89,7 @@ func sortBlocks(blocks []block.Info) {
}
func comparePacks(a, b block.Info) bool {
if a, b := a.PackFile, b.PackFile; a != b {
if a, b := a.PackBlobID, b.PackBlobID; a != b {
return a < b
}

View File

@@ -9,6 +9,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
)
@@ -58,7 +59,7 @@ func runRewriteBlocksAction(ctx context.Context, rep *repo.Repository) error {
optDeleted = " (deleted)"
}
printStderr("Rewriting block %v (%v bytes) from pack %v%v\n", b.BlockID, b.Length, b.PackFile, optDeleted)
printStderr("Rewriting block %v (%v bytes) from pack %v%v\n", b.BlockID, b.Length, b.PackBlobID, optDeleted)
mu.Lock()
totalBytes += int64(b.Length)
mu.Unlock()
@@ -128,7 +129,7 @@ func findBlocksWithFormatVersion(ctx context.Context, rep *repo.Repository, ch c
}
for _, b := range infos {
if int(b.FormatVersion) == *blockRewriteFormatVersion && strings.HasPrefix(b.PackFile, *blockRewritePackPrefix) {
if int(b.FormatVersion) == *blockRewriteFormatVersion && strings.HasPrefix(string(b.PackBlobID), *blockRewritePackPrefix) {
ch <- blockInfoOrError{Info: b}
}
}
@@ -154,25 +155,25 @@ func findBlocksInShortPacks(ctx context.Context, rep *repo.Repository, ch chan b
fmt.Printf("Nothing to do, found %v short pack blocks\n", len(shortPackBlocks))
} else {
for _, b := range infos {
if shortPackBlocks[b.PackFile] && strings.HasPrefix(b.PackFile, *blockRewritePackPrefix) {
if shortPackBlocks[b.PackBlobID] && strings.HasPrefix(string(b.PackBlobID), *blockRewritePackPrefix) {
ch <- blockInfoOrError{Info: b}
}
}
}
}
func findShortPackBlocks(infos []block.Info, threshold uint32) (map[string]bool, error) {
packUsage := map[string]uint32{}
func findShortPackBlocks(infos []block.Info, threshold uint32) (map[blob.ID]bool, error) {
packUsage := map[blob.ID]uint32{}
for _, bi := range infos {
packUsage[bi.PackFile] += bi.Length
packUsage[bi.PackBlobID] += bi.Length
}
shortPackBlocks := map[string]bool{}
shortPackBlocks := map[blob.ID]bool{}
for packFile, usage := range packUsage {
for blobID, usage := range packUsage {
if usage < threshold {
shortPackBlocks[packFile] = true
shortPackBlocks[blobID] = true
}
}

View File

@@ -7,8 +7,8 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
"gopkg.in/alecthomas/kingpin.v2"
)
@@ -44,12 +44,12 @@ func init() {
setupConnectOptions(connectCommand)
}
func runConnectCommandWithStorage(ctx context.Context, st storage.Storage) error {
func runConnectCommandWithStorage(ctx context.Context, st blob.Storage) error {
password := mustGetPasswordFromFlags(false, false)
return runConnectCommandWithStorageAndPassword(ctx, st, password)
}
func runConnectCommandWithStorageAndPassword(ctx context.Context, st storage.Storage, password string) error {
func runConnectCommandWithStorageAndPassword(ctx context.Context, st blob.Storage, password string) error {
configFile := repositoryConfigFileName()
if err := repo.Connect(ctx, configFile, st, password, connectOptions()); err != nil {
return err

View File

@@ -8,12 +8,12 @@
kingpin "gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var connectToStorageFromConfigPath string
func connectToStorageFromConfig(ctx context.Context, isNew bool) (storage.Storage, error) {
func connectToStorageFromConfig(ctx context.Context, isNew bool) (blob.Storage, error) {
if isNew {
return nil, errors.New("not supported")
}
@@ -30,7 +30,7 @@ func connectToStorageFromConfig(ctx context.Context, isNew bool) (storage.Storag
return nil, errors.Wrap(err, "unable to load config")
}
return storage.NewStorage(ctx, cfg.Storage)
return blob.NewStorage(ctx, cfg.Storage)
}
func init() {

View File

@@ -8,9 +8,9 @@
"github.com/kopia/kopia/fs/ignorefs"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/snapshot/policy"
)
@@ -53,9 +53,9 @@ func newRepositoryOptionsFromFlags() *repo.NewRepositoryOptions {
}
}
func ensureEmpty(ctx context.Context, s storage.Storage) error {
func ensureEmpty(ctx context.Context, s blob.Storage) error {
hasDataError := errors.New("has data")
err := s.ListBlocks(ctx, "", func(cb storage.BlockMetadata) error {
err := s.ListBlobs(ctx, "", func(cb blob.Metadata) error {
return hasDataError
})
if err == hasDataError {
@@ -65,7 +65,7 @@ func ensureEmpty(ctx context.Context, s storage.Storage) error {
return err
}
func runCreateCommandWithStorage(ctx context.Context, st storage.Storage) error {
func runCreateCommandWithStorage(ctx context.Context, st blob.Storage) error {
err := ensureEmpty(ctx, st)
if err != nil {
return errors.Wrap(err, "unable to get repository storage")

View File

@@ -6,7 +6,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var (
@@ -17,19 +17,19 @@
repairDryDrun = repairCommand.Flag("dry-run", "Do not modify repository").Short('n').Bool()
)
func runRepairCommandWithStorage(ctx context.Context, st storage.Storage) error {
func runRepairCommandWithStorage(ctx context.Context, st blob.Storage) error {
if err := maybeRecoverFormatBlock(ctx, st, *repairCommandRecoverFormatBlockPrefix); err != nil {
return err
}
return nil
}
func maybeRecoverFormatBlock(ctx context.Context, st storage.Storage, prefix string) error {
func maybeRecoverFormatBlock(ctx context.Context, st blob.Storage, prefix string) error {
switch *repairCommandRecoverFormatBlock {
case "auto":
log.Infof("looking for format block...")
if _, err := st.GetBlock(ctx, repo.FormatBlockID, 0, -1); err == nil {
log.Infof("format block already exists, not recovering, pass --recover-format=yes")
log.Infof("looking for format blob...")
if _, err := st.GetBlob(ctx, repo.FormatBlobID, 0, -1); err == nil {
log.Infof("format blob already exists, not recovering, pass --recover-format=yes")
return nil
}
@@ -40,19 +40,19 @@ func maybeRecoverFormatBlock(ctx context.Context, st storage.Storage, prefix str
return recoverFormatBlock(ctx, st, *repairCommandRecoverFormatBlockPrefix)
}
func recoverFormatBlock(ctx context.Context, st storage.Storage, prefix string) error {
func recoverFormatBlock(ctx context.Context, st blob.Storage, prefix string) error {
errSuccess := errors.New("success")
err := st.ListBlocks(ctx, *repairCommandRecoverFormatBlockPrefix, func(bi storage.BlockMetadata) error {
log.Infof("looking for replica of format block in %v...", bi.BlockID)
if b, err := repo.RecoverFormatBlock(ctx, st, bi.BlockID, bi.Length); err == nil {
err := st.ListBlobs(ctx, blob.ID(*repairCommandRecoverFormatBlockPrefix), func(bi blob.Metadata) error {
log.Infof("looking for replica of format block in %v...", bi.BlobID)
if b, err := repo.RecoverFormatBlock(ctx, st, bi.BlobID, bi.Length); err == nil {
if !*repairDryDrun {
if puterr := st.PutBlock(ctx, repo.FormatBlockID, b); puterr != nil {
if puterr := st.PutBlob(ctx, repo.FormatBlobID, b); puterr != nil {
return puterr
}
}
log.Infof("recovered replica block from %v", bi.BlockID)
log.Infof("recovered replica block from %v", bi.BlobID)
return errSuccess
}

View File

@@ -27,7 +27,7 @@ func runStatusCommand(ctx context.Context, rep *repo.Repository) error {
}
fmt.Println()
ci := rep.Storage.ConnectionInfo()
ci := rep.Blobs.ConnectionInfo()
fmt.Printf("Storage type: %v\n", ci.Type)
if cjson, err := json.MarshalIndent(scrubber.ScrubSensitiveData(reflect.ValueOf(ci.Config)).Interface(), " ", " "); err == nil {

View File

@@ -1,29 +0,0 @@
package cli
import (
"context"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
)
var (
storageDeleteCommand = storageCommands.Command("delete", "Show storage blocks").Alias("rm")
storageDeleteBlockIDs = storageDeleteCommand.Arg("blockIDs", "Block IDs").Required().Strings()
)
func runDeleteStorageBlocks(ctx context.Context, rep *repo.Repository) error {
for _, b := range *storageDeleteBlockIDs {
err := rep.Storage.DeleteBlock(ctx, b)
if err != nil {
return errors.Wrapf(err, "error deleting %v", b)
}
}
return nil
}
func init() {
storageDeleteCommand.Action(repositoryAction(runDeleteStorageBlocks))
}

View File

@@ -1,35 +0,0 @@
package cli
import (
"context"
"fmt"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/storage"
)
var (
storageListCommand = storageCommands.Command("list", "List storage blocks").Alias("ls")
storageListPrefix = storageListCommand.Flag("prefix", "Block prefix").String()
storageListMinSize = storageListCommand.Flag("min-size", "Minimum size").Int64()
storageListMaxSize = storageListCommand.Flag("max-size", "Maximum size").Int64()
)
func runListStorageBlocks(ctx context.Context, rep *repo.Repository) error {
return rep.Storage.ListBlocks(ctx, *storageListPrefix, func(b storage.BlockMetadata) error {
if *storageListMaxSize != 0 && b.Length > *storageListMaxSize {
return nil
}
if *storageListMinSize != 0 && b.Length < *storageListMinSize {
return nil
}
fmt.Printf("%-70v %10v %v\n", b.BlockID, b.Length, formatTimestamp(b.Timestamp))
return nil
})
}
func init() {
storageListCommand.Action(repositoryAction(runListStorageBlocks))
}

View File

@@ -1,35 +0,0 @@
package cli
import (
"bytes"
"context"
"io"
"os"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
)
var (
storageShowCommand = storageCommands.Command("show", "Show storage blocks").Alias("cat")
storageShowBlockIDs = storageShowCommand.Arg("blockIDs", "Block IDs").Required().Strings()
)
func runShowStorageBlocks(ctx context.Context, rep *repo.Repository) error {
for _, b := range *storageShowBlockIDs {
d, err := rep.Storage.GetBlock(ctx, b, 0, -1)
if err != nil {
return errors.Wrapf(err, "error getting %v", b)
}
if _, err := io.Copy(os.Stdout, bytes.NewReader(d)); err != nil {
return err
}
}
return nil
}
func init() {
storageShowCommand.Action(repositoryAction(runShowStorageBlocks))
}

View File

@@ -7,8 +7,8 @@
"gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/filesystem"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/filesystem"
)
var options filesystem.Options
@@ -21,7 +21,7 @@
connectFlat bool
)
func connect(ctx context.Context, isNew bool) (storage.Storage, error) {
func connect(ctx context.Context, isNew bool) (blob.Storage, error) {
fso := options
if v := connectOwnerUID; v != "" {
fso.FileUID = getIntPtrValue(v, 10)

View File

@@ -5,8 +5,8 @@
"gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/gcs"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/gcs"
)
func init() {
@@ -24,7 +24,7 @@ func(cmd *kingpin.CmdClause) {
cmd.Flag("max-upload-speed", "Limit the upload speed.").PlaceHolder("BYTES_PER_SEC").IntVar(&options.MaxUploadSpeedBytesPerSecond)
},
func(ctx context.Context, isNew bool) (storage.Storage, error) {
func(ctx context.Context, isNew bool) (blob.Storage, error) {
return gcs.New(ctx, &options)
},
)

View File

@@ -6,7 +6,7 @@
"github.com/pkg/errors"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
// RegisterStorageConnectFlags registers repository subcommand to connect to a storage
@@ -15,7 +15,7 @@ func RegisterStorageConnectFlags(
name string,
description string,
flags func(*kingpin.CmdClause),
connect func(ctx context.Context, isNew bool) (storage.Storage, error)) {
connect func(ctx context.Context, isNew bool) (blob.Storage, error)) {
if name != "from-config" {
// Set up 'create' subcommand

View File

@@ -5,8 +5,8 @@
"gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/s3"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/s3"
)
func init() {
@@ -25,7 +25,7 @@ func(cmd *kingpin.CmdClause) {
cmd.Flag("max-download-speed", "Limit the download speed.").PlaceHolder("BYTES_PER_SEC").IntVar(&s3options.MaxDownloadSpeedBytesPerSecond)
cmd.Flag("max-upload-speed", "Limit the upload speed.").PlaceHolder("BYTES_PER_SEC").IntVar(&s3options.MaxUploadSpeedBytesPerSecond)
},
func(ctx context.Context, isNew bool) (storage.Storage, error) {
func(ctx context.Context, isNew bool) (blob.Storage, error) {
return s3.New(ctx, &s3options)
},
)

View File

@@ -5,8 +5,8 @@
"gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/webdav"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/webdav"
)
func init() {
@@ -22,7 +22,7 @@ func(cmd *kingpin.CmdClause) {
cmd.Flag("url", "URL of WebDAV server").Required().StringVar(&options.URL)
cmd.Flag("flat", "Use flat directory structure").BoolVar(&connectFlat)
},
func(ctx context.Context, isNew bool) (storage.Storage, error) {
func(ctx context.Context, isNew bool) (blob.Storage, error) {
wo := options
if wo.Username != "" {

View File

@@ -9,9 +9,9 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob/filesystem"
"github.com/kopia/kopia/repo/blob/logging"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage/filesystem"
"github.com/kopia/kopia/repo/storage/logging"
)
const (

View File

@@ -0,0 +1,112 @@
package blobtesting
import (
"bytes"
"context"
"reflect"
"sort"
"testing"
"github.com/kopia/kopia/repo/blob"
)
// AssertGetBlock asserts that the specified blob has the expected content,
// exercising full reads, zero-length reads, partial reads of each half, and
// several invalid (offset, length) combinations.
func AssertGetBlock(ctx context.Context, t *testing.T, s blob.Storage, block blob.ID, expected []byte) {
	t.Helper()

	// Full read.
	got, err := s.GetBlob(ctx, block, 0, -1)
	if err != nil {
		t.Errorf("GetBlob(%v) returned error %v, expected data: %v", block, err, expected)
		return
	}
	if !bytes.Equal(got, expected) {
		t.Errorf("GetBlob(%v) returned %x, but expected %x", block, got, expected)
	}

	half := int64(len(expected) / 2)
	if half == 0 {
		// Blob too short to exercise partial reads.
		return
	}

	// Zero-length read must succeed and return no data.
	got, err = s.GetBlob(ctx, block, 0, 0)
	if err != nil {
		t.Errorf("GetBlob(%v) returned error %v, expected data: %v", block, err, expected)
		return
	}
	if len(got) != 0 {
		t.Errorf("GetBlob(%v) returned non-zero length: %v", block, len(got))
		return
	}

	// First half.
	got, err = s.GetBlob(ctx, block, 0, half)
	if err != nil {
		t.Errorf("GetBlob(%v) returned error %v, expected data: %v", block, err, expected)
		return
	}
	if !bytes.Equal(got, expected[0:half]) {
		t.Errorf("GetBlob(%v) returned %x, but expected %x", block, got, expected[0:half])
	}

	// Second half.
	got, err = s.GetBlob(ctx, block, half, int64(len(expected))-half)
	if err != nil {
		t.Errorf("GetBlob(%v) returned error %v, expected data: %v", block, err, expected)
		return
	}
	if !bytes.Equal(got, expected[len(expected)-int(half):]) {
		t.Errorf("GetBlob(%v) returned %x, but expected %x", block, got, expected[len(expected)-int(half):])
	}

	// Out-of-range and negative offsets must fail.
	AssertInvalidOffsetLength(ctx, t, s, block, -3, 1)
	AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)), 3)
	AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)-1), 3)
	AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)+1), 3)
}
// AssertInvalidOffsetLength verifies that the given combination of (offset,length) fails on GetBlob().
func AssertInvalidOffsetLength(ctx context.Context, t *testing.T, s blob.Storage, blobID blob.ID, offset, length int64) {
	// Added for consistency with the other assert helpers in this file so
	// failures are attributed to the caller's line, not this helper.
	t.Helper()

	if _, err := s.GetBlob(ctx, blobID, offset, length); err == nil {
		t.Errorf("GetBlob(%v,%v,%v) did not return error for invalid offset/length", blobID, offset, length)
	}
}
// AssertGetBlockNotFound asserts that GetBlob() for the specified blob returns blob.ErrBlobNotFound.
func AssertGetBlockNotFound(ctx context.Context, t *testing.T, s blob.Storage, blobID blob.ID) {
	t.Helper()

	b, err := s.GetBlob(ctx, blobID, 0, -1)
	// Message fixed: the sentinel is named ErrBlobNotFound, not ErrNotFound.
	if err != blob.ErrBlobNotFound || b != nil {
		t.Errorf("GetBlob(%v) returned %v, %v but expected ErrBlobNotFound", blobID, b, err)
	}
}
// AssertListResults asserts that listing blobs with the given prefix yields
// exactly the specified IDs (order-insensitive comparison).
func AssertListResults(ctx context.Context, t *testing.T, s blob.Storage, prefix blob.ID, want ...blob.ID) {
	t.Helper()

	var got []blob.ID

	err := s.ListBlobs(ctx, prefix, func(m blob.Metadata) error {
		got = append(got, m.BlobID)
		return nil
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if got, want := sorted(got), sorted(want); !reflect.DeepEqual(got, want) {
		t.Errorf("ListBlobs(%v) returned %v, but wanted %v", prefix, got, want)
	}
}
// sorted returns a copy of ids in ascending order; the input is not modified.
// A nil input yields a nil result (preserved so reflect.DeepEqual comparisons
// against nil slices in callers keep working).
func sorted(ids []blob.ID) []blob.ID {
	result := append([]blob.ID(nil), ids...)
	sort.Slice(result, func(a, b int) bool {
		return result[a] < result[b]
	})

	return result
}

View File

@@ -0,0 +1,2 @@
// Package blobtesting is used for testing BLOB storage implementations.
package blobtesting

View File

@@ -1,4 +1,4 @@
package storagetesting
package blobtesting
import (
"context"
@@ -6,7 +6,7 @@
"time"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var log = repologging.Logger("faulty-storage")
@@ -22,51 +22,51 @@ type Fault struct {
// FaultyStorage implements fault injection for Storage.
type FaultyStorage struct {
Base storage.Storage
Base blob.Storage
Faults map[string][]*Fault
mu sync.Mutex
}
// GetBlock implements storage.Storage
func (s *FaultyStorage) GetBlock(ctx context.Context, id string, offset, length int64) ([]byte, error) {
if err := s.getNextFault("GetBlock", id, offset, length); err != nil {
// GetBlob implements blob.Storage
func (s *FaultyStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64) ([]byte, error) {
if err := s.getNextFault("GetBlob", id, offset, length); err != nil {
return nil, err
}
return s.Base.GetBlock(ctx, id, offset, length)
return s.Base.GetBlob(ctx, id, offset, length)
}
// PutBlock implements storage.Storage
func (s *FaultyStorage) PutBlock(ctx context.Context, id string, data []byte) error {
if err := s.getNextFault("PutBlock", id, len(data)); err != nil {
// PutBlob implements blob.Storage
func (s *FaultyStorage) PutBlob(ctx context.Context, id blob.ID, data []byte) error {
if err := s.getNextFault("PutBlob", id, len(data)); err != nil {
return err
}
return s.Base.PutBlock(ctx, id, data)
return s.Base.PutBlob(ctx, id, data)
}
// DeleteBlock implements storage.Storage
func (s *FaultyStorage) DeleteBlock(ctx context.Context, id string) error {
if err := s.getNextFault("DeleteBlock", id); err != nil {
// DeleteBlob implements blob.Storage
func (s *FaultyStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
if err := s.getNextFault("DeleteBlob", id); err != nil {
return err
}
return s.Base.DeleteBlock(ctx, id)
return s.Base.DeleteBlob(ctx, id)
}
// ListBlocks implements storage.Storage
func (s *FaultyStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
if err := s.getNextFault("ListBlocks", prefix); err != nil {
// ListBlobs implements blob.Storage
func (s *FaultyStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
if err := s.getNextFault("ListBlobs", prefix); err != nil {
return err
}
return s.Base.ListBlocks(ctx, prefix, func(bm storage.BlockMetadata) error {
if err := s.getNextFault("ListBlocksItem", prefix); err != nil {
return s.Base.ListBlobs(ctx, prefix, func(bm blob.Metadata) error {
if err := s.getNextFault("ListBlobsItem", prefix); err != nil {
return err
}
return callback(bm)
})
}
// Close implements storage.Storage
// Close implements blob.Storage
func (s *FaultyStorage) Close(ctx context.Context) error {
if err := s.getNextFault("Close"); err != nil {
return err
@@ -74,8 +74,8 @@ func (s *FaultyStorage) Close(ctx context.Context) error {
return s.Base.Close(ctx)
}
// ConnectionInfo implements storage.Storage
func (s *FaultyStorage) ConnectionInfo() storage.ConnectionInfo {
// ConnectionInfo implements blob.Storage
func (s *FaultyStorage) ConnectionInfo() blob.ConnectionInfo {
return s.Base.ConnectionInfo()
}
@@ -112,4 +112,4 @@ func (s *FaultyStorage) getNextFault(method string, args ...interface{}) error {
return f.Err
}
var _ storage.Storage = (*FaultyStorage)(nil)
var _ blob.Storage = (*FaultyStorage)(nil)

View File

@@ -1,4 +1,4 @@
package storagetesting
package blobtesting
import (
"context"
@@ -8,17 +8,19 @@
"sync"
"time"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
type DataMap map[blob.ID][]byte
type mapStorage struct {
data map[string][]byte
keyTime map[string]time.Time
data DataMap
keyTime map[blob.ID]time.Time
timeNow func() time.Time
mutex sync.RWMutex
}
func (s *mapStorage) GetBlock(ctx context.Context, id string, offset, length int64) ([]byte, error) {
func (s *mapStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64) ([]byte, error) {
s.mutex.RLock()
defer s.mutex.RUnlock()
@@ -40,10 +42,10 @@ func (s *mapStorage) GetBlock(ctx context.Context, id string, offset, length int
return data[0:length], nil
}
return nil, storage.ErrBlockNotFound
return nil, blob.ErrBlobNotFound
}
func (s *mapStorage) PutBlock(ctx context.Context, id string, data []byte) error {
func (s *mapStorage) PutBlob(ctx context.Context, id blob.ID, data []byte) error {
s.mutex.Lock()
defer s.mutex.Unlock()
@@ -56,7 +58,7 @@ func (s *mapStorage) PutBlock(ctx context.Context, id string, data []byte) error
return nil
}
func (s *mapStorage) DeleteBlock(ctx context.Context, id string) error {
func (s *mapStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
s.mutex.Lock()
defer s.mutex.Unlock()
@@ -65,18 +67,20 @@ func (s *mapStorage) DeleteBlock(ctx context.Context, id string) error {
return nil
}
func (s *mapStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
func (s *mapStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
s.mutex.RLock()
keys := []string{}
keys := []blob.ID{}
for k := range s.data {
if strings.HasPrefix(k, prefix) {
if strings.HasPrefix(string(k), string(prefix)) {
keys = append(keys, k)
}
}
s.mutex.RUnlock()
sort.Strings(keys)
sort.Slice(keys, func(i, j int) bool {
return keys[i] < keys[j]
})
for _, k := range keys {
s.mutex.RLock()
@@ -86,8 +90,8 @@ func (s *mapStorage) ListBlocks(ctx context.Context, prefix string, callback fun
if !ok {
continue
}
if err := callback(storage.BlockMetadata{
BlockID: k,
if err := callback(blob.Metadata{
BlobID: k,
Length: int64(len(v)),
Timestamp: ts,
}); err != nil {
@@ -101,30 +105,30 @@ func (s *mapStorage) Close(ctx context.Context) error {
return nil
}
func (s *mapStorage) TouchBlock(ctx context.Context, blockID string, threshold time.Duration) error {
func (s *mapStorage) TouchBlob(ctx context.Context, blobID blob.ID, threshold time.Duration) error {
s.mutex.Lock()
defer s.mutex.Unlock()
if v, ok := s.keyTime[blockID]; ok {
if v, ok := s.keyTime[blobID]; ok {
n := s.timeNow()
if n.Sub(v) >= threshold {
s.keyTime[blockID] = n
s.keyTime[blobID] = n
}
}
return nil
}
func (s *mapStorage) ConnectionInfo() storage.ConnectionInfo {
func (s *mapStorage) ConnectionInfo() blob.ConnectionInfo {
// unsupported
return storage.ConnectionInfo{}
return blob.ConnectionInfo{}
}
// NewMapStorage returns an implementation of Storage backed by the contents of given map.
// Used primarily for testing.
func NewMapStorage(data map[string][]byte, keyTime map[string]time.Time, timeNow func() time.Time) storage.Storage {
func NewMapStorage(data DataMap, keyTime map[blob.ID]time.Time, timeNow func() time.Time) blob.Storage {
if keyTime == nil {
keyTime = make(map[string]time.Time)
keyTime = make(map[blob.ID]time.Time)
}
if timeNow == nil {
timeNow = time.Now

View File

@@ -1,4 +1,4 @@
package storagetesting
package blobtesting
import (
"context"
@@ -6,7 +6,7 @@
)
func TestMapStorage(t *testing.T) {
data := map[string][]byte{}
data := DataMap{}
r := NewMapStorage(data, nil, nil)
if r == nil {
t.Errorf("unexpected result: %v", r)

View File

@@ -1,4 +1,4 @@
package storagetesting
package blobtesting
import (
"bytes"
@@ -6,20 +6,20 @@
"reflect"
"testing"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
// VerifyStorage verifies the behavior of the specified storage.
func VerifyStorage(ctx context.Context, t *testing.T, r storage.Storage) {
func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage) {
blocks := []struct {
blk string
blk blob.ID
contents []byte
}{
{blk: string("abcdbbf4f0507d054ed5a80a5b65086f602b"), contents: []byte{}},
{blk: string("zxce0e35630770c54668a8cfb4e414c6bf8f"), contents: []byte{1}},
{blk: string("abff4585856ebf0748fd989e1dd623a8963d"), contents: bytes.Repeat([]byte{1}, 1000)},
{blk: string("abgc3dca496d510f492c858a2df1eb824e62"), contents: bytes.Repeat([]byte{1}, 10000)},
{blk: string("kopia.repository"), contents: bytes.Repeat([]byte{2}, 100)},
{blk: "abcdbbf4f0507d054ed5a80a5b65086f602b", contents: []byte{}},
{blk: "zxce0e35630770c54668a8cfb4e414c6bf8f", contents: []byte{1}},
{blk: "abff4585856ebf0748fd989e1dd623a8963d", contents: bytes.Repeat([]byte{1}, 1000)},
{blk: "abgc3dca496d510f492c858a2df1eb824e62", contents: bytes.Repeat([]byte{1}, 10000)},
{blk: "kopia.repository", contents: bytes.Repeat([]byte{2}, 100)},
}
// First verify that blocks don't exist.
@@ -27,13 +27,13 @@ func VerifyStorage(ctx context.Context, t *testing.T, r storage.Storage) {
AssertGetBlockNotFound(ctx, t, r, b.blk)
}
ctx2 := storage.WithUploadProgressCallback(ctx, func(desc string, completed, total int64) {
ctx2 := blob.WithUploadProgressCallback(ctx, func(desc string, completed, total int64) {
log.Infof("progress %v: %v/%v", desc, completed, total)
})
// Now add blocks.
for _, b := range blocks {
if err := r.PutBlock(ctx2, b.blk, b.contents); err != nil {
if err := r.PutBlob(ctx2, b.blk, b.contents); err != nil {
t.Errorf("can't put block: %v", err)
}
@@ -45,17 +45,17 @@ func VerifyStorage(ctx context.Context, t *testing.T, r storage.Storage) {
// Overwrite blocks.
for _, b := range blocks {
if err := r.PutBlock(ctx, b.blk, b.contents); err != nil {
if err := r.PutBlob(ctx, b.blk, b.contents); err != nil {
t.Errorf("can't put block: %v", err)
}
AssertGetBlock(ctx, t, r, b.blk, b.contents)
}
if err := r.DeleteBlock(ctx, blocks[0].blk); err != nil {
if err := r.DeleteBlob(ctx, blocks[0].blk); err != nil {
t.Errorf("unable to delete block: %v", err)
}
if err := r.DeleteBlock(ctx, blocks[0].blk); err != nil {
if err := r.DeleteBlob(ctx, blocks[0].blk); err != nil {
t.Errorf("invalid error when deleting deleted block: %v", err)
}
AssertListResults(ctx, t, r, "ab", blocks[2].blk, blocks[3].blk)
@@ -64,11 +64,11 @@ func VerifyStorage(ctx context.Context, t *testing.T, r storage.Storage) {
// AssertConnectionInfoRoundTrips verifies that the ConnectionInfo returned by a given storage can be used to create
// equivalent storage
func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s storage.Storage) {
func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s blob.Storage) {
t.Helper()
ci := s.ConnectionInfo()
s2, err := storage.NewStorage(ctx, ci)
s2, err := blob.NewStorage(ctx, ci)
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@@ -9,10 +9,10 @@
"testing"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/filesystem"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/filesystem"
)
const masterPassword = "foobarbazfoobarbaz"
@@ -125,7 +125,7 @@ func (e *Environment) MustReopen(t *testing.T) {
func (e *Environment) VerifyStorageBlockCount(t *testing.T, want int) {
var got int
_ = e.Repository.Storage.ListBlocks(context.Background(), "", func(_ storage.BlockMetadata) error {
_ = e.Repository.Blobs.ListBlobs(context.Background(), "", func(_ blob.Metadata) error {
got++
return nil
})

View File

@@ -16,6 +16,6 @@ func (s *Server) handleStatus(ctx context.Context, r *http.Request) (interface{}
ConfigFile: s.rep.ConfigFile,
CacheDir: s.rep.CacheDirectory,
BlockFormatting: bf,
Storage: s.rep.Storage.ConnectionInfo().Type,
Storage: s.rep.Blobs.ConnectionInfo().Type,
}, nil
}

View File

@@ -1,110 +0,0 @@
package storagetesting
import (
"bytes"
"context"
"reflect"
"sort"
"testing"
"github.com/kopia/kopia/repo/storage"
)
// AssertGetBlock asserts that the specified storage block has correct content.
func AssertGetBlock(ctx context.Context, t *testing.T, s storage.Storage, block string, expected []byte) {
t.Helper()
b, err := s.GetBlock(ctx, block, 0, -1)
if err != nil {
t.Errorf("GetBlock(%v) returned error %v, expected data: %v", block, err, expected)
return
}
if !bytes.Equal(b, expected) {
t.Errorf("GetBlock(%v) returned %x, but expected %x", block, b, expected)
}
half := int64(len(expected) / 2)
if half == 0 {
return
}
b, err = s.GetBlock(ctx, block, 0, 0)
if err != nil {
t.Errorf("GetBlock(%v) returned error %v, expected data: %v", block, err, expected)
return
}
if len(b) != 0 {
t.Errorf("GetBlock(%v) returned non-zero length: %v", block, len(b))
return
}
b, err = s.GetBlock(ctx, block, 0, half)
if err != nil {
t.Errorf("GetBlock(%v) returned error %v, expected data: %v", block, err, expected)
return
}
if !bytes.Equal(b, expected[0:half]) {
t.Errorf("GetBlock(%v) returned %x, but expected %x", block, b, expected[0:half])
}
b, err = s.GetBlock(ctx, block, half, int64(len(expected))-half)
if err != nil {
t.Errorf("GetBlock(%v) returned error %v, expected data: %v", block, err, expected)
return
}
if !bytes.Equal(b, expected[len(expected)-int(half):]) {
t.Errorf("GetBlock(%v) returned %x, but expected %x", block, b, expected[len(expected)-int(half):])
}
AssertInvalidOffsetLength(ctx, t, s, block, -3, 1)
AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)), 3)
AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)-1), 3)
AssertInvalidOffsetLength(ctx, t, s, block, int64(len(expected)+1), 3)
}
// AssertInvalidOffsetLength verifies that the given combination of (offset,length) fails on GetBlock()
func AssertInvalidOffsetLength(ctx context.Context, t *testing.T, s storage.Storage, block string, offset, length int64) {
if _, err := s.GetBlock(ctx, block, offset, length); err == nil {
t.Errorf("GetBlock(%v,%v,%v) did not return error for invalid offset/length", block, offset, length)
}
}
// AssertGetBlockNotFound asserts that GetBlock() for specified storage block returns ErrBlockNotFound.
func AssertGetBlockNotFound(ctx context.Context, t *testing.T, s storage.Storage, block string) {
t.Helper()
b, err := s.GetBlock(ctx, block, 0, -1)
if err != storage.ErrBlockNotFound || b != nil {
t.Errorf("GetBlock(%v) returned %v, %v but expected ErrBlockNotFound", block, b, err)
}
}
// AssertListResults asserts that the list results with given prefix return the specified list of names in order.
func AssertListResults(ctx context.Context, t *testing.T, s storage.Storage, prefix string, want ...string) {
t.Helper()
var names []string
if err := s.ListBlocks(ctx, prefix, func(e storage.BlockMetadata) error {
names = append(names, e.BlockID)
return nil
}); err != nil {
t.Fatalf("err: %v", err)
}
names = sorted(names)
want = sorted(want)
if !reflect.DeepEqual(names, want) {
t.Errorf("ListBlocks(%v) returned %v, but wanted %v", prefix, names, want)
}
}
func sorted(s []string) []string {
x := append([]string(nil), s...)
sort.Strings(x)
return x
}

View File

@@ -1,2 +0,0 @@
// Package storagetesting is used for testing Storage implementations.
package storagetesting

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"encoding/json"

2
repo/blob/doc.go Normal file
View File

@@ -0,0 +1,2 @@
// Package blob implements simple storage of immutable, unstructured binary large objects (BLOBs).
package blob

View File

@@ -15,7 +15,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var log = repologging.Logger("repo/filesystem")
@@ -35,12 +35,12 @@ type fsStorage struct {
Options
}
func (fs *fsStorage) GetBlock(ctx context.Context, blockID string, offset, length int64) ([]byte, error) {
_, path := fs.getShardedPathAndFilePath(blockID)
func (fs *fsStorage) GetBlob(ctx context.Context, blobID blob.ID, offset, length int64) ([]byte, error) {
_, path := fs.getShardedPathAndFilePath(blobID)
f, err := os.Open(path)
if os.IsNotExist(err) {
return nil, storage.ErrBlockNotFound
return nil, blob.ErrBlobNotFound
}
if err != nil {
@@ -65,19 +65,19 @@ func (fs *fsStorage) GetBlock(ctx context.Context, blockID string, offset, lengt
return b, nil
}
func getstringFromFileName(name string) (string, bool) {
func getBlobIDFromFileName(name string) (blob.ID, bool) {
if strings.HasSuffix(name, fsStorageChunkSuffix) {
return name[0 : len(name)-len(fsStorageChunkSuffix)], true
return blob.ID(name[0 : len(name)-len(fsStorageChunkSuffix)]), true
}
return string(""), false
return blob.ID(""), false
}
func makeFileName(blockID string) string {
return blockID + fsStorageChunkSuffix
func makeFileName(blobID blob.ID) string {
return string(blobID) + fsStorageChunkSuffix
}
func (fs *fsStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
func (fs *fsStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
var walkDir func(string, string) error
walkDir = func(directory string, currentPrefix string) error {
@@ -92,9 +92,9 @@ func (fs *fsStorage) ListBlocks(ctx context.Context, prefix string, callback fun
var match bool
if len(prefix) > len(newPrefix) {
match = strings.HasPrefix(prefix, newPrefix)
match = strings.HasPrefix(string(prefix), newPrefix)
} else {
match = strings.HasPrefix(newPrefix, prefix)
match = strings.HasPrefix(newPrefix, string(prefix))
}
if match {
@@ -102,10 +102,10 @@ func (fs *fsStorage) ListBlocks(ctx context.Context, prefix string, callback fun
return err
}
}
} else if fullID, ok := getstringFromFileName(currentPrefix + e.Name()); ok {
if strings.HasPrefix(fullID, prefix) {
if err := callback(storage.BlockMetadata{
BlockID: fullID,
} else if fullID, ok := getBlobIDFromFileName(currentPrefix + e.Name()); ok {
if strings.HasPrefix(string(fullID), string(prefix)) {
if err := callback(blob.Metadata{
BlobID: fullID,
Length: e.Size(),
Timestamp: e.ModTime(),
}); err != nil {
@@ -121,9 +121,9 @@ func (fs *fsStorage) ListBlocks(ctx context.Context, prefix string, callback fun
return walkDir(fs.Path, "")
}
// TouchBlock updates file modification time to current time if it's sufficiently old.
func (fs *fsStorage) TouchBlock(ctx context.Context, blockID string, threshold time.Duration) error {
_, path := fs.getShardedPathAndFilePath(blockID)
// TouchBlob updates file modification time to current time if it's sufficiently old.
func (fs *fsStorage) TouchBlob(ctx context.Context, blobID blob.ID, threshold time.Duration) error {
_, path := fs.getShardedPathAndFilePath(blobID)
st, err := os.Stat(path)
if err != nil {
return err
@@ -139,8 +139,8 @@ func (fs *fsStorage) TouchBlock(ctx context.Context, blockID string, threshold t
return os.Chtimes(path, n, n)
}
func (fs *fsStorage) PutBlock(ctx context.Context, blockID string, data []byte) error {
_, path := fs.getShardedPathAndFilePath(blockID)
func (fs *fsStorage) PutBlob(ctx context.Context, blobID blob.ID, data []byte) error {
_, path := fs.getShardedPathAndFilePath(blobID)
tempFile := fmt.Sprintf("%s.tmp.%d", path, rand.Int())
f, err := fs.createTempFileAndDir(tempFile)
@@ -185,8 +185,8 @@ func (fs *fsStorage) createTempFileAndDir(tempFile string) (*os.File, error) {
return f, err
}
func (fs *fsStorage) DeleteBlock(ctx context.Context, blockID string) error {
_, path := fs.getShardedPathAndFilePath(blockID)
func (fs *fsStorage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
_, path := fs.getShardedPathAndFilePath(blobID)
err := os.Remove(path)
if err == nil || os.IsNotExist(err) {
return nil
@@ -195,27 +195,27 @@ func (fs *fsStorage) DeleteBlock(ctx context.Context, blockID string) error {
return err
}
func (fs *fsStorage) getShardDirectory(blockID string) (string, string) {
func (fs *fsStorage) getShardDirectory(blobID blob.ID) (string, blob.ID) {
shardPath := fs.Path
if len(blockID) < 20 {
return shardPath, blockID
if len(blobID) < 20 {
return shardPath, blobID
}
for _, size := range fs.shards() {
shardPath = filepath.Join(shardPath, blockID[0:size])
blockID = blockID[size:]
shardPath = filepath.Join(shardPath, string(blobID[0:size]))
blobID = blobID[size:]
}
return shardPath, blockID
return shardPath, blobID
}
func (fs *fsStorage) getShardedPathAndFilePath(blockID string) (string, string) {
shardPath, blockID := fs.getShardDirectory(blockID)
result := filepath.Join(shardPath, makeFileName(blockID))
func (fs *fsStorage) getShardedPathAndFilePath(blobID blob.ID) (string, string) {
shardPath, blobID := fs.getShardDirectory(blobID)
result := filepath.Join(shardPath, makeFileName(blobID))
return shardPath, result
}
func (fs *fsStorage) ConnectionInfo() storage.ConnectionInfo {
return storage.ConnectionInfo{
func (fs *fsStorage) ConnectionInfo() blob.ConnectionInfo {
return blob.ConnectionInfo{
Type: fsStorageType,
Config: &fs.Options,
}
@@ -226,7 +226,7 @@ func (fs *fsStorage) Close(ctx context.Context) error {
}
// New creates new filesystem-backed storage in a specified directory.
func New(ctx context.Context, opts *Options) (storage.Storage, error) {
func New(ctx context.Context, opts *Options) (blob.Storage, error) {
var err error
if _, err = os.Stat(opts.Path); err != nil {
@@ -241,10 +241,10 @@ func New(ctx context.Context, opts *Options) (storage.Storage, error) {
}
func init() {
storage.AddSupportedStorage(
blob.AddSupportedStorage(
fsStorageType,
func() interface{} { return &Options{} },
func(ctx context.Context, o interface{}) (storage.Storage, error) {
func(ctx context.Context, o interface{}) (blob.Storage, error) {
return New(ctx, o.(*Options))
})
}

View File

@@ -0,0 +1,122 @@
package filesystem
import (
"context"
"io/ioutil"
"os"
"reflect"
"sort"
"testing"
"time"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/internal/blobtesting"
)
func TestFileStorage(t *testing.T) {
t.Parallel()
ctx := context.Background()
// Test varioush shard configurations.
for _, shardSpec := range [][]int{
{0},
{1},
{3, 3},
{2},
{1, 1},
{1, 2},
{2, 2, 2},
} {
path, _ := ioutil.TempDir("", "r-fs")
defer os.RemoveAll(path)
r, err := New(ctx, &Options{
Path: path,
DirectoryShards: shardSpec,
})
if r == nil || err != nil {
t.Errorf("unexpected result: %v %v", r, err)
}
blobtesting.VerifyStorage(ctx, t, r)
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, r)
if err := r.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}
}
}
const (
t1 = "392ee1bc299db9f235e046a62625afb84902"
t2 = "2a7ff4f29eddbcd4c18fa9e73fec20bbb71f"
t3 = "0dae5918f83e6a24c8b3e274ca1026e43f24"
)
func TestFileStorageTouch(t *testing.T) {
t.Parallel()
ctx := context.Background()
path, _ := ioutil.TempDir("", "r-fs")
defer os.RemoveAll(path)
r, err := New(ctx, &Options{
Path: path,
})
if r == nil || err != nil {
t.Errorf("unexpected result: %v %v", r, err)
}
fs := r.(*fsStorage)
assertNoError(t, fs.PutBlob(ctx, t1, []byte{1}))
time.Sleep(1 * time.Second) // sleep a bit to accommodate Apple filesystems with low timestamp resolution
assertNoError(t, fs.PutBlob(ctx, t2, []byte{1}))
time.Sleep(1 * time.Second)
assertNoError(t, fs.PutBlob(ctx, t3, []byte{1}))
verifyBlobTimestampOrder(t, fs, t1, t2, t3)
assertNoError(t, fs.TouchBlob(ctx, t2, 1*time.Hour)) // has no effect, all timestamps are very new
verifyBlobTimestampOrder(t, fs, t1, t2, t3)
assertNoError(t, fs.TouchBlob(ctx, t1, 0)) // moves t1 to the top of the pile
verifyBlobTimestampOrder(t, fs, t2, t3, t1)
time.Sleep(1 * time.Second)
assertNoError(t, fs.TouchBlob(ctx, t2, 0)) // moves t2 to the top of the pile
verifyBlobTimestampOrder(t, fs, t3, t1, t2)
time.Sleep(1 * time.Second)
assertNoError(t, fs.TouchBlob(ctx, t1, 0)) // moves t1 to the top of the pile
verifyBlobTimestampOrder(t, fs, t3, t2, t1)
}
func verifyBlobTimestampOrder(t *testing.T, st blob.Storage, want ...blob.ID) {
blobs, err := blob.ListAllBlobs(context.Background(), st, "")
if err != nil {
t.Errorf("error listing blobs: %v", err)
return
}
sort.Slice(blobs, func(i, j int) bool {
return blobs[i].Timestamp.Before(blobs[j].Timestamp)
})
var got []blob.ID
for _, b := range blobs {
got = append(got, b.BlobID)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("incorrect blob order: %v, wanted %v", blobs, want)
}
}
func assertNoError(t *testing.T, err error) {
t.Helper()
if err != nil {
t.Errorf("err: %v", err)
}
}

View File

@@ -18,7 +18,7 @@
"github.com/kopia/kopia/internal/retry"
"github.com/kopia/kopia/internal/throttle"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
gcsclient "cloud.google.com/go/storage"
)
@@ -38,7 +38,7 @@ type gcsStorage struct {
uploadThrottler *iothrottler.IOThrottlerPool
}
func (gcs *gcsStorage) GetBlock(ctx context.Context, b string, offset, length int64) ([]byte, error) {
func (gcs *gcsStorage) GetBlob(ctx context.Context, b blob.ID, offset, length int64) ([]byte, error) {
if offset < 0 {
return nil, errors.Errorf("invalid offset")
}
@@ -53,7 +53,7 @@ func (gcs *gcsStorage) GetBlock(ctx context.Context, b string, offset, length in
return ioutil.ReadAll(reader)
}
v, err := exponentialBackoff(fmt.Sprintf("GetBlock(%q,%v,%v)", b, offset, length), attempt)
v, err := exponentialBackoff(fmt.Sprintf("GetBlob(%q,%v,%v)", b, offset, length), attempt)
if err != nil {
return nil, translateError(err)
}
@@ -92,14 +92,14 @@ func translateError(err error) error {
case nil:
return nil
case gcsclient.ErrObjectNotExist:
return storage.ErrBlockNotFound
return blob.ErrBlobNotFound
case gcsclient.ErrBucketNotExist:
return storage.ErrBlockNotFound
return blob.ErrBlobNotFound
default:
return errors.Wrap(err, "unexpected GCS error")
}
}
func (gcs *gcsStorage) PutBlock(ctx context.Context, b string, data []byte) error {
func (gcs *gcsStorage) PutBlob(ctx context.Context, b blob.ID, data []byte) error {
ctx, cancel := context.WithCancel(ctx)
obj := gcs.bucket.Object(gcs.getObjectNameString(b))
@@ -107,15 +107,15 @@ func (gcs *gcsStorage) PutBlock(ctx context.Context, b string, data []byte) erro
writer.ChunkSize = 1 << 20
writer.ContentType = "application/x-kopia"
progressCallback := storage.ProgressCallback(ctx)
progressCallback := blob.ProgressCallback(ctx)
if progressCallback != nil {
progressCallback(b, 0, int64(len(data)))
defer progressCallback(b, int64(len(data)), int64(len(data)))
progressCallback(string(b), 0, int64(len(data)))
defer progressCallback(string(b), int64(len(data)), int64(len(data)))
writer.ProgressFunc = func(completed int64) {
if completed != int64(len(data)) {
progressCallback(b, completed, int64(len(data)))
progressCallback(string(b), completed, int64(len(data)))
}
}
}
@@ -133,33 +133,33 @@ func (gcs *gcsStorage) PutBlock(ctx context.Context, b string, data []byte) erro
return translateError(writer.Close())
}
func (gcs *gcsStorage) DeleteBlock(ctx context.Context, b string) error {
func (gcs *gcsStorage) DeleteBlob(ctx context.Context, b blob.ID) error {
attempt := func() (interface{}, error) {
return nil, gcs.bucket.Object(gcs.getObjectNameString(b)).Delete(gcs.ctx)
}
_, err := exponentialBackoff(fmt.Sprintf("DeleteBlock(%q)", b), attempt)
_, err := exponentialBackoff(fmt.Sprintf("DeleteBlob(%q)", b), attempt)
err = translateError(err)
if err == storage.ErrBlockNotFound {
if err == blob.ErrBlobNotFound {
return nil
}
return err
}
func (gcs *gcsStorage) getObjectNameString(blockID string) string {
return gcs.Prefix + blockID
func (gcs *gcsStorage) getObjectNameString(blobID blob.ID) string {
return gcs.Prefix + string(blobID)
}
func (gcs *gcsStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
func (gcs *gcsStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
lst := gcs.bucket.Objects(gcs.ctx, &gcsclient.Query{
Prefix: gcs.getObjectNameString(prefix),
})
oa, err := lst.Next()
for err == nil {
if err = callback(storage.BlockMetadata{
BlockID: oa.Name[len(gcs.Prefix):],
if err = callback(blob.Metadata{
BlobID: blob.ID(oa.Name[len(gcs.Prefix):]),
Length: oa.Size,
Timestamp: oa.Created,
}); err != nil {
@@ -175,8 +175,8 @@ func (gcs *gcsStorage) ListBlocks(ctx context.Context, prefix string, callback f
return nil
}
func (gcs *gcsStorage) ConnectionInfo() storage.ConnectionInfo {
return storage.ConnectionInfo{
func (gcs *gcsStorage) ConnectionInfo() blob.ConnectionInfo {
return blob.ConnectionInfo{
Type: gcsStorageType,
Config: &gcs.Options,
}
@@ -214,7 +214,7 @@ func tokenSourceFromCredentialsFile(ctx context.Context, fn string, scopes ...st
//
// By default the connection reuses credentials managed by (https://cloud.google.com/sdk/),
// but this can be disabled by setting IgnoreDefaultCredentials to true.
func New(ctx context.Context, opt *Options) (storage.Storage, error) {
func New(ctx context.Context, opt *Options) (blob.Storage, error) {
var ts oauth2.TokenSource
var err error
@@ -259,12 +259,12 @@ func New(ctx context.Context, opt *Options) (storage.Storage, error) {
}
func init() {
storage.AddSupportedStorage(
blob.AddSupportedStorage(
gcsStorageType,
func() interface{} {
return &Options{}
},
func(ctx context.Context, o interface{}) (storage.Storage, error) {
func(ctx context.Context, o interface{}) (blob.Storage, error) {
return New(ctx, o.(*Options))
})
}

View File

@@ -5,10 +5,10 @@
"os"
"testing"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/gcs"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/gcs"
)
func TestGCSStorage(t *testing.T) {
@@ -32,18 +32,18 @@ func TestGCSStorage(t *testing.T) {
t.Fatalf("unable to connect to GCS: %v", err)
}
if err := st.ListBlocks(ctx, "", func(bm storage.BlockMetadata) error {
return st.DeleteBlock(ctx, bm.BlockID)
if err := st.ListBlobs(ctx, "", func(bm blob.Metadata) error {
return st.DeleteBlob(ctx, bm.BlobID)
}); err != nil {
t.Fatalf("unable to clear GCS bucket: %v", err)
}
storagetesting.VerifyStorage(ctx, t, st)
storagetesting.AssertConnectionInfoRoundTrips(ctx, t, st)
blobtesting.VerifyStorage(ctx, t, st)
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st)
// delete everything again
if err := st.ListBlocks(ctx, "", func(bm storage.BlockMetadata) error {
return st.DeleteBlock(ctx, bm.BlockID)
if err := st.ListBlobs(ctx, "", func(bm blob.Metadata) error {
return st.DeleteBlob(ctx, bm.BlobID)
}); err != nil {
t.Fatalf("unable to clear GCS bucket: %v", err)
}
@@ -69,7 +69,7 @@ func TestGCSStorageInvalid(t *testing.T) {
}
defer st.Close(ctx)
if err := st.PutBlock(ctx, "xxx", []byte{1, 2, 3}); err == nil {
if err := st.PutBlob(ctx, "xxx", []byte{1, 2, 3}); err == nil {
t.Errorf("unexpecte success when adding to non-existent bucket")
}
}

View File

@@ -6,53 +6,53 @@
"time"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var log = repologging.Logger("repo/storage")
var log = repologging.Logger("repo/blob")
type loggingStorage struct {
base storage.Storage
base blob.Storage
printf func(string, ...interface{})
prefix string
}
func (s *loggingStorage) GetBlock(ctx context.Context, id string, offset, length int64) ([]byte, error) {
func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64) ([]byte, error) {
t0 := time.Now()
result, err := s.base.GetBlock(ctx, id, offset, length)
result, err := s.base.GetBlob(ctx, id, offset, length)
dt := time.Since(t0)
if len(result) < 20 {
s.printf(s.prefix+"GetBlock(%q,%v,%v)=(%#v, %#v) took %v", id, offset, length, result, err, dt)
s.printf(s.prefix+"GetBlob(%q,%v,%v)=(%#v, %#v) took %v", id, offset, length, result, err, dt)
} else {
s.printf(s.prefix+"GetBlock(%q,%v,%v)=({%#v bytes}, %#v) took %v", id, offset, length, len(result), err, dt)
s.printf(s.prefix+"GetBlob(%q,%v,%v)=({%#v bytes}, %#v) took %v", id, offset, length, len(result), err, dt)
}
return result, err
}
func (s *loggingStorage) PutBlock(ctx context.Context, id string, data []byte) error {
func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data []byte) error {
t0 := time.Now()
err := s.base.PutBlock(ctx, id, data)
err := s.base.PutBlob(ctx, id, data)
dt := time.Since(t0)
s.printf(s.prefix+"PutBlock(%q,len=%v)=%#v took %v", id, len(data), err, dt)
s.printf(s.prefix+"PutBlob(%q,len=%v)=%#v took %v", id, len(data), err, dt)
return err
}
func (s *loggingStorage) DeleteBlock(ctx context.Context, id string) error {
func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
t0 := time.Now()
err := s.base.DeleteBlock(ctx, id)
err := s.base.DeleteBlob(ctx, id)
dt := time.Since(t0)
s.printf(s.prefix+"DeleteBlock(%q)=%#v took %v", id, err, dt)
s.printf(s.prefix+"DeleteBlob(%q)=%#v took %v", id, err, dt)
return err
}
func (s *loggingStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
t0 := time.Now()
cnt := 0
err := s.base.ListBlocks(ctx, prefix, func(bi storage.BlockMetadata) error {
err := s.base.ListBlobs(ctx, prefix, func(bi blob.Metadata) error {
cnt++
return callback(bi)
})
s.printf(s.prefix+"ListBlocks(%q)=%v returned %v items and took %v", prefix, err, cnt, time.Since(t0))
s.printf(s.prefix+"ListBlobs(%q)=%v returned %v items and took %v", prefix, err, cnt, time.Since(t0))
return err
}
@@ -64,7 +64,7 @@ func (s *loggingStorage) Close(ctx context.Context) error {
return err
}
func (s *loggingStorage) ConnectionInfo() storage.ConnectionInfo {
func (s *loggingStorage) ConnectionInfo() blob.ConnectionInfo {
return s.base.ConnectionInfo()
}
@@ -72,7 +72,7 @@ func (s *loggingStorage) ConnectionInfo() storage.ConnectionInfo {
type Option func(s *loggingStorage)
// NewWrapper returns a Storage wrapper that logs all storage commands.
func NewWrapper(wrapped storage.Storage, options ...Option) storage.Storage {
func NewWrapper(wrapped blob.Storage, options ...Option) blob.Storage {
s := &loggingStorage{base: wrapped, printf: log.Debugf}
for _, o := range options {
o(s)

View File

@@ -5,7 +5,7 @@
"strings"
"testing"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/internal/blobtesting"
)
func TestLoggingStorage(t *testing.T) {
@@ -18,15 +18,15 @@ func TestLoggingStorage(t *testing.T) {
outputCount++
}
data := map[string][]byte{}
underlying := storagetesting.NewMapStorage(data, nil, nil)
data := blobtesting.DataMap{}
underlying := blobtesting.NewMapStorage(data, nil, nil)
st := NewWrapper(underlying, Output(myOutput), Prefix(myPrefix))
if st == nil {
t.Fatalf("unexpected result: %v", st)
}
ctx := context.Background()
storagetesting.VerifyStorage(ctx, t, st)
blobtesting.VerifyStorage(ctx, t, st)
if err := st.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}

View File

@@ -1,4 +1,4 @@
package storage
package blob
import "context"

View File

@@ -3,6 +3,6 @@
import (
// Register well-known blob storage providers
_ "github.com/kopia/kopia/repo/storage/filesystem"
_ "github.com/kopia/kopia/repo/storage/gcs"
_ "github.com/kopia/kopia/repo/blob/filesystem"
_ "github.com/kopia/kopia/repo/blob/gcs"
)

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"context"

View File

@@ -13,7 +13,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/retry"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
const (
@@ -31,7 +31,7 @@ type s3Storage struct {
uploadThrottler *iothrottler.IOThrottlerPool
}
func (s *s3Storage) GetBlock(ctx context.Context, b string, offset, length int64) ([]byte, error) {
func (s *s3Storage) GetBlob(ctx context.Context, b blob.ID, offset, length int64) ([]byte, error) {
attempt := func() (interface{}, error) {
var opt minio.GetObjectOptions
if length > 0 {
@@ -67,7 +67,7 @@ func (s *s3Storage) GetBlock(ctx context.Context, b string, offset, length int64
return b, nil
}
v, err := exponentialBackoff(fmt.Sprintf("GetBlock(%q,%v,%v)", b, offset, length), attempt)
v, err := exponentialBackoff(fmt.Sprintf("GetBlob(%q,%v,%v)", b, offset, length), attempt)
if err != nil {
return nil, translateError(err)
}
@@ -94,27 +94,27 @@ func translateError(err error) error {
return nil
}
if me.StatusCode == 404 {
return storage.ErrBlockNotFound
return blob.ErrBlobNotFound
}
}
return err
}
func (s *s3Storage) PutBlock(ctx context.Context, b string, data []byte) error {
func (s *s3Storage) PutBlob(ctx context.Context, b blob.ID, data []byte) error {
throttled, err := s.uploadThrottler.AddReader(ioutil.NopCloser(bytes.NewReader(data)))
if err != nil {
return err
}
progressCallback := storage.ProgressCallback(ctx)
progressCallback := blob.ProgressCallback(ctx)
if progressCallback != nil {
progressCallback(b, 0, int64(len(data)))
defer progressCallback(b, int64(len(data)), int64(len(data)))
progressCallback(string(b), 0, int64(len(data)))
defer progressCallback(string(b), int64(len(data)), int64(len(data)))
}
n, err := s.cli.PutObject(s.BucketName, s.getObjectNameString(b), throttled, -1, minio.PutObjectOptions{
ContentType: "application/x-kopia",
Progress: newProgressReader(progressCallback, b, int64(len(data))),
Progress: newProgressReader(progressCallback, string(b), int64(len(data))),
})
if err == io.EOF && n == 0 {
// special case empty stream
@@ -126,28 +126,28 @@ func (s *s3Storage) PutBlock(ctx context.Context, b string, data []byte) error {
return translateError(err)
}
func (s *s3Storage) DeleteBlock(ctx context.Context, b string) error {
func (s *s3Storage) DeleteBlob(ctx context.Context, b blob.ID) error {
attempt := func() (interface{}, error) {
return nil, s.cli.RemoveObject(s.BucketName, s.getObjectNameString(b))
}
_, err := exponentialBackoff(fmt.Sprintf("DeleteBlock(%q)", b), attempt)
_, err := exponentialBackoff(fmt.Sprintf("DeleteBlob(%q)", b), attempt)
return translateError(err)
}
func (s *s3Storage) getObjectNameString(b string) string {
return s.Prefix + b
func (s *s3Storage) getObjectNameString(b blob.ID) string {
return s.Prefix + string(b)
}
func (s *s3Storage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
oi := s.cli.ListObjects(s.BucketName, s.Prefix+prefix, false, ctx.Done())
func (s *s3Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
oi := s.cli.ListObjects(s.BucketName, s.getObjectNameString(prefix), false, ctx.Done())
for o := range oi {
if err := o.Err; err != nil {
return err
}
bm := storage.BlockMetadata{
BlockID: o.Key[len(s.Prefix):],
bm := blob.Metadata{
BlobID: blob.ID(o.Key[len(s.Prefix):]),
Length: o.Size,
Timestamp: o.LastModified,
}
@@ -160,8 +160,8 @@ func (s *s3Storage) ListBlocks(ctx context.Context, prefix string, callback func
return nil
}
func (s *s3Storage) ConnectionInfo() storage.ConnectionInfo {
return storage.ConnectionInfo{
func (s *s3Storage) ConnectionInfo() blob.ConnectionInfo {
return blob.ConnectionInfo{
Type: s3storageType,
Config: &s.Options,
}
@@ -176,8 +176,8 @@ func (s *s3Storage) String() string {
}
type progressReader struct {
cb storage.ProgressFunc
blockID string
cb blob.ProgressFunc
blobID string
completed int64
totalLength int64
lastReported int64
@@ -186,18 +186,18 @@ type progressReader struct {
func (r *progressReader) Read(b []byte) (int, error) {
r.completed += int64(len(b))
if r.completed >= r.lastReported+1000000 && r.completed < r.totalLength {
r.cb(r.blockID, r.completed, r.totalLength)
r.cb(r.blobID, r.completed, r.totalLength)
r.lastReported = r.completed
}
return len(b), nil
}
func newProgressReader(cb storage.ProgressFunc, blockID string, totalLength int64) io.Reader {
func newProgressReader(cb blob.ProgressFunc, blobID string, totalLength int64) io.Reader {
if cb == nil {
return nil
}
return &progressReader{cb: cb, blockID: blockID, totalLength: totalLength}
return &progressReader{cb: cb, blobID: blobID, totalLength: totalLength}
}
func toBandwidth(bytesPerSecond int) iothrottler.Bandwidth {
@@ -211,7 +211,7 @@ func toBandwidth(bytesPerSecond int) iothrottler.Bandwidth {
// New creates new S3-backed storage with specified options:
//
// - the 'BucketName' field is required and all other parameters are optional.
func New(ctx context.Context, opt *Options) (storage.Storage, error) {
func New(ctx context.Context, opt *Options) (blob.Storage, error) {
if opt.BucketName == "" {
return nil, errors.New("bucket name must be specified")
}
@@ -234,12 +234,12 @@ func New(ctx context.Context, opt *Options) (storage.Storage, error) {
}
func init() {
storage.AddSupportedStorage(
blob.AddSupportedStorage(
s3storageType,
func() interface{} {
return &Options{}
},
func(ctx context.Context, o interface{}) (storage.Storage, error) {
func(ctx context.Context, o interface{}) (blob.Storage, error) {
return New(ctx, o.(*Options))
})
}

View File

@@ -13,8 +13,8 @@
"github.com/minio/minio-go"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
// https://github.com/minio/minio-go
@@ -75,8 +75,8 @@ func TestS3Storage(t *testing.T) {
t.Fatalf("err: %v", err)
}
storagetesting.VerifyStorage(ctx, t, st)
storagetesting.AssertConnectionInfoRoundTrips(ctx, t, st)
blobtesting.VerifyStorage(ctx, t, st)
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st)
if err := st.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}
@@ -103,14 +103,14 @@ func cleanupOldData(ctx context.Context, t *testing.T) {
t.Fatalf("err: %v", err)
}
_ = st.ListBlocks(ctx, "", func(it storage.BlockMetadata) error {
_ = st.ListBlobs(ctx, "", func(it blob.Metadata) error {
age := time.Since(it.Timestamp)
if age > cleanupAge {
if err := st.DeleteBlock(ctx, it.BlockID); err != nil {
t.Errorf("warning: unable to delete %q: %v", it.BlockID, err)
if err := st.DeleteBlob(ctx, it.BlobID); err != nil {
t.Errorf("warning: unable to delete %q: %v", it.BlobID, err)
}
} else {
log.Printf("keeping %v", it.BlockID)
log.Printf("keeping %v", it.BlobID)
}
return nil
})

111
repo/blob/storage.go Normal file
View File

@@ -0,0 +1,111 @@
package blob
import (
"context"
"time"
"github.com/pkg/errors"
)
// CancelFunc requests cancellation of an in-progress storage operation.
// It is safe to call from a different goroutine than the one running the operation.
type CancelFunc func()
// Storage encapsulates API for connecting to blob storage.
//
// The underlying storage system must provide:
//
// * high durability, availability and bit-rot protection
// * read-after-write - blob written using PutBlob() must be immediately readable using GetBlob() and ListBlobs()
// * atomicity - it mustn't be possible to observe partial results of PutBlob() via either GetBlob() or ListBlobs()
// * timestamps that don't go back in time (small clock skew up to minutes is allowed)
// * reasonably low latency for retrievals
//
// The required semantics are provided by existing commercial cloud storage products (Google Cloud, AWS, Azure).
type Storage interface {
	// PutBlob uploads the blob with given data to the repository or replaces existing blob with the provided
	// id with given contents.
	PutBlob(ctx context.Context, blobID ID, data []byte) error

	// DeleteBlob removes the blob from storage. Future GetBlob() operations will fail with ErrBlobNotFound.
	DeleteBlob(ctx context.Context, blobID ID) error

	// GetBlob returns full or partial contents of a blob with given ID.
	// If length>0, the function retrieves a range of bytes [offset,offset+length)
	// If length<0, the entire blob must be fetched.
	GetBlob(ctx context.Context, blobID ID, offset, length int64) ([]byte, error)

	// ListBlobs invokes the provided callback for each blob in the storage.
	// Iteration continues until the callback returns an error or until all matching blobs have been reported.
	ListBlobs(ctx context.Context, blobIDPrefix ID, cb func(bm Metadata) error) error

	// ConnectionInfo returns JSON-serializable data structure containing information required to
	// connect to storage.
	ConnectionInfo() ConnectionInfo

	// Close releases all resources associated with storage.
	Close(ctx context.Context) error
}
// ID is a string that represents blob identifier.
// It is a distinct type from higher-level (CABS) block IDs, so the two cannot be mixed accidentally.
type ID string
// Metadata represents metadata about a single BLOB in a storage.
type Metadata struct {
	BlobID    ID        // identifier of the blob
	Length    int64     // length of the blob contents in bytes
	Timestamp time.Time // modification time as reported by the storage backend
}
// ErrBlobNotFound is returned when a BLOB cannot be found in storage.
// Storage implementations translate their backend-specific "not found" errors to this sentinel.
var ErrBlobNotFound = errors.New("BLOB not found")
// ListAllBlobs returns Metadata for all blobs in a given storage that have the provided name prefix.
func ListAllBlobs(ctx context.Context, st Storage, prefix ID) ([]Metadata, error) {
var result []Metadata
err := st.ListBlobs(ctx, prefix, func(bm Metadata) error {
result = append(result, bm)
return nil
})
return result, err
}
// ListAllBlobsConsistent lists all blobs with given name prefix in the provided storage until the results are
// consistent. The results are consistent if the list result fetched twice is identical. This guarantees that while
// the first scan was in progress, no new blob was added or removed.
// maxAttempts specifies maximum number of list attempts (must be >= 2)
func ListAllBlobsConsistent(ctx context.Context, st Storage, prefix ID, maxAttempts int) ([]Metadata, error) {
	var previous []Metadata

	for attempt := 0; attempt < maxAttempts; attempt++ {
		current, err := ListAllBlobs(ctx, st, prefix)
		if err != nil {
			return nil, err
		}

		// Two consecutive identical scans mean no blob was added or removed in between.
		if attempt > 0 && sameBlobs(current, previous) {
			return current, nil
		}

		previous = current
	}

	return nil, errors.Errorf("unable to achieve consistent snapshot despite %v attempts", maxAttempts)
}
// sameBlobs returns true if b1 & b2 contain the same blobs (ignoring order).
func sameBlobs(b1, b2 []Metadata) bool {
if len(b1) != len(b2) {
return false
}
m := map[ID]Metadata{}
for _, b := range b1 {
m[b.BlobID] = b
}
for _, b := range b2 {
if m[b.BlobID] != b {
return false
}
}
return true
}

57
repo/blob/storage_test.go Normal file
View File

@@ -0,0 +1,57 @@
package blob_test
import (
"context"
"testing"
"time"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
// TestListAllBlobsConsistent verifies that ListAllBlobsConsistent retries until two
// scans agree, and therefore picks up a blob added concurrently during the first scan.
func TestListAllBlobsConsistent(t *testing.T) {
	ctx := context.Background()
	data := blobtesting.DataMap{}
	st := blobtesting.NewMapStorage(data, nil, time.Now)

	// seed the storage with three blobs matching the "foo" prefix.
	st.PutBlob(ctx, "foo1", []byte{1, 2, 3}) //nolint:errcheck
	st.PutBlob(ctx, "foo2", []byte{1, 2, 3}) //nolint:errcheck
	st.PutBlob(ctx, "foo3", []byte{1, 2, 3}) //nolint:errcheck

	// set up faulty storage that will add a blob while a scan is in progress.
	// NOTE(review): assumes the "ListBlobsItem" fault fires once during the first
	// list pass, making passes 1 and 2 differ — confirm against blobtesting.FaultyStorage.
	f := &blobtesting.FaultyStorage{
		Base: st,
		Faults: map[string][]*blobtesting.Fault{
			"ListBlobsItem": {
				{ErrCallback: func() error {
					st.PutBlob(ctx, "foo0", []byte{1, 2, 3}) //nolint:errcheck
					return nil
				}},
			},
		},
	}

	r, err := blob.ListAllBlobsConsistent(ctx, f, "foo", 3)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	// make sure we get the list with 4 items, not 3.
	if got, want := len(r), 4; got != want {
		t.Errorf("unexpected list result count: %v, want %v", got, want)
	}
}
// TestListAllBlobsConsistentEmpty verifies that listing an empty storage succeeds
// and returns zero results (two empty scans are trivially consistent).
func TestListAllBlobsConsistentEmpty(t *testing.T) {
	ctx := context.Background()
	data := blobtesting.DataMap{}
	st := blobtesting.NewMapStorage(data, nil, time.Now)

	r, err := blob.ListAllBlobsConsistent(ctx, st, "foo", 3)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	if got, want := len(r), 0; got != want {
		t.Errorf("unexpected list result count: %v, want %v", got, want)
	}
}

View File

@@ -13,7 +13,7 @@
"github.com/pkg/errors"
"github.com/studio-b12/gowebdav"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
const (
@@ -35,8 +35,8 @@ type davStorage struct {
cli *gowebdav.Client
}
func (d *davStorage) GetBlock(ctx context.Context, blockID string, offset, length int64) ([]byte, error) {
_, path := d.getDirPathAndFilePath(blockID)
func (d *davStorage) GetBlob(ctx context.Context, blobID blob.ID, offset, length int64) ([]byte, error) {
_, path := d.getDirPathAndFilePath(blobID)
data, err := d.cli.Read(path)
if err != nil {
@@ -63,7 +63,7 @@ func (d *davStorage) translateError(err error) error {
case *os.PathError:
switch err.Err.Error() {
case "404":
return storage.ErrBlockNotFound
return blob.ErrBlobNotFound
}
return err
default:
@@ -71,7 +71,7 @@ func (d *davStorage) translateError(err error) error {
}
}
func getBlockIDFromFileName(name string) (string, bool) {
func getBlobIDFromFilename(name string) (string, bool) {
if strings.HasSuffix(name, fsStorageChunkSuffix) {
return name[0 : len(name)-len(fsStorageChunkSuffix)], true
}
@@ -79,11 +79,11 @@ func getBlockIDFromFileName(name string) (string, bool) {
return "", false
}
func makeFileName(blockID string) string {
return blockID + fsStorageChunkSuffix
func makeFileName(blobID blob.ID) string {
return string(blobID) + fsStorageChunkSuffix
}
func (d *davStorage) ListBlocks(ctx context.Context, prefix string, callback func(storage.BlockMetadata) error) error {
func (d *davStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
var walkDir func(string, string) error
walkDir = func(path string, currentPrefix string) error {
@@ -103,9 +103,9 @@ func (d *davStorage) ListBlocks(ctx context.Context, prefix string, callback fun
if len(prefix) > len(newPrefix) {
// looking for 'abcd', got 'ab' so far, worth trying
match = strings.HasPrefix(prefix, newPrefix)
match = strings.HasPrefix(string(prefix), newPrefix)
} else {
match = strings.HasPrefix(newPrefix, prefix)
match = strings.HasPrefix(newPrefix, string(prefix))
}
if match {
@@ -113,10 +113,10 @@ func (d *davStorage) ListBlocks(ctx context.Context, prefix string, callback fun
return err
}
}
} else if fullID, ok := getBlockIDFromFileName(currentPrefix + e.Name()); ok {
if strings.HasPrefix(fullID, prefix) {
if err := callback(storage.BlockMetadata{
BlockID: fullID,
} else if fullID, ok := getBlobIDFromFilename(currentPrefix + e.Name()); ok {
if strings.HasPrefix(fullID, string(prefix)) {
if err := callback(blob.Metadata{
BlobID: blob.ID(fullID),
Length: e.Size(),
Timestamp: e.ModTime(),
}); err != nil {
@@ -132,11 +132,11 @@ func (d *davStorage) ListBlocks(ctx context.Context, prefix string, callback fun
return walkDir("", "")
}
func (d *davStorage) PutBlock(ctx context.Context, blockID string, data []byte) error {
dirPath, filePath := d.getDirPathAndFilePath(blockID)
func (d *davStorage) PutBlob(ctx context.Context, blobID blob.ID, data []byte) error {
dirPath, filePath := d.getDirPathAndFilePath(blobID)
tmpPath := fmt.Sprintf("%v-%v", filePath, rand.Int63())
if err := d.translateError(d.cli.Write(tmpPath, data, 0600)); err != nil {
if err != storage.ErrBlockNotFound {
if err != blob.ErrBlobNotFound {
return err
}
@@ -149,32 +149,32 @@ func (d *davStorage) PutBlock(ctx context.Context, blockID string, data []byte)
return d.translateError(d.cli.Rename(tmpPath, filePath, true))
}
func (d *davStorage) DeleteBlock(ctx context.Context, blockID string) error {
_, filePath := d.getDirPathAndFilePath(blockID)
func (d *davStorage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
_, filePath := d.getDirPathAndFilePath(blobID)
return d.translateError(d.cli.Remove(filePath))
}
func (d *davStorage) getShardDirectory(blockID string) (string, string) {
func (d *davStorage) getShardDirectory(blobID blob.ID) (string, blob.ID) {
shardPath := "/"
if len(blockID) < 20 {
return shardPath, blockID
if len(blobID) < 20 {
return shardPath, blobID
}
for _, size := range d.shards() {
shardPath = filepath.Join(shardPath, blockID[0:size])
blockID = blockID[size:]
shardPath = filepath.Join(shardPath, string(blobID[0:size]))
blobID = blobID[size:]
}
return shardPath, blockID
return shardPath, blobID
}
func (d *davStorage) getDirPathAndFilePath(blockID string) (string, string) {
shardPath, blockID := d.getShardDirectory(blockID)
result := filepath.Join(shardPath, makeFileName(blockID))
func (d *davStorage) getDirPathAndFilePath(blobID blob.ID) (string, string) {
shardPath, blobID := d.getShardDirectory(blobID)
result := filepath.Join(shardPath, makeFileName(blobID))
return shardPath, result
}
func (d *davStorage) ConnectionInfo() storage.ConnectionInfo {
return storage.ConnectionInfo{
func (d *davStorage) ConnectionInfo() blob.ConnectionInfo {
return blob.ConnectionInfo{
Type: davStorageType,
Config: &d.Options,
}
@@ -185,7 +185,7 @@ func (d *davStorage) Close(ctx context.Context) error {
}
// New creates new WebDAV-backed storage in a specified URL.
func New(ctx context.Context, opts *Options) (storage.Storage, error) {
func New(ctx context.Context, opts *Options) (blob.Storage, error) {
r := &davStorage{
Options: *opts,
cli: gowebdav.NewClient(opts.URL, opts.Username, opts.Password),
@@ -202,10 +202,10 @@ func New(ctx context.Context, opts *Options) (storage.Storage, error) {
}
func init() {
storage.AddSupportedStorage(
blob.AddSupportedStorage(
davStorageType,
func() interface{} { return &Options{} },
func(ctx context.Context, o interface{}) (storage.Storage, error) {
func(ctx context.Context, o interface{}) (blob.Storage, error) {
return New(ctx, o.(*Options))
})
}

View File

@@ -11,7 +11,7 @@
"golang.org/x/net/webdav"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/internal/blobtesting"
)
func TestWebDAVStorage(t *testing.T) {
@@ -55,8 +55,8 @@ func TestWebDAVStorage(t *testing.T) {
t.Errorf("unexpected result: %v %v", r, err)
}
storagetesting.VerifyStorage(ctx, t, r)
storagetesting.AssertConnectionInfoRoundTrips(ctx, t, r)
blobtesting.VerifyStorage(ctx, t, r)
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, r)
if err := r.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}

View File

@@ -10,8 +10,8 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/filesystem"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/filesystem"
)
const (
@@ -20,8 +20,8 @@
)
type blockCache struct {
st storage.Storage
cacheStorage storage.Storage
st blob.Storage
cacheStorage blob.Storage
maxSizeBytes int64
hmacSecret []byte
sweepFrequency time.Duration
@@ -34,10 +34,10 @@ type blockCache struct {
}
type blockToucher interface {
TouchBlock(ctx context.Context, blockID string, threshold time.Duration) error
TouchBlob(ctx context.Context, blockID blob.ID, threshold time.Duration) error
}
func adjustCacheKey(cacheKey string) string {
func adjustCacheKey(cacheKey blob.ID) blob.ID {
// block IDs with odd length have a single-byte prefix.
// move the prefix to the end of cache key to make sure the top level shard is spread 256 ways.
if len(cacheKey)%2 == 1 {
@@ -47,7 +47,7 @@ func adjustCacheKey(cacheKey string) string {
return cacheKey
}
func (c *blockCache) getContentBlock(ctx context.Context, cacheKey string, physicalBlockID string, offset, length int64) ([]byte, error) {
func (c *blockCache) getContentBlock(ctx context.Context, cacheKey blob.ID, blobID blob.ID, offset, length int64) ([]byte, error) {
cacheKey = adjustCacheKey(cacheKey)
useCache := shouldUseBlockCache(ctx) && c.cacheStorage != nil
@@ -57,14 +57,14 @@ func (c *blockCache) getContentBlock(ctx context.Context, cacheKey string, physi
}
}
b, err := c.st.GetBlock(ctx, physicalBlockID, offset, length)
if err == storage.ErrBlockNotFound {
b, err := c.st.GetBlob(ctx, blobID, offset, length)
if err == blob.ErrBlobNotFound {
// not found in underlying storage
return nil, err
}
if err == nil && useCache {
if puterr := c.cacheStorage.PutBlock(ctx, cacheKey, appendHMAC(b, c.hmacSecret)); puterr != nil {
if puterr := c.cacheStorage.PutBlob(ctx, cacheKey, appendHMAC(b, c.hmacSecret)); puterr != nil {
log.Warningf("unable to write cache item %v: %v", cacheKey, puterr)
}
}
@@ -72,13 +72,13 @@ func (c *blockCache) getContentBlock(ctx context.Context, cacheKey string, physi
return b, err
}
func (c *blockCache) readAndVerifyCacheBlock(ctx context.Context, cacheKey string) []byte {
b, err := c.cacheStorage.GetBlock(ctx, cacheKey, 0, -1)
func (c *blockCache) readAndVerifyCacheBlock(ctx context.Context, cacheKey blob.ID) []byte {
b, err := c.cacheStorage.GetBlob(ctx, cacheKey, 0, -1)
if err == nil {
b, err = verifyAndStripHMAC(b, c.hmacSecret)
if err == nil {
if t, ok := c.cacheStorage.(blockToucher); ok {
t.TouchBlock(ctx, cacheKey, c.touchThreshold) //nolint:errcheck
t.TouchBlob(ctx, cacheKey, c.touchThreshold) //nolint:errcheck
}
// retrieved from cache and HMAC valid
@@ -90,7 +90,7 @@ func (c *blockCache) readAndVerifyCacheBlock(ctx context.Context, cacheKey strin
return nil
}
if err != storage.ErrBlockNotFound {
if err != blob.ErrBlobNotFound {
log.Warningf("unable to read cache %v: %v", cacheKey, err)
}
return nil
@@ -115,8 +115,8 @@ func (c *blockCache) sweepDirectoryPeriodically(ctx context.Context) {
}
}
// A blockMetadataHeap implements heap.Interface and holds storage.BlockMetadata.
type blockMetadataHeap []storage.BlockMetadata
// A blockMetadataHeap implements heap.Interface and holds blob.Metadata.
type blockMetadataHeap []blob.Metadata
func (h blockMetadataHeap) Len() int { return len(h) }
@@ -129,7 +129,7 @@ func (h blockMetadataHeap) Swap(i, j int) {
}
func (h *blockMetadataHeap) Push(x interface{}) {
*h = append(*h, x.(storage.BlockMetadata))
*h = append(*h, x.(blob.Metadata))
}
func (h *blockMetadataHeap) Pop() interface{} {
@@ -153,14 +153,14 @@ func (c *blockCache) sweepDirectory(ctx context.Context) (err error) {
var h blockMetadataHeap
var totalRetainedSize int64
err = c.cacheStorage.ListBlocks(ctx, "", func(it storage.BlockMetadata) error {
err = c.cacheStorage.ListBlobs(ctx, "", func(it blob.Metadata) error {
heap.Push(&h, it)
totalRetainedSize += it.Length
if totalRetainedSize > c.maxSizeBytes {
oldest := heap.Pop(&h).(storage.BlockMetadata)
if delerr := c.cacheStorage.DeleteBlock(ctx, oldest.BlockID); delerr != nil {
log.Warningf("unable to remove %v: %v", oldest.BlockID, delerr)
oldest := heap.Pop(&h).(blob.Metadata)
if delerr := c.cacheStorage.DeleteBlob(ctx, oldest.BlobID); delerr != nil {
log.Warningf("unable to remove %v: %v", oldest.BlobID, delerr)
} else {
totalRetainedSize -= oldest.Length
}
@@ -176,8 +176,8 @@ func (c *blockCache) sweepDirectory(ctx context.Context) (err error) {
return nil
}
func newBlockCache(ctx context.Context, st storage.Storage, caching CachingOptions) (*blockCache, error) {
var cacheStorage storage.Storage
func newBlockCache(ctx context.Context, st blob.Storage, caching CachingOptions) (*blockCache, error) {
var cacheStorage blob.Storage
var err error
if caching.MaxCacheSizeBytes > 0 && caching.CacheDirectory != "" {
@@ -201,7 +201,7 @@ func newBlockCache(ctx context.Context, st storage.Storage, caching CachingOptio
return newBlockCacheWithCacheStorage(ctx, st, cacheStorage, caching, defaultTouchThreshold, defaultSweepFrequency)
}
func newBlockCacheWithCacheStorage(ctx context.Context, st, cacheStorage storage.Storage, caching CachingOptions, touchThreshold time.Duration, sweepFrequency time.Duration) (*blockCache, error) {
func newBlockCacheWithCacheStorage(ctx context.Context, st, cacheStorage blob.Storage, caching CachingOptions, touchThreshold time.Duration, sweepFrequency time.Duration) (*blockCache, error) {
c := &blockCache{
st: st,
cacheStorage: cacheStorage,

View File

@@ -13,22 +13,22 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
func newUnderlyingStorageForBlockCacheTesting(t *testing.T) storage.Storage {
func newUnderlyingStorageForBlockCacheTesting(t *testing.T) blob.Storage {
ctx := context.Background()
data := map[string][]byte{}
st := storagetesting.NewMapStorage(data, nil, nil)
assertNoError(t, st.PutBlock(ctx, "block-1", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
assertNoError(t, st.PutBlock(ctx, "block-4k", bytes.Repeat([]byte{1, 2, 3, 4}, 1000))) // 4000 bytes
data := blobtesting.DataMap{}
st := blobtesting.NewMapStorage(data, nil, nil)
assertNoError(t, st.PutBlob(ctx, "block-1", []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
assertNoError(t, st.PutBlob(ctx, "block-4k", bytes.Repeat([]byte{1, 2, 3, 4}, 1000))) // 4000 bytes
return st
}
func TestCacheExpiration(t *testing.T) {
cacheData := map[string][]byte{}
cacheStorage := storagetesting.NewMapStorage(cacheData, nil, nil)
cacheData := blobtesting.DataMap{}
cacheStorage := blobtesting.NewMapStorage(cacheData, nil, nil)
underlyingStorage := newUnderlyingStorageForBlockCacheTesting(t)
@@ -56,24 +56,24 @@ func TestCacheExpiration(t *testing.T) {
// 00000a and 00000b will be removed from cache because it's the oldest.
// to verify, let's remove block-4k from the underlying storage and make sure we can still read
// 00000c and 00000d from the cache but not 00000a nor 00000b
assertNoError(t, underlyingStorage.DeleteBlock(ctx, "block-4k"))
assertNoError(t, underlyingStorage.DeleteBlob(ctx, "block-4k"))
cases := []struct {
block string
blobID blob.ID
expectedError error
}{
{"00000a", storage.ErrBlockNotFound},
{"00000b", storage.ErrBlockNotFound},
{"00000a", blob.ErrBlobNotFound},
{"00000b", blob.ErrBlobNotFound},
{"00000c", nil},
{"00000d", nil},
}
for _, tc := range cases {
_, got := cache.getContentBlock(ctx, tc.block, "block-4k", 0, -1)
_, got := cache.getContentBlock(ctx, tc.blobID, "block-4k", 0, -1)
if want := tc.expectedError; got != want {
t.Errorf("unexpected error when getting block %v: %v wanted %v", tc.block, got, want)
t.Errorf("unexpected error when getting block %v: %v wanted %v", tc.blobID, got, want)
} else {
t.Logf("got correct error %v when reading block %v", tc.expectedError, tc.block)
t.Logf("got correct error %v when reading block %v", tc.expectedError, tc.blobID)
}
}
}
@@ -104,10 +104,10 @@ func verifyBlockCache(t *testing.T, cache *blockCache) {
t.Run("GetContentBlock", func(t *testing.T) {
cases := []struct {
cacheKey string
physicalBlockID string
offset int64
length int64
cacheKey blob.ID
blobID blob.ID
offset int64
length int64
expected []byte
err error
@@ -116,15 +116,15 @@ func verifyBlockCache(t *testing.T, cache *blockCache) {
{"xf0f0f2", "block-1", 0, -1, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil},
{"xf0f0f1", "block-1", 1, 5, []byte{2, 3, 4, 5, 6}, nil},
{"xf0f0f2", "block-1", 0, -1, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil},
{"xf0f0f3", "no-such-block", 0, -1, nil, storage.ErrBlockNotFound},
{"xf0f0f4", "no-such-block", 10, 5, nil, storage.ErrBlockNotFound},
{"xf0f0f3", "no-such-block", 0, -1, nil, blob.ErrBlobNotFound},
{"xf0f0f4", "no-such-block", 10, 5, nil, blob.ErrBlobNotFound},
{"f0f0f5", "block-1", 7, 3, []byte{8, 9, 10}, nil},
{"xf0f0f6", "block-1", 11, 10, nil, errors.Errorf("invalid offset")},
{"xf0f0f6", "block-1", -1, 5, nil, errors.Errorf("invalid offset")},
}
for _, tc := range cases {
v, err := cache.getContentBlock(ctx, tc.cacheKey, tc.physicalBlockID, tc.offset, tc.length)
v, err := cache.getContentBlock(ctx, tc.cacheKey, tc.blobID, tc.offset, tc.length)
if (err != nil) != (tc.err != nil) {
t.Errorf("unexpected error for %v: %+v, wanted %+v", tc.cacheKey, err, tc.err)
} else if err != nil && err.Error() != tc.err.Error() {
@@ -139,8 +139,8 @@ func verifyBlockCache(t *testing.T, cache *blockCache) {
})
t.Run("DataCorruption", func(t *testing.T) {
cacheKey := "f0f0f1x"
d, err := cache.cacheStorage.GetBlock(ctx, cacheKey, 0, -1)
var cacheKey blob.ID = "f0f0f1x"
d, err := cache.cacheStorage.GetBlob(ctx, cacheKey, 0, -1)
if err != nil {
t.Fatalf("unable to retrieve data from cache: %v", err)
}
@@ -148,7 +148,7 @@ func verifyBlockCache(t *testing.T, cache *blockCache) {
// corrupt the data and write back
d[0] ^= 1
if err := cache.cacheStorage.PutBlock(ctx, cacheKey, d); err != nil {
if err := cache.cacheStorage.PutBlob(ctx, cacheKey, d); err != nil {
t.Fatalf("unable to write corrupted block: %v", err)
}
@@ -165,19 +165,19 @@ func verifyBlockCache(t *testing.T, cache *blockCache) {
func TestCacheFailureToOpen(t *testing.T) {
someError := errors.New("some error")
cacheData := map[string][]byte{}
cacheStorage := storagetesting.NewMapStorage(cacheData, nil, nil)
cacheData := blobtesting.DataMap{}
cacheStorage := blobtesting.NewMapStorage(cacheData, nil, nil)
underlyingStorage := newUnderlyingStorageForBlockCacheTesting(t)
faultyCache := &storagetesting.FaultyStorage{
faultyCache := &blobtesting.FaultyStorage{
Base: cacheStorage,
Faults: map[string][]*storagetesting.Fault{
"ListBlocks": {
Faults: map[string][]*blobtesting.Fault{
"ListBlobs": {
{Err: someError},
},
},
}
// Will fail because of ListBlocks failure.
// Will fail because of ListBlobs failure.
_, err := newBlockCacheWithCacheStorage(context.Background(), underlyingStorage, faultyCache, CachingOptions{
MaxCacheSizeBytes: 10000,
}, 0, 5*time.Hour)
@@ -185,7 +185,7 @@ func TestCacheFailureToOpen(t *testing.T) {
t.Errorf("invalid error %v, wanted: %v", err, someError)
}
// ListBlocks fails only once, next time it succeeds.
// ListBlobs fails only once, next time it succeeds.
cache, err := newBlockCacheWithCacheStorage(context.Background(), underlyingStorage, faultyCache, CachingOptions{
MaxCacheSizeBytes: 10000,
}, 0, 100*time.Millisecond)
@@ -199,10 +199,10 @@ func TestCacheFailureToOpen(t *testing.T) {
func TestCacheFailureToWrite(t *testing.T) {
someError := errors.New("some error")
cacheData := map[string][]byte{}
cacheStorage := storagetesting.NewMapStorage(cacheData, nil, nil)
cacheData := blobtesting.DataMap{}
cacheStorage := blobtesting.NewMapStorage(cacheData, nil, nil)
underlyingStorage := newUnderlyingStorageForBlockCacheTesting(t)
faultyCache := &storagetesting.FaultyStorage{
faultyCache := &blobtesting.FaultyStorage{
Base: cacheStorage,
}
@@ -216,8 +216,8 @@ func TestCacheFailureToWrite(t *testing.T) {
defer cache.close()
ctx := context.Background()
faultyCache.Faults = map[string][]*storagetesting.Fault{
"PutBlock": {
faultyCache.Faults = map[string][]*blobtesting.Fault{
"PutBlob": {
{Err: someError},
},
}
@@ -231,7 +231,7 @@ func TestCacheFailureToWrite(t *testing.T) {
t.Errorf("unexpected value retrieved from cache: %v, want: %v", got, want)
}
all, err := storage.ListAllBlocks(ctx, cacheStorage, "")
all, err := blob.ListAllBlobs(ctx, cacheStorage, "")
if err != nil {
t.Errorf("error listing cache: %v", err)
}
@@ -243,10 +243,10 @@ func TestCacheFailureToWrite(t *testing.T) {
func TestCacheFailureToRead(t *testing.T) {
someError := errors.New("some error")
cacheData := map[string][]byte{}
cacheStorage := storagetesting.NewMapStorage(cacheData, nil, nil)
cacheData := blobtesting.DataMap{}
cacheStorage := blobtesting.NewMapStorage(cacheData, nil, nil)
underlyingStorage := newUnderlyingStorageForBlockCacheTesting(t)
faultyCache := &storagetesting.FaultyStorage{
faultyCache := &blobtesting.FaultyStorage{
Base: cacheStorage,
}
@@ -260,8 +260,8 @@ func TestCacheFailureToRead(t *testing.T) {
defer cache.close()
ctx := context.Background()
faultyCache.Faults = map[string][]*storagetesting.Fault{
"GetBlock": {
faultyCache.Faults = map[string][]*blobtesting.Fault{
"GetBlob": {
{Err: someError, Repeat: 100},
},
}
@@ -278,15 +278,17 @@ func TestCacheFailureToRead(t *testing.T) {
}
}
func verifyStorageBlockList(t *testing.T, st storage.Storage, expectedBlocks ...string) {
func verifyStorageBlockList(t *testing.T, st blob.Storage, expectedBlocks ...blob.ID) {
t.Helper()
var foundBlocks []string
assertNoError(t, st.ListBlocks(context.Background(), "", func(bm storage.BlockMetadata) error {
foundBlocks = append(foundBlocks, bm.BlockID)
var foundBlocks []blob.ID
assertNoError(t, st.ListBlobs(context.Background(), "", func(bm blob.Metadata) error {
foundBlocks = append(foundBlocks, bm.BlobID)
return nil
}))
sort.Strings(foundBlocks)
sort.Slice(foundBlocks, func(i, j int) bool {
return foundBlocks[i] < foundBlocks[j]
})
if !reflect.DeepEqual(foundBlocks, expectedBlocks) {
t.Errorf("unexpected block list: %v, wanted %v", foundBlocks, expectedBlocks)
}

View File

@@ -8,11 +8,13 @@
"reflect"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
)
// RecoverIndexFromPackFile attempts to recover index block entries from a given pack file.
// RecoverIndexFromPackBlob attempts to recover index block entries from a given pack file.
// Pack file length may be provided (if known) to reduce the number of bytes that are read from the storage.
func (bm *Manager) RecoverIndexFromPackFile(ctx context.Context, packFile string, packFileLength int64, commit bool) ([]Info, error) {
func (bm *Manager) RecoverIndexFromPackBlob(ctx context.Context, packFile blob.ID, packFileLength int64, commit bool) ([]Info, error) {
localIndexBytes, err := bm.readPackFileLocalIndex(ctx, packFile, packFileLength)
if err != nil {
return nil, err
@@ -197,8 +199,8 @@ func (bm *Manager) appendPackFileIndexRecoveryData(blockData []byte, pending pac
return blockData, nil
}
func (bm *Manager) readPackFileLocalIndex(ctx context.Context, packFile string, packFileLength int64) ([]byte, error) {
payload, err := bm.st.GetBlock(ctx, packFile, 0, -1)
func (bm *Manager) readPackFileLocalIndex(ctx context.Context, packFile blob.ID, packFileLength int64) ([]byte, error) {
payload, err := bm.st.GetBlob(ctx, packFile, 0, -1)
if err != nil {
return nil, err
}

View File

@@ -5,13 +5,14 @@
"testing"
"time"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
func TestBlockIndexRecovery(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
block1 := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
block2 := writeBlockAndVerify(ctx, t, bm, seededRandomData(11, 100))
@@ -22,9 +23,9 @@ func TestBlockIndexRecovery(t *testing.T) {
}
// delete all index blocks
assertNoError(t, bm.st.ListBlocks(ctx, newIndexBlockPrefix, func(bi storage.BlockMetadata) error {
log.Debugf("deleting %v", bi.BlockID)
return bm.st.DeleteBlock(ctx, bi.BlockID)
assertNoError(t, bm.st.ListBlobs(ctx, newIndexBlockPrefix, func(bi blob.Metadata) error {
log.Debugf("deleting %v", bi.BlobID)
return bm.st.DeleteBlob(ctx, bi.BlobID)
}))
// now with index blocks gone, all blocks appear to not be found
@@ -36,8 +37,8 @@ func TestBlockIndexRecovery(t *testing.T) {
totalRecovered := 0
// pass 1 - just list blocks to recover, but don't commit
err := bm.st.ListBlocks(ctx, PackBlockPrefix, func(bi storage.BlockMetadata) error {
infos, err := bm.RecoverIndexFromPackFile(ctx, bi.BlockID, bi.Length, false)
err := bm.st.ListBlobs(ctx, PackBlobIDPrefix, func(bi blob.Metadata) error {
infos, err := bm.RecoverIndexFromPackBlob(ctx, bi.BlobID, bi.Length, false)
if err != nil {
return err
}
@@ -61,8 +62,8 @@ func TestBlockIndexRecovery(t *testing.T) {
// pass 2 now pass commit=true to add recovered blocks to index
totalRecovered = 0
err = bm.st.ListBlocks(ctx, PackBlockPrefix, func(bi storage.BlockMetadata) error {
infos, err := bm.RecoverIndexFromPackFile(ctx, bi.BlockID, bi.Length, true)
err = bm.st.ListBlobs(ctx, PackBlobIDPrefix, func(bi blob.Metadata) error {
infos, err := bm.RecoverIndexFromPackBlob(ctx, bi.BlobID, bi.Length, true)
if err != nil {
return err
}

View File

@@ -21,7 +21,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var (
@@ -29,8 +29,8 @@
formatLog = repologging.Logger("kopia/block/format")
)
// PackBlockPrefix is the prefix for all pack storage blocks.
const PackBlockPrefix = "p"
// PackBlobIDPrefix is the prefix for all pack blobs.
const PackBlobIDPrefix = "p"
const (
parallelFetches = 5 // number of parallel reads goroutines
@@ -47,9 +47,12 @@
indexLoadAttempts = 10
)
// ErrBlockNotFound is returned when block is not found.
var ErrBlockNotFound = errors.New("block not found")
// IndexInfo is an information about a single index block managed by Manager.
type IndexInfo struct {
FileName string
BlobID blob.ID
Length int64
Timestamp time.Time
}
@@ -61,7 +64,7 @@ type Manager struct {
stats Stats
blockCache *blockCache
listCache *listCache
st storage.Storage
st blob.Storage
mu sync.Mutex
locked bool
@@ -105,7 +108,7 @@ func (bm *Manager) DeleteBlock(blockID string) error {
// We have this block in current pack index and it's already deleted there.
if bi, ok := bm.packIndexBuilder[blockID]; ok {
if !bi.Deleted {
if bi.PackFile == "" {
if bi.PackBlobID == "" {
// added and never committed, just forget about it.
delete(bm.packIndexBuilder, blockID)
delete(bm.currentPackItems, blockID)
@@ -219,7 +222,7 @@ func (bm *Manager) verifyInvariantsLocked() {
func (bm *Manager) verifyCurrentPackItemsLocked() {
for k, cpi := range bm.currentPackItems {
bm.assertInvariant(cpi.BlockID == k, "block ID entry has invalid key: %v %v", cpi.BlockID, k)
bm.assertInvariant(cpi.Deleted || cpi.PackFile == "", "block ID entry has unexpected pack block ID %v: %v", cpi.BlockID, cpi.PackFile)
bm.assertInvariant(cpi.Deleted || cpi.PackBlobID == "", "block ID entry has unexpected pack block ID %v: %v", cpi.BlockID, cpi.PackBlobID)
bm.assertInvariant(cpi.TimestampSeconds != 0, "block has no timestamp: %v", cpi.BlockID)
bi, ok := bm.packIndexBuilder[k]
bm.assertInvariant(ok, "block ID entry not present in pack index builder: %v", cpi.BlockID)
@@ -235,9 +238,9 @@ func (bm *Manager) verifyPackIndexBuilderLocked() {
continue
}
if cpi.Deleted {
bm.assertInvariant(cpi.PackFile == "", "block can't be both deleted and have a pack block: %v", cpi.BlockID)
bm.assertInvariant(cpi.PackBlobID == "", "block can't be both deleted and have a pack block: %v", cpi.BlockID)
} else {
bm.assertInvariant(cpi.PackFile != "", "block that's not deleted must have a pack block: %+v", cpi)
bm.assertInvariant(cpi.PackBlobID != "", "block that's not deleted must have a pack block: %+v", cpi)
bm.assertInvariant(cpi.FormatVersion == byte(bm.writeFormatVersion), "block that's not deleted must have a valid format version: %+v", cpi)
}
bm.assertInvariant(cpi.TimestampSeconds != 0, "block has no timestamp: %v", cpi.BlockID)
@@ -279,12 +282,12 @@ func (bm *Manager) flushPackIndexesLocked(ctx context.Context) error {
data := buf.Bytes()
dataCopy := append([]byte(nil), data...)
indexBlockID, err := bm.writePackIndexesNew(ctx, data)
indexBlobID, err := bm.writePackIndexesNew(ctx, data)
if err != nil {
return err
}
if err := bm.committedBlocks.addBlock(indexBlockID, dataCopy, true); err != nil {
if err := bm.committedBlocks.addBlock(indexBlobID, dataCopy, true); err != nil {
return errors.Wrap(err, "unable to add committed block")
}
bm.packIndexBuilder = make(packIndexBuilder)
@@ -294,7 +297,7 @@ func (bm *Manager) flushPackIndexesLocked(ctx context.Context) error {
return nil
}
func (bm *Manager) writePackIndexesNew(ctx context.Context, data []byte) (string, error) {
func (bm *Manager) writePackIndexesNew(ctx context.Context, data []byte) (blob.ID, error) {
return bm.encryptAndWriteBlockNotLocked(ctx, data, newIndexBlockPrefix)
}
@@ -320,7 +323,7 @@ func (bm *Manager) writePackBlockLocked(ctx context.Context) error {
return errors.Wrap(err, "unable to read crypto bytes")
}
packFile := fmt.Sprintf("%v%x", PackBlockPrefix, blockID)
packFile := blob.ID(fmt.Sprintf("%v%x", PackBlobIDPrefix, blockID))
blockData, packFileIndex, err := bm.preparePackDataBlock(packFile)
if err != nil {
@@ -341,7 +344,7 @@ func (bm *Manager) writePackBlockLocked(ctx context.Context) error {
return nil
}
func (bm *Manager) preparePackDataBlock(packFile string) ([]byte, packIndexBuilder, error) {
func (bm *Manager) preparePackDataBlock(packFile blob.ID) ([]byte, packIndexBuilder, error) {
formatLog.Debugf("preparing block data with %v items", len(bm.currentPackItems))
blockData, err := appendRandomBytes(append([]byte(nil), bm.repositoryFormatBytes...), rand.Intn(bm.maxPreambleLength-bm.minPreambleLength+1)+bm.minPreambleLength)
@@ -367,7 +370,7 @@ func (bm *Manager) preparePackDataBlock(packFile string) ([]byte, packIndexBuild
BlockID: blockID,
Deleted: info.Deleted,
FormatVersion: byte(bm.writeFormatVersion),
PackFile: packFile,
PackBlobID: packFile,
PackOffset: uint32(len(blockData)),
Length: uint32(len(encrypted)),
TimestampSeconds: info.TimestampSeconds,
@@ -445,9 +448,9 @@ func (bm *Manager) loadPackIndexesUnlocked(ctx context.Context) ([]IndexInfo, bo
err = bm.tryLoadPackIndexBlocksUnlocked(ctx, blocks)
if err == nil {
var blockIDs []string
var blockIDs []blob.ID
for _, b := range blocks {
blockIDs = append(blockIDs, b.FileName)
blockIDs = append(blockIDs, b.BlobID)
}
var updated bool
updated, err = bm.committedBlocks.use(blockIDs)
@@ -456,7 +459,7 @@ func (bm *Manager) loadPackIndexesUnlocked(ctx context.Context) ([]IndexInfo, bo
}
return blocks, updated, nil
}
if err != storage.ErrBlockNotFound {
if err != blob.ErrBlobNotFound {
return nil, false, err
}
}
@@ -511,19 +514,19 @@ func (bm *Manager) tryLoadPackIndexBlocksUnlocked(ctx context.Context, blocks []
}
// unprocessedIndexBlocksUnlocked returns a closed channel filled with block IDs that are not in committedBlocks cache.
func (bm *Manager) unprocessedIndexBlocksUnlocked(blocks []IndexInfo) (<-chan string, int64, error) {
func (bm *Manager) unprocessedIndexBlocksUnlocked(blocks []IndexInfo) (<-chan blob.ID, int64, error) {
var totalSize int64
ch := make(chan string, len(blocks))
ch := make(chan blob.ID, len(blocks))
for _, block := range blocks {
has, err := bm.committedBlocks.cache.hasIndexBlockID(block.FileName)
has, err := bm.committedBlocks.cache.hasIndexBlockID(block.BlobID)
if err != nil {
return nil, 0, err
}
if has {
log.Debugf("index block %q already in cache, skipping", block.FileName)
log.Debugf("index block %q already in cache, skipping", block.BlobID)
continue
}
ch <- block.FileName
ch <- block.BlobID
totalSize += block.Length
}
close(ch)
@@ -657,16 +660,16 @@ func validatePrefix(prefix string) error {
return errors.Errorf("invalid prefix, must be a empty or single letter between 'g' and 'z'")
}
func (bm *Manager) writePackFileNotLocked(ctx context.Context, packFile string, data []byte) error {
func (bm *Manager) writePackFileNotLocked(ctx context.Context, packFile blob.ID, data []byte) error {
atomic.AddInt32(&bm.stats.WrittenBlocks, 1)
atomic.AddInt64(&bm.stats.WrittenBytes, int64(len(data)))
bm.listCache.deleteListCache(ctx)
return bm.st.PutBlock(ctx, packFile, data)
return bm.st.PutBlob(ctx, packFile, data)
}
func (bm *Manager) encryptAndWriteBlockNotLocked(ctx context.Context, data []byte, prefix string) (string, error) {
func (bm *Manager) encryptAndWriteBlockNotLocked(ctx context.Context, data []byte, prefix blob.ID) (blob.ID, error) {
hash := bm.hashData(data)
physicalBlockID := prefix + hex.EncodeToString(hash)
blobID := prefix + blob.ID(hex.EncodeToString(hash))
// Encrypt the block in-place.
atomic.AddInt64(&bm.stats.EncryptedBytes, int64(len(data)))
@@ -678,11 +681,11 @@ func (bm *Manager) encryptAndWriteBlockNotLocked(ctx context.Context, data []byt
atomic.AddInt32(&bm.stats.WrittenBlocks, 1)
atomic.AddInt64(&bm.stats.WrittenBytes, int64(len(data2)))
bm.listCache.deleteListCache(ctx)
if err := bm.st.PutBlock(ctx, physicalBlockID, data2); err != nil {
if err := bm.st.PutBlob(ctx, blobID, data2); err != nil {
return "", err
}
return physicalBlockID, nil
return blobID, nil
}
func (bm *Manager) hashData(data []byte) []byte {
@@ -697,7 +700,7 @@ func cloneBytes(b []byte) []byte {
return append([]byte{}, b...)
}
// GetBlock gets the contents of a given block. If the block is not found returns blob.ErrBlockNotFound.
// GetBlock gets the contents of a given block. If the block is not found returns blob.ErrBlobNotFound.
func (bm *Manager) GetBlock(ctx context.Context, blockID string) ([]byte, error) {
bi, err := bm.getBlockInfo(blockID)
if err != nil {
@@ -705,7 +708,7 @@ func (bm *Manager) GetBlock(ctx context.Context, blockID string) ([]byte, error)
}
if bi.Deleted {
return nil, storage.ErrBlockNotFound
return nil, ErrBlockNotFound
}
return bm.getBlockContentsUnlocked(ctx, bi)
@@ -740,14 +743,14 @@ func (bm *Manager) BlockInfo(ctx context.Context, blockID string) (Info, error)
if bi.Deleted {
log.Debugf("BlockInfo(%q) - deleted", blockID)
} else {
log.Debugf("BlockInfo(%q) - exists in %v", blockID, bi.PackFile)
log.Debugf("BlockInfo(%q) - exists in %v", blockID, bi.PackBlobID)
}
return bi, err
}
// FindUnreferencedStorageFiles returns the list of unreferenced storage blocks.
func (bm *Manager) FindUnreferencedStorageFiles(ctx context.Context) ([]storage.BlockMetadata, error) {
// FindUnreferencedBlobs returns the list of unreferenced storage blocks.
func (bm *Manager) FindUnreferencedBlobs(ctx context.Context) ([]blob.Metadata, error) {
infos, err := bm.ListBlockInfos("", true)
if err != nil {
return nil, errors.Wrap(err, "unable to list index blocks")
@@ -755,11 +758,11 @@ func (bm *Manager) FindUnreferencedStorageFiles(ctx context.Context) ([]storage.
usedPackBlocks := findPackBlocksInUse(infos)
var unused []storage.BlockMetadata
err = bm.st.ListBlocks(ctx, PackBlockPrefix, func(bi storage.BlockMetadata) error {
u := usedPackBlocks[bi.BlockID]
var unused []blob.Metadata
err = bm.st.ListBlobs(ctx, PackBlobIDPrefix, func(bi blob.Metadata) error {
u := usedPackBlocks[bi.BlobID]
if u > 0 {
log.Debugf("pack %v, in use by %v blocks", bi.BlockID, u)
log.Debugf("pack %v, in use by %v blocks", bi.BlobID, u)
return nil
}
@@ -773,11 +776,11 @@ func (bm *Manager) FindUnreferencedStorageFiles(ctx context.Context) ([]storage.
return unused, nil
}
func findPackBlocksInUse(infos []Info) map[string]int {
packUsage := map[string]int{}
func findPackBlocksInUse(infos []Info) map[blob.ID]int {
packUsage := map[blob.ID]int{}
for _, bi := range infos {
packUsage[bi.PackFile]++
packUsage[bi.PackBlobID]++
}
return packUsage
@@ -788,7 +791,7 @@ func (bm *Manager) getBlockContentsUnlocked(ctx context.Context, bi Info) ([]byt
return cloneBytes(bi.Payload), nil
}
payload, err := bm.blockCache.getContentBlock(ctx, bi.BlockID, bi.PackFile, int64(bi.PackOffset), int64(bi.Length))
payload, err := bm.blockCache.getContentBlock(ctx, blob.ID(bi.BlockID), bi.PackBlobID, int64(bi.PackOffset), int64(bi.Length))
if err != nil {
return nil, err
}
@@ -803,7 +806,7 @@ func (bm *Manager) getBlockContentsUnlocked(ctx context.Context, bi Info) ([]byt
decrypted, err := bm.decryptAndVerify(payload, iv)
if err != nil {
return nil, errors.Wrapf(err, "invalid checksum at %v offset %v length %v", bi.PackFile, bi.PackOffset, len(payload))
return nil, errors.Wrapf(err, "invalid checksum at %v offset %v length %v", bi.PackBlobID, bi.PackOffset, len(payload))
}
return decrypted, nil
@@ -827,13 +830,13 @@ func (bm *Manager) decryptAndVerify(encrypted []byte, iv []byte) ([]byte, error)
return decrypted, bm.verifyChecksum(decrypted, iv)
}
func (bm *Manager) getPhysicalBlockInternal(ctx context.Context, blockID string) ([]byte, error) {
payload, err := bm.blockCache.getContentBlock(ctx, blockID, blockID, 0, -1)
func (bm *Manager) getPhysicalBlockInternal(ctx context.Context, blobID blob.ID) ([]byte, error) {
payload, err := bm.blockCache.getContentBlock(ctx, blobID, blobID, 0, -1)
if err != nil {
return nil, err
}
iv, err := getPhysicalBlockIV(blockID)
iv, err := getPhysicalBlockIV(blobID)
if err != nil {
return nil, err
}
@@ -860,11 +863,11 @@ func getPackedBlockIV(blockID string) ([]byte, error) {
return hex.DecodeString(blockID[len(blockID)-(aes.BlockSize*2):])
}
func getPhysicalBlockIV(s string) ([]byte, error) {
if p := strings.Index(s, "-"); p >= 0 {
func getPhysicalBlockIV(s blob.ID) ([]byte, error) {
if p := strings.Index(string(s), "-"); p >= 0 {
s = s[0:p]
}
return hex.DecodeString(s[len(s)-(aes.BlockSize*2):])
return hex.DecodeString(string(s[len(s)-(aes.BlockSize*2):]))
}
func (bm *Manager) verifyChecksum(data []byte, blockID []byte) error {
@@ -918,8 +921,8 @@ type cachedList struct {
// listIndexBlocksFromStorage returns the list of index blocks in the given storage.
// The list of blocks is not guaranteed to be sorted.
func listIndexBlocksFromStorage(ctx context.Context, st storage.Storage) ([]IndexInfo, error) {
snapshot, err := storage.ListAllBlocksConsistent(ctx, st, newIndexBlockPrefix, math.MaxInt32)
func listIndexBlocksFromStorage(ctx context.Context, st blob.Storage) ([]IndexInfo, error) {
snapshot, err := blob.ListAllBlobsConsistent(ctx, st, newIndexBlockPrefix, math.MaxInt32)
if err != nil {
return nil, err
}
@@ -927,7 +930,7 @@ func listIndexBlocksFromStorage(ctx context.Context, st storage.Storage) ([]Inde
var results []IndexInfo
for _, it := range snapshot {
ii := IndexInfo{
FileName: it.BlockID,
BlobID: it.BlobID,
Timestamp: it.Timestamp,
Length: it.Length,
}
@@ -938,11 +941,11 @@ func listIndexBlocksFromStorage(ctx context.Context, st storage.Storage) ([]Inde
}
// NewManager creates new block manager with given packing options and a formatter.
func NewManager(ctx context.Context, st storage.Storage, f FormattingOptions, caching CachingOptions, repositoryFormatBytes []byte) (*Manager, error) {
func NewManager(ctx context.Context, st blob.Storage, f FormattingOptions, caching CachingOptions, repositoryFormatBytes []byte) (*Manager, error) {
return newManagerWithOptions(ctx, st, f, caching, time.Now, repositoryFormatBytes)
}
func newManagerWithOptions(ctx context.Context, st storage.Storage, f FormattingOptions, caching CachingOptions, timeNow func() time.Time, repositoryFormatBytes []byte) (*Manager, error) {
func newManagerWithOptions(ctx context.Context, st blob.Storage, f FormattingOptions, caching CachingOptions, timeNow func() time.Time, repositoryFormatBytes []byte) (*Manager, error) {
if f.Version < minSupportedReadVersion || f.Version > currentWriteVersion {
return nil, errors.Errorf("can't handle repositories created using version %v (min supported %v, max supported %v)", f.Version, minSupportedReadVersion, maxSupportedReadVersion)
}

View File

@@ -110,13 +110,13 @@ func (bm *Manager) compactAndDeleteIndexBlocks(ctx context.Context, indexBlocks
formatLog.Debugf("wrote compacted index (%v bytes) in %v", compactedIndexBlock, time.Since(t0))
for _, indexBlock := range indexBlocks {
if indexBlock.FileName == compactedIndexBlock {
if indexBlock.BlobID == compactedIndexBlock {
continue
}
bm.listCache.deleteListCache(ctx)
if err := bm.st.DeleteBlock(ctx, indexBlock.FileName); err != nil {
log.Warningf("unable to delete compacted block %q: %v", indexBlock.FileName, err)
if err := bm.st.DeleteBlob(ctx, indexBlock.BlobID); err != nil {
log.Warningf("unable to delete compacted blob %q: %v", indexBlock.BlobID, err)
}
}
@@ -124,7 +124,7 @@ func (bm *Manager) compactAndDeleteIndexBlocks(ctx context.Context, indexBlocks
}
func (bm *Manager) addIndexBlocksToBuilder(ctx context.Context, bld packIndexBuilder, indexBlock IndexInfo, opt CompactOptions) error {
data, err := bm.getPhysicalBlockInternal(ctx, indexBlock.FileName)
data, err := bm.getPhysicalBlockInternal(ctx, indexBlock.BlobID)
if err != nil {
return err
}

View File

@@ -17,8 +17,8 @@
logging "github.com/op/go-logging"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
const (
@@ -34,8 +34,8 @@ func init() {
func TestBlockManagerEmptyFlush(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
bm.Flush(ctx)
if got, want := len(data), 0; got != want {
@@ -45,8 +45,8 @@ func TestBlockManagerEmptyFlush(t *testing.T) {
func TestBlockZeroBytes1(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
blockID := writeBlockAndVerify(ctx, t, bm, []byte{})
bm.Flush(ctx)
@@ -60,8 +60,8 @@ func TestBlockZeroBytes1(t *testing.T) {
func TestBlockZeroBytes2(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 10))
writeBlockAndVerify(ctx, t, bm, []byte{})
@@ -74,8 +74,8 @@ func TestBlockZeroBytes2(t *testing.T) {
func TestBlockManagerSmallBlockWrites(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
for i := 0; i < 100; i++ {
@@ -92,8 +92,8 @@ func TestBlockManagerSmallBlockWrites(t *testing.T) {
func TestBlockManagerDedupesPendingBlocks(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
for i := 0; i < 100; i++ {
@@ -110,8 +110,8 @@ func TestBlockManagerDedupesPendingBlocks(t *testing.T) {
func TestBlockManagerDedupesPendingAndUncommittedBlocks(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
// no writes here, all data fits in a single pack.
@@ -140,19 +140,19 @@ func TestBlockManagerDedupesPendingAndUncommittedBlocks(t *testing.T) {
func TestBlockManagerEmpty(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
noSuchBlockID := string(hashValue([]byte("foo")))
b, err := bm.GetBlock(ctx, noSuchBlockID)
if err != storage.ErrBlockNotFound {
if err != ErrBlockNotFound {
t.Errorf("unexpected error when getting non-existent block: %v, %v", b, err)
}
bi, err := bm.BlockInfo(ctx, noSuchBlockID)
if err != storage.ErrBlockNotFound {
if err != ErrBlockNotFound {
t.Errorf("unexpected error when getting non-existent block info: %v, %v", bi, err)
}
@@ -176,8 +176,8 @@ func verifyActiveIndexBlockCount(ctx context.Context, t *testing.T, bm *Manager,
}
func TestBlockManagerInternalFlush(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
for i := 0; i < 100; i++ {
@@ -214,8 +214,8 @@ func TestBlockManagerInternalFlush(t *testing.T) {
func TestBlockManagerWriteMultiple(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
timeFunc := fakeTimeNowWithAutoAdvance(fakeTime, 1*time.Second)
bm := newTestBlockManager(data, keyTime, timeFunc)
@@ -262,10 +262,10 @@ func TestBlockManagerWriteMultiple(t *testing.T) {
// was done in place and clobbered pending data in memory.
func TestBlockManagerFailedToWritePack(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
st := storagetesting.NewMapStorage(data, keyTime, nil)
faulty := &storagetesting.FaultyStorage{
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
st := blobtesting.NewMapStorage(data, keyTime, nil)
faulty := &blobtesting.FaultyStorage{
Base: st,
}
st = faulty
@@ -283,7 +283,7 @@ func TestBlockManagerFailedToWritePack(t *testing.T) {
}
logging.SetLevel(logging.DEBUG, "faulty-storage")
faulty.Faults = map[string][]*storagetesting.Fault{
faulty.Faults = map[string][]*blobtesting.Fault{
"PutBlock": {
{Err: errors.New("booboo")},
},
@@ -303,8 +303,8 @@ func TestBlockManagerFailedToWritePack(t *testing.T) {
func TestBlockManagerConcurrency(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
preexistingBlock := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
bm.Flush(ctx)
@@ -387,8 +387,8 @@ func TestBlockManagerConcurrency(t *testing.T) {
func TestDeleteBlock(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
block1 := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
bm.Flush(ctx)
@@ -418,8 +418,8 @@ func TestRewriteNonDeleted(t *testing.T) {
for action2 := 0; action2 < stepBehaviors; action2++ {
t.Run(fmt.Sprintf("case-%v-%v", action1, action2), func(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
fakeNow := fakeTimeNowWithAutoAdvance(fakeTime, 1*time.Second)
bm := newTestBlockManager(data, keyTime, fakeNow)
@@ -450,8 +450,8 @@ func TestRewriteNonDeleted(t *testing.T) {
func TestDisableFlush(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
bm.DisableIndexFlush()
bm.DisableIndexFlush()
@@ -480,8 +480,8 @@ func TestRewriteDeleted(t *testing.T) {
for action3 := 0; action3 < stepBehaviors; action3++ {
t.Run(fmt.Sprintf("case-%v-%v-%v", action1, action2, action3), func(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
fakeNow := fakeTimeNowWithAutoAdvance(fakeTime, 1*time.Second)
bm := newTestBlockManager(data, keyTime, fakeNow)
@@ -503,7 +503,7 @@ func TestRewriteDeleted(t *testing.T) {
applyStep(action1)
assertNoError(t, bm.DeleteBlock(block1))
applyStep(action2)
if got, want := bm.RewriteBlock(ctx, block1), storage.ErrBlockNotFound; got != want && got != nil {
if got, want := bm.RewriteBlock(ctx, block1), ErrBlockNotFound; got != want && got != nil {
t.Errorf("unexpected error %v, wanted %v", got, want)
}
applyStep(action3)
@@ -532,8 +532,8 @@ func TestDeleteAndRecreate(t *testing.T) {
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
// write a block
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, fakeTimeNowFrozen(fakeTime))
block1 := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
bm.Flush(ctx)
@@ -573,10 +573,10 @@ func TestDeleteAndRecreate(t *testing.T) {
}
}
func TestFindUnreferencedStorageFiles(t *testing.T) {
func TestFindUnreferencedBlobs(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
verifyUnreferencedStorageFilesCount(ctx, t, bm, 0)
blockID := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
@@ -606,10 +606,10 @@ func TestFindUnreferencedStorageFiles(t *testing.T) {
verifyUnreferencedStorageFilesCount(ctx, t, bm, 2)
}
func TestFindUnreferencedStorageFiles2(t *testing.T) {
func TestFindUnreferencedBlobs2(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, nil)
verifyUnreferencedStorageFilesCount(ctx, t, bm, 0)
blockID := writeBlockAndVerify(ctx, t, bm, seededRandomData(10, 100))
@@ -649,9 +649,9 @@ func dumpBlocks(t *testing.T, bm *Manager, caption string) {
func verifyUnreferencedStorageFilesCount(ctx context.Context, t *testing.T, bm *Manager, want int) {
t.Helper()
unref, err := bm.FindUnreferencedStorageFiles(ctx)
unref, err := bm.FindUnreferencedBlobs(ctx)
if err != nil {
t.Errorf("error in FindUnreferencedStorageFiles: %v", err)
t.Errorf("error in FindUnreferencedBlobs: %v", err)
}
log.Infof("got %v expecting %v", unref, want)
@@ -662,8 +662,8 @@ func verifyUnreferencedStorageFilesCount(ctx context.Context, t *testing.T, bm *
func TestBlockWriteAliasing(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, fakeTimeNowFrozen(fakeTime))
blockData := []byte{100, 0, 0}
@@ -683,8 +683,8 @@ func TestBlockWriteAliasing(t *testing.T) {
func TestBlockReadAliasing(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
bm := newTestBlockManager(data, keyTime, fakeTimeNowFrozen(fakeTime))
blockData := []byte{100, 0, 0}
@@ -712,8 +712,8 @@ func verifyVersionCompat(t *testing.T, writeVersion int) {
ctx := context.Background()
// create block manager that writes 'writeVersion' and reads all versions >= minSupportedReadVersion
data := map[string][]byte{}
keyTime := map[string]time.Time{}
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
mgr := newTestBlockManager(data, keyTime, nil)
mgr.writeFormatVersion = int32(writeVersion)
@@ -733,10 +733,10 @@ func verifyVersionCompat(t *testing.T, writeVersion int) {
// delete random 3 items (map iteration order is random)
cnt := 0
for blockID := range dataSet {
t.Logf("deleting %v", blockID)
assertNoError(t, mgr.DeleteBlock(blockID))
delete(dataSet, blockID)
for blobID := range dataSet {
t.Logf("deleting %v", blobID)
assertNoError(t, mgr.DeleteBlock(blobID))
delete(dataSet, blobID)
cnt++
if cnt >= 3 {
break
@@ -782,12 +782,12 @@ func verifyBlockManagerDataSet(ctx context.Context, t *testing.T, mgr *Manager,
}
}
func newTestBlockManager(data map[string][]byte, keyTime map[string]time.Time, timeFunc func() time.Time) *Manager {
func newTestBlockManager(data blobtesting.DataMap, keyTime map[blob.ID]time.Time, timeFunc func() time.Time) *Manager {
//st = logging.NewWrapper(st)
if timeFunc == nil {
timeFunc = fakeTimeNowWithAutoAdvance(fakeTime, 1*time.Second)
}
st := storagetesting.NewMapStorage(data, keyTime, timeFunc)
st := blobtesting.NewMapStorage(data, keyTime, timeFunc)
bm, err := newManagerWithOptions(context.Background(), st, FormattingOptions{
Hash: "HMAC-SHA256",
Encryption: "NONE",
@@ -801,11 +801,11 @@ func newTestBlockManager(data map[string][]byte, keyTime map[string]time.Time, t
return bm
}
func getIndexCount(d map[string][]byte) int {
func getIndexCount(d blobtesting.DataMap) int {
var cnt int
for k := range d {
if strings.HasPrefix(k, newIndexBlockPrefix) {
for blobID := range d {
if strings.HasPrefix(string(blobID), newIndexBlockPrefix) {
cnt++
}
}
@@ -832,8 +832,8 @@ func verifyBlockNotFound(ctx context.Context, t *testing.T, bm *Manager, blockID
t.Helper()
b, err := bm.GetBlock(ctx, blockID)
if err != storage.ErrBlockNotFound {
t.Errorf("unexpected response from GetBlock(%q), got %v,%v, expected %v", blockID, b, err, storage.ErrBlockNotFound)
if err != ErrBlockNotFound {
t.Errorf("unexpected response from GetBlock(%q), got %v,%v, expected %v", blockID, b, err, ErrBlockNotFound)
}
}
@@ -890,7 +890,7 @@ func hashValue(b []byte) string {
return hex.EncodeToString(h.Sum(nil))
}
func dumpBlockManagerData(t *testing.T, data map[string][]byte) {
func dumpBlockManagerData(t *testing.T, data blobtesting.DataMap) {
t.Helper()
for k, v := range data {
if k[0] == 'n' {

View File

@@ -7,6 +7,8 @@
"sort"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
)
// packIndexBuilder prepares and writes block index for writing.
@@ -35,21 +37,21 @@ func (b packIndexBuilder) sortedBlocks() []*Info {
}
type indexLayout struct {
packFileOffsets map[string]uint32
entryCount int
keyLength int
entryLength int
extraDataOffset uint32
packBlobIDOffsets map[blob.ID]uint32
entryCount int
keyLength int
entryLength int
extraDataOffset uint32
}
// Build writes the pack index to the provided output.
func (b packIndexBuilder) Build(output io.Writer) error {
allBlocks := b.sortedBlocks()
layout := &indexLayout{
packFileOffsets: map[string]uint32{},
keyLength: -1,
entryLength: 20,
entryCount: len(allBlocks),
packBlobIDOffsets: map[blob.ID]uint32{},
keyLength: -1,
entryLength: 20,
entryCount: len(allBlocks),
}
w := bufio.NewWriter(output)
@@ -89,10 +91,10 @@ func prepareExtraData(allBlocks []*Info, layout *indexLayout) []byte {
if i == 0 {
layout.keyLength = len(contentIDToBytes(it.BlockID))
}
if it.PackFile != "" {
if _, ok := layout.packFileOffsets[it.PackFile]; !ok {
layout.packFileOffsets[it.PackFile] = uint32(len(extraData))
extraData = append(extraData, []byte(it.PackFile)...)
if it.PackBlobID != "" {
if _, ok := layout.packBlobIDOffsets[it.PackBlobID]; !ok {
layout.packBlobIDOffsets[it.PackBlobID] = uint32(len(extraData))
extraData = append(extraData, []byte(it.PackBlobID)...)
}
}
if len(it.Payload) > 0 {
@@ -130,11 +132,11 @@ func formatEntry(entry []byte, it *Info, layout *indexLayout) error {
entryPackedLength := entry[16:20]
timestampAndFlags := uint64(it.TimestampSeconds) << 16
if len(it.PackFile) == 0 {
if len(it.PackBlobID) == 0 {
return errors.Errorf("empty pack block ID for %v", it.BlockID)
}
binary.BigEndian.PutUint32(entryPackFileOffset, layout.extraDataOffset+layout.packFileOffsets[it.PackFile])
binary.BigEndian.PutUint32(entryPackFileOffset, layout.extraDataOffset+layout.packBlobIDOffsets[it.PackBlobID])
if it.Deleted {
binary.BigEndian.PutUint32(entryPackedOffset, it.PackOffset|0x80000000)
} else {
@@ -142,7 +144,7 @@ func formatEntry(entry []byte, it *Info, layout *indexLayout) error {
}
binary.BigEndian.PutUint32(entryPackedLength, it.Length)
timestampAndFlags |= uint64(it.FormatVersion) << 8
timestampAndFlags |= uint64(len(it.PackFile))
timestampAndFlags |= uint64(len(it.PackBlobID))
binary.BigEndian.PutUint64(entryTimestampAndFlags, timestampAndFlags)
return nil
}

View File

@@ -6,22 +6,22 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
type committedBlockIndex struct {
cache committedBlockIndexCache
mu sync.Mutex
inUse map[string]packIndex
inUse map[blob.ID]packIndex
merged mergedIndex
}
type committedBlockIndexCache interface {
hasIndexBlockID(indexBlockID string) (bool, error)
addBlockToCache(indexBlockID string, data []byte) error
openIndex(indexBlockID string) (packIndex, error)
expireUnused(used []string) error
hasIndexBlockID(indexBlob blob.ID) (bool, error)
addBlockToCache(indexBlob blob.ID, data []byte) error
openIndex(indexBlob blob.ID) (packIndex, error)
expireUnused(used []blob.ID) error
}
func (b *committedBlockIndex) getBlock(blockID string) (Info, error) {
@@ -33,12 +33,12 @@ func (b *committedBlockIndex) getBlock(blockID string) (Info, error) {
return *info, nil
}
if err == nil {
return Info{}, storage.ErrBlockNotFound
return Info{}, ErrBlockNotFound
}
return Info{}, err
}
func (b *committedBlockIndex) addBlock(indexBlockID string, data []byte, use bool) error {
func (b *committedBlockIndex) addBlock(indexBlockID blob.ID, data []byte, use bool) error {
if err := b.cache.addBlockToCache(indexBlockID, data); err != nil {
return err
}
@@ -71,7 +71,7 @@ func (b *committedBlockIndex) listBlocks(prefix string, cb func(i Info) error) e
return m.Iterate(prefix, cb)
}
func (b *committedBlockIndex) packFilesChanged(packFiles []string) bool {
func (b *committedBlockIndex) packFilesChanged(packFiles []blob.ID) bool {
if len(packFiles) != len(b.inUse) {
return true
}
@@ -85,7 +85,7 @@ func (b *committedBlockIndex) packFilesChanged(packFiles []string) bool {
return false
}
func (b *committedBlockIndex) use(packFiles []string) (bool, error) {
func (b *committedBlockIndex) use(packFiles []blob.ID) (bool, error) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -95,7 +95,7 @@ func (b *committedBlockIndex) use(packFiles []string) (bool, error) {
log.Debugf("set of index files has changed (had %v, now %v)", len(b.inUse), len(packFiles))
var newMerged mergedIndex
newInUse := map[string]packIndex{}
newInUse := map[blob.ID]packIndex{}
defer func() {
newMerged.Close() //nolint:errcheck
}()
@@ -128,12 +128,12 @@ func newCommittedBlockIndex(caching CachingOptions) (*committedBlockIndex, error
cache = &diskCommittedBlockIndexCache{dirname}
} else {
cache = &memoryCommittedBlockIndexCache{
blocks: map[string]packIndex{},
blocks: map[blob.ID]packIndex{},
}
}
return &committedBlockIndex{
cache: cache,
inUse: map[string]packIndex{},
inUse: map[blob.ID]packIndex{},
}, nil
}

View File

@@ -9,6 +9,8 @@
"github.com/pkg/errors"
"golang.org/x/exp/mmap"
"github.com/kopia/kopia/repo/blob"
)
const (
@@ -20,11 +22,11 @@ type diskCommittedBlockIndexCache struct {
dirname string
}
func (c *diskCommittedBlockIndexCache) indexBlockPath(indexBlockID string) string {
return filepath.Join(c.dirname, indexBlockID+simpleIndexSuffix)
func (c *diskCommittedBlockIndexCache) indexBlockPath(indexBlockID blob.ID) string {
return filepath.Join(c.dirname, string(indexBlockID)+simpleIndexSuffix)
}
func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID string) (packIndex, error) {
func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID blob.ID) (packIndex, error) {
fullpath := c.indexBlockPath(indexBlockID)
f, err := mmap.Open(fullpath)
@@ -35,7 +37,7 @@ func (c *diskCommittedBlockIndexCache) openIndex(indexBlockID string) (packIndex
return openPackIndex(f)
}
func (c *diskCommittedBlockIndexCache) hasIndexBlockID(indexBlockID string) (bool, error) {
func (c *diskCommittedBlockIndexCache) hasIndexBlockID(indexBlockID blob.ID) (bool, error) {
_, err := os.Stat(c.indexBlockPath(indexBlockID))
if err == nil {
return true, nil
@@ -47,7 +49,7 @@ func (c *diskCommittedBlockIndexCache) hasIndexBlockID(indexBlockID string) (boo
return false, err
}
func (c *diskCommittedBlockIndexCache) addBlockToCache(indexBlockID string, data []byte) error {
func (c *diskCommittedBlockIndexCache) addBlockToCache(indexBlockID blob.ID, data []byte) error {
exists, err := c.hasIndexBlockID(indexBlockID)
if err != nil {
return err
@@ -100,18 +102,18 @@ func writeTempFileAtomic(dirname string, data []byte) (string, error) {
return tf.Name(), nil
}
func (c *diskCommittedBlockIndexCache) expireUnused(used []string) error {
func (c *diskCommittedBlockIndexCache) expireUnused(used []blob.ID) error {
entries, err := ioutil.ReadDir(c.dirname)
if err != nil {
return errors.Wrap(err, "can't list cache")
}
remaining := map[string]os.FileInfo{}
remaining := map[blob.ID]os.FileInfo{}
for _, ent := range entries {
if strings.HasSuffix(ent.Name(), simpleIndexSuffix) {
n := strings.TrimSuffix(ent.Name(), simpleIndexSuffix)
remaining[n] = ent
remaining[blob.ID(n)] = ent
}
}

View File

@@ -5,21 +5,23 @@
"sync"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
)
type memoryCommittedBlockIndexCache struct {
mu sync.Mutex
blocks map[string]packIndex
blocks map[blob.ID]packIndex
}
func (m *memoryCommittedBlockIndexCache) hasIndexBlockID(indexBlockID string) (bool, error) {
func (m *memoryCommittedBlockIndexCache) hasIndexBlockID(indexBlockID blob.ID) (bool, error) {
m.mu.Lock()
defer m.mu.Unlock()
return m.blocks[indexBlockID] != nil, nil
}
func (m *memoryCommittedBlockIndexCache) addBlockToCache(indexBlockID string, data []byte) error {
func (m *memoryCommittedBlockIndexCache) addBlockToCache(indexBlockID blob.ID, data []byte) error {
m.mu.Lock()
defer m.mu.Unlock()
@@ -32,7 +34,7 @@ func (m *memoryCommittedBlockIndexCache) addBlockToCache(indexBlockID string, da
return nil
}
func (m *memoryCommittedBlockIndexCache) openIndex(indexBlockID string) (packIndex, error) {
func (m *memoryCommittedBlockIndexCache) openIndex(indexBlockID blob.ID) (packIndex, error) {
m.mu.Lock()
defer m.mu.Unlock()
@@ -44,6 +46,6 @@ func (m *memoryCommittedBlockIndexCache) openIndex(indexBlockID string) (packInd
return v, nil
}
func (m *memoryCommittedBlockIndexCache) expireUnused(used []string) error {
func (m *memoryCommittedBlockIndexCache) expireUnused(used []blob.ID) error {
return nil
}

View File

@@ -8,6 +8,8 @@
"strings"
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
)
// packIndex is a read-only index of packed blocks.
@@ -174,7 +176,7 @@ func (b *index) entryToInfo(blockID string, entryData []byte) (Info, error) {
FormatVersion: e.PackedFormatVersion(),
PackOffset: e.PackedOffset(),
Length: e.PackedLength(),
PackFile: string(packFile),
PackBlobID: blob.ID(packFile),
}, nil
}

View File

@@ -2,18 +2,20 @@
import (
"time"
"github.com/kopia/kopia/repo/blob"
)
// Info is an information about a single block managed by Manager.
type Info struct {
BlockID string `json:"blockID"`
Length uint32 `json:"length"`
TimestampSeconds int64 `json:"time"`
PackFile string `json:"packFile,omitempty"`
PackOffset uint32 `json:"packOffset,omitempty"`
Deleted bool `json:"deleted"`
Payload []byte `json:"payload"` // set for payloads stored inline
FormatVersion byte `json:"formatVersion"`
BlockID string `json:"blockID"`
Length uint32 `json:"length"`
TimestampSeconds int64 `json:"time"`
PackBlobID blob.ID `json:"packFile,omitempty"`
PackOffset uint32 `json:"packOffset,omitempty"`
Deleted bool `json:"deleted"`
Payload []byte `json:"payload"` // set for payloads stored inline
FormatVersion byte `json:"formatVersion"`
}
// Timestamp returns the time when a block was created or deleted.

View File

@@ -11,11 +11,11 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
type listCache struct {
st storage.Storage
st blob.Storage
cacheFile string
listCacheDuration time.Duration
hmacSecret []byte
@@ -30,7 +30,7 @@ func (c *listCache) listIndexBlocks(ctx context.Context) ([]IndexInfo, error) {
log.Debugf("retrieved list of index blocks from cache")
return ci.Blocks, nil
}
} else if err != storage.ErrBlockNotFound {
} else if err != blob.ErrBlobNotFound {
log.Warningf("unable to open cache file: %v", err)
}
}
@@ -70,7 +70,7 @@ func (c *listCache) deleteListCache(ctx context.Context) {
func (c *listCache) readBlocksFromCache(ctx context.Context) (*cachedList, error) {
if !shouldUseListCache(ctx) {
return nil, storage.ErrBlockNotFound
return nil, blob.ErrBlobNotFound
}
ci := &cachedList{}
@@ -78,7 +78,7 @@ func (c *listCache) readBlocksFromCache(ctx context.Context) (*cachedList, error
data, err := ioutil.ReadFile(c.cacheFile)
if err != nil {
if os.IsNotExist(err) {
return nil, storage.ErrBlockNotFound
return nil, blob.ErrBlobNotFound
}
return nil, err
@@ -97,7 +97,7 @@ func (c *listCache) readBlocksFromCache(ctx context.Context) (*cachedList, error
}
func newListCache(ctx context.Context, st storage.Storage, caching CachingOptions) (*listCache, error) {
func newListCache(ctx context.Context, st blob.Storage, caching CachingOptions) (*listCache, error) {
var listCacheFile string
if caching.CacheDirectory != "" {

View File

@@ -10,27 +10,27 @@
func TestMerged(t *testing.T) {
i1, err := indexWithItems(
Info{BlockID: "aabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 11},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "z010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 111},
Info{BlockID: "aabbcc", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 11},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 111},
Info{BlockID: "z010203", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackBlobID: "xx", PackOffset: 111},
)
if err != nil {
t.Fatalf("can't create index: %v", err)
}
i2, err := indexWithItems(
Info{BlockID: "aabbcc", TimestampSeconds: 3, PackFile: "yy", PackOffset: 33},
Info{BlockID: "xaabbcc", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackFile: "xx", PackOffset: 222, Deleted: true},
Info{BlockID: "aabbcc", TimestampSeconds: 3, PackBlobID: "yy", PackOffset: 33},
Info{BlockID: "xaabbcc", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 111},
Info{BlockID: "de1e1e", TimestampSeconds: 4, PackBlobID: "xx", PackOffset: 222, Deleted: true},
)
if err != nil {
t.Fatalf("can't create index: %v", err)
}
i3, err := indexWithItems(
Info{BlockID: "aabbcc", TimestampSeconds: 2, PackFile: "zz", PackOffset: 22},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackFile: "zz", PackOffset: 222},
Info{BlockID: "k010203", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "k020304", TimestampSeconds: 1, PackFile: "xx", PackOffset: 111},
Info{BlockID: "aabbcc", TimestampSeconds: 2, PackBlobID: "zz", PackOffset: 22},
Info{BlockID: "ddeeff", TimestampSeconds: 1, PackBlobID: "zz", PackOffset: 222},
Info{BlockID: "k010203", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 111},
Info{BlockID: "k020304", TimestampSeconds: 1, PackBlobID: "xx", PackOffset: 111},
)
if err != nil {
t.Fatalf("can't create index: %v", err)

View File

@@ -9,6 +9,8 @@
"reflect"
"strings"
"testing"
"github.com/kopia/kopia/repo/blob"
)
func TestPackIndex(t *testing.T) {
@@ -31,11 +33,11 @@ func TestPackIndex(t *testing.T) {
}
return string(fmt.Sprintf("%v%x", prefix2, h.Sum(nil)))
}
deterministicPackFile := func(id int) string {
deterministicPackBlobID := func(id int) blob.ID {
h := sha1.New()
fmt.Fprintf(h, "%v", id)
blockNumber++
return string(fmt.Sprintf("%x", h.Sum(nil)))
return blob.ID(fmt.Sprintf("%x", h.Sum(nil)))
}
deterministicPackedOffset := func(id int) uint32 {
@@ -64,7 +66,7 @@ func TestPackIndex(t *testing.T) {
TimestampSeconds: randomUnixTime(),
Deleted: true,
BlockID: deterministicBlockID("deleted-packed", i),
PackFile: deterministicPackFile(i),
PackBlobID: deterministicPackBlobID(i),
PackOffset: deterministicPackedOffset(i),
Length: deterministicPackedLength(i),
FormatVersion: deterministicFormatVersion(i),
@@ -75,7 +77,7 @@ func TestPackIndex(t *testing.T) {
infos = append(infos, Info{
TimestampSeconds: randomUnixTime(),
BlockID: deterministicBlockID("packed", i),
PackFile: deterministicPackFile(i),
PackBlobID: deterministicPackBlobID(i),
PackOffset: deterministicPackedOffset(i),
Length: deterministicPackedLength(i),
FormatVersion: deterministicFormatVersion(i),

View File

@@ -11,8 +11,8 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
)
// ConnectOptions specifies options when persisting configuration to connect to a repository.
@@ -21,8 +21,8 @@ type ConnectOptions struct {
}
// Connect connects to the repository in the specified storage and persists the configuration and credentials in the file provided.
func Connect(ctx context.Context, configFile string, st storage.Storage, password string, opt ConnectOptions) error {
formatBytes, err := st.GetBlock(ctx, FormatBlockID, 0, -1)
func Connect(ctx context.Context, configFile string, st blob.Storage, password string, opt ConnectOptions) error {
formatBytes, err := st.GetBlob(ctx, FormatBlobID, 0, -1)
if err != nil {
return errors.Wrap(err, "unable to read format block")
}

View File

@@ -13,7 +13,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
const defaultFormatEncryption = "AES256_GCM"
@@ -28,8 +28,8 @@
// are repository format blocks.
var formatBlockChecksumSecret = []byte("kopia-repository")
// FormatBlockID is the identifier of a storage block that describes repository format.
const FormatBlockID = "kopia.repository"
// FormatBlobID is the identifier of a BLOB that describes repository format.
const FormatBlobID = "kopia.repository"
var (
purposeAESKey = []byte("AES")
@@ -70,16 +70,16 @@ func parseFormatBlock(b []byte) (*formatBlock, error) {
// RecoverFormatBlock attempts to recover format block replica from the specified file.
// The format block can be either the prefix or a suffix of the given file.
// optionally the length can be provided (if known) to speed up recovery.
func RecoverFormatBlock(ctx context.Context, st storage.Storage, filename string, optionalLength int64) ([]byte, error) {
func RecoverFormatBlock(ctx context.Context, st blob.Storage, blobID blob.ID, optionalLength int64) ([]byte, error) {
if optionalLength > 0 {
return recoverFormatBlockWithLength(ctx, st, filename, optionalLength)
return recoverFormatBlockWithLength(ctx, st, blobID, optionalLength)
}
var foundMetadata storage.BlockMetadata
var foundMetadata blob.Metadata
if err := st.ListBlocks(ctx, filename, func(bm storage.BlockMetadata) error {
if foundMetadata.BlockID != "" {
return errors.Errorf("found multiple blocks with a given prefix: %v", filename)
if err := st.ListBlobs(ctx, blobID, func(bm blob.Metadata) error {
if foundMetadata.BlobID != "" {
return errors.Errorf("found multiple blocks with a given prefix: %v", blobID)
}
foundMetadata = bm
return nil
@@ -87,23 +87,22 @@ func RecoverFormatBlock(ctx context.Context, st storage.Storage, filename string
return nil, errors.Wrap(err, "error")
}
if foundMetadata.BlockID == "" {
return nil, storage.ErrBlockNotFound
if foundMetadata.BlobID == "" {
return nil, blob.ErrBlobNotFound
}
return recoverFormatBlockWithLength(ctx, st, foundMetadata.BlockID, foundMetadata.Length)
return recoverFormatBlockWithLength(ctx, st, foundMetadata.BlobID, foundMetadata.Length)
}
func recoverFormatBlockWithLength(ctx context.Context, st storage.Storage, filename string, length int64) ([]byte, error) {
func recoverFormatBlockWithLength(ctx context.Context, st blob.Storage, blobID blob.ID, length int64) ([]byte, error) {
chunkLength := int64(65536)
if chunkLength > length {
chunkLength = length
}
if chunkLength > 4 {
// try prefix
prefixChunk, err := st.GetBlock(ctx, filename, 0, chunkLength)
prefixChunk, err := st.GetBlob(ctx, blobID, 0, chunkLength)
if err != nil {
return nil, err
}
@@ -114,7 +113,7 @@ func recoverFormatBlockWithLength(ctx context.Context, st storage.Storage, filen
}
// try the suffix
suffixChunk, err := st.GetBlock(ctx, filename, length-chunkLength, chunkLength)
suffixChunk, err := st.GetBlob(ctx, blobID, length-chunkLength, chunkLength)
if err != nil {
return nil, err
}
@@ -144,7 +143,7 @@ func verifyFormatBlockChecksum(b []byte) ([]byte, bool) {
return data, true
}
func writeFormatBlock(ctx context.Context, st storage.Storage, f *formatBlock) error {
func writeFormatBlock(ctx context.Context, st blob.Storage, f *formatBlock) error {
var buf bytes.Buffer
e := json.NewEncoder(&buf)
e.SetIndent("", " ")
@@ -152,7 +151,7 @@ func writeFormatBlock(ctx context.Context, st storage.Storage, f *formatBlock) e
return errors.Wrap(err, "unable to marshal format block")
}
if err := st.PutBlock(ctx, FormatBlockID, buf.Bytes()); err != nil {
if err := st.PutBlob(ctx, FormatBlobID, buf.Bytes()); err != nil {
return errors.Wrap(err, "unable to write format block")
}

View File

@@ -6,13 +6,13 @@
"reflect"
"testing"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
)
func TestFormatBlockRecovery(t *testing.T) {
data := map[string][]byte{}
st := storagetesting.NewMapStorage(data, nil, nil)
data := blobtesting.DataMap{}
st := blobtesting.NewMapStorage(data, nil, nil)
ctx := context.Background()
someDataBlock := []byte("aadsdasdas")
@@ -24,29 +24,29 @@ func TestFormatBlockRecovery(t *testing.T) {
t.Errorf("unexpected checksummed length: %v, want %v", got, want)
}
assertNoError(t, st.PutBlock(ctx, "some-block-by-itself", checksummed))
assertNoError(t, st.PutBlock(ctx, "some-block-suffix", append(append([]byte(nil), 1, 2, 3), checksummed...)))
assertNoError(t, st.PutBlock(ctx, "some-block-prefix", append(append([]byte(nil), checksummed...), 1, 2, 3)))
assertNoError(t, st.PutBlob(ctx, "some-block-by-itself", checksummed))
assertNoError(t, st.PutBlob(ctx, "some-block-suffix", append(append([]byte(nil), 1, 2, 3), checksummed...)))
assertNoError(t, st.PutBlob(ctx, "some-block-prefix", append(append([]byte(nil), checksummed...), 1, 2, 3)))
// mess up checksum
checksummed[len(checksummed)-3] ^= 1
assertNoError(t, st.PutBlock(ctx, "bad-checksum", checksummed))
assertNoError(t, st.PutBlock(ctx, "zero-len", []byte{}))
assertNoError(t, st.PutBlock(ctx, "one-len", []byte{1}))
assertNoError(t, st.PutBlock(ctx, "two-len", []byte{1, 2}))
assertNoError(t, st.PutBlock(ctx, "three-len", []byte{1, 2, 3}))
assertNoError(t, st.PutBlock(ctx, "four-len", []byte{1, 2, 3, 4}))
assertNoError(t, st.PutBlock(ctx, "five-len", []byte{1, 2, 3, 4, 5}))
assertNoError(t, st.PutBlob(ctx, "bad-checksum", checksummed))
assertNoError(t, st.PutBlob(ctx, "zero-len", []byte{}))
assertNoError(t, st.PutBlob(ctx, "one-len", []byte{1}))
assertNoError(t, st.PutBlob(ctx, "two-len", []byte{1, 2}))
assertNoError(t, st.PutBlob(ctx, "three-len", []byte{1, 2, 3}))
assertNoError(t, st.PutBlob(ctx, "four-len", []byte{1, 2, 3, 4}))
assertNoError(t, st.PutBlob(ctx, "five-len", []byte{1, 2, 3, 4, 5}))
cases := []struct {
block string
err error
blobID blob.ID
err error
}{
{"some-block-by-itself", nil},
{"some-block-suffix", nil},
{"some-block-prefix", nil},
{"bad-checksum", errFormatBlockNotFound},
{"no-such-block", storage.ErrBlockNotFound},
{"no-such-block", blob.ErrBlobNotFound},
{"zero-len", errFormatBlockNotFound},
{"one-len", errFormatBlockNotFound},
{"two-len", errFormatBlockNotFound},
@@ -56,8 +56,8 @@ func TestFormatBlockRecovery(t *testing.T) {
}
for _, tc := range cases {
t.Run(tc.block, func(t *testing.T) {
v, err := RecoverFormatBlock(ctx, st, tc.block, -1)
t.Run(string(tc.blobID), func(t *testing.T) {
v, err := RecoverFormatBlock(ctx, st, tc.blobID, -1)
if tc.err == nil {
if !reflect.DeepEqual(v, someDataBlock) || err != nil {
t.Errorf("unexpected result or error: v=%v err=%v, expected success", v, err)

View File

@@ -7,9 +7,9 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
)
// BuildInfo is the build information of Kopia.
@@ -28,17 +28,17 @@ type NewRepositoryOptions struct {
}
// Initialize creates initial repository data structures in the specified storage with given credentials.
func Initialize(ctx context.Context, st storage.Storage, opt *NewRepositoryOptions, password string) error {
func Initialize(ctx context.Context, st blob.Storage, opt *NewRepositoryOptions, password string) error {
if opt == nil {
opt = &NewRepositoryOptions{}
}
// get the block - expect ErrBlockNotFound
_, err := st.GetBlock(ctx, FormatBlockID, 0, -1)
// get the blob - expect ErrNotFound
_, err := st.GetBlob(ctx, FormatBlobID, 0, -1)
if err == nil {
return errors.Errorf("repository already initialized")
}
if err != storage.ErrBlockNotFound {
if err != blob.ErrBlobNotFound {
return err
}

View File

@@ -5,15 +5,15 @@
"io"
"os"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
)
// LocalConfig is a configuration of Kopia stored in a configuration file.
type LocalConfig struct {
Storage storage.ConnectionInfo `json:"storage"`
Caching block.CachingOptions `json:"caching"`
Storage blob.ConnectionInfo `json:"storage"`
Caching block.CachingOptions `json:"caching"`
}
// repositoryObjectFormat describes the format of objects in a repository.

View File

@@ -15,7 +15,7 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/blob"
)
var log = repologging.Logger("kopia/manifest")
@@ -289,7 +289,7 @@ func (m *Manager) loadCommittedBlocksLocked(ctx context.Context) error {
// success
break
}
if err == storage.ErrBlockNotFound {
if err == blob.ErrBlobNotFound {
// try again, lost a race with another manifest manager which just did compaction
continue
}
@@ -387,7 +387,7 @@ func (m *Manager) loadManifestBlock(ctx context.Context, blockID string) (manife
man := manifest{}
blk, err := m.b.GetBlock(ctx, blockID)
if err != nil {
// do not wrap the error here, we want to propagate original ErrBlockNotFound
// do not wrap the error here, we want to propagate original ErrNotFound
// which causes a retry if we lose list/delete race.
return man, err
}

View File

@@ -10,13 +10,13 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/block"
)
func TestManifest(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
data := blobtesting.DataMap{}
mgr, setupErr := newManagerForTesting(ctx, t, data)
if setupErr != nil {
t.Fatalf("unable to open block manager: %v", setupErr)
@@ -126,8 +126,8 @@ func TestManifest(t *testing.T) {
func TestManifestInitCorruptedBlock(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
st := storagetesting.NewMapStorage(data, nil, nil)
data := blobtesting.DataMap{}
st := blobtesting.NewMapStorage(data, nil, nil)
f := block.FormattingOptions{
Hash: "HMAC-SHA256-128",
@@ -151,8 +151,8 @@ func TestManifestInitCorruptedBlock(t *testing.T) {
bm.Flush(ctx)
// corrupt data at the storage level.
for k, v := range data {
if strings.HasPrefix(k, "p") {
for blobID, v := range data {
if strings.HasPrefix(string(blobID), "p") {
for i := 0; i < len(v); i++ {
v[i] ^= 1
}
@@ -264,8 +264,8 @@ func verifyMatches(ctx context.Context, t *testing.T, mgr *Manager, labels map[s
}
}
func newManagerForTesting(ctx context.Context, t *testing.T, data map[string][]byte) (*Manager, error) {
st := storagetesting.NewMapStorage(data, nil, nil)
func newManagerForTesting(ctx context.Context, t *testing.T, data blobtesting.DataMap) (*Manager, error) {
st := blobtesting.NewMapStorage(data, nil, nil)
bm, err := block.NewManager(ctx, st, block.FormattingOptions{
Hash: "HMAC-SHA256-128",
@@ -281,7 +281,7 @@ func newManagerForTesting(ctx context.Context, t *testing.T, data map[string][]b
func TestManifestInvalidPut(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
data := blobtesting.DataMap{}
mgr, setupErr := newManagerForTesting(ctx, t, data)
if setupErr != nil {
t.Fatalf("unable to open block manager: %v", setupErr)
@@ -306,7 +306,7 @@ func TestManifestInvalidPut(t *testing.T) {
func TestManifestAutoCompaction(t *testing.T) {
ctx := context.Background()
data := map[string][]byte{}
data := blobtesting.DataMap{}
for i := 0; i < 100; i++ {
mgr, setupErr := newManagerForTesting(ctx, t, data)

View File

@@ -12,6 +12,9 @@
"github.com/kopia/kopia/repo/block"
)
// ErrObjectNotFound is returned when an object cannot be found.
var ErrObjectNotFound = errors.New("object not found")
// Reader allows reading, seeking, getting the length of and closing of a repository object.
type Reader interface {
io.Reader
@@ -209,8 +212,11 @@ func (om *Manager) flattenListChunk(rawReader io.Reader) ([]indirectObjectEntry,
func (om *Manager) newRawReader(ctx context.Context, objectID ID) (Reader, error) {
if blockID, ok := objectID.BlockID(); ok {
payload, err := om.blockMgr.GetBlock(ctx, blockID)
if err == block.ErrBlockNotFound {
return nil, ErrObjectNotFound
}
if err != nil {
return nil, err
return nil, errors.Wrap(err, "unexpected block error")
}
return newObjectReaderWithData(payload), nil

View File

@@ -15,8 +15,8 @@
"sync"
"testing"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
)
type fakeBlockManager struct {
@@ -32,7 +32,7 @@ func (f *fakeBlockManager) GetBlock(ctx context.Context, blockID string) ([]byte
return append([]byte(nil), d...), nil
}
return nil, storage.ErrBlockNotFound
return nil, block.ErrBlockNotFound
}
func (f *fakeBlockManager) WriteBlock(ctx context.Context, data []byte, prefix string) (string, error) {
@@ -55,7 +55,7 @@ func (f *fakeBlockManager) BlockInfo(ctx context.Context, blockID string) (block
return block.Info{BlockID: blockID, Length: uint32(len(d))}, nil
}
return block.Info{}, storage.ErrBlockNotFound
return block.Info{}, blob.ErrBlobNotFound
}
func (f *fakeBlockManager) Flush(ctx context.Context) error {
@@ -289,7 +289,7 @@ func TestReaderStoredBlockNotFound(t *testing.T) {
t.Errorf("cannot parse object ID: %v", err)
}
reader, err := om.Open(ctx, objectID)
if err != storage.ErrBlockNotFound || reader != nil {
if err != ErrObjectNotFound || reader != nil {
t.Errorf("unexpected result: reader: %v err: %v", reader, err)
}
}

View File

@@ -9,11 +9,11 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/internal/repologging"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/blob/logging"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/logging"
)
var (
@@ -54,7 +54,7 @@ func Open(ctx context.Context, configFile string, password string, options *Opti
log.Debugf("opening storage: %v", lc.Storage.Type)
st, err := storage.NewStorage(ctx, lc.Storage)
st, err := blob.NewStorage(ctx, lc.Storage)
if err != nil {
return nil, errors.Wrap(err, "cannot open storage")
}
@@ -75,7 +75,7 @@ func Open(ctx context.Context, configFile string, password string, options *Opti
}
// OpenWithConfig opens the repository with a given configuration, avoiding the need for a config file.
func OpenWithConfig(ctx context.Context, st storage.Storage, lc *LocalConfig, password string, options *Options, caching block.CachingOptions) (*Repository, error) {
func OpenWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, password string, options *Options, caching block.CachingOptions) (*Repository, error) {
log.Debugf("reading encrypted format block")
// Read cache block, potentially from cache.
fb, err := readAndCacheFormatBlockBytes(ctx, st, caching.CacheDirectory)
@@ -131,7 +131,7 @@ func OpenWithConfig(ctx context.Context, st storage.Storage, lc *LocalConfig, pa
return &Repository{
Blocks: bm,
Objects: om,
Storage: st,
Blobs: st,
Manifests: manifests,
CacheDirectory: caching.CacheDirectory,
UniqueID: f.UniqueID,
@@ -153,7 +153,7 @@ func SetCachingConfig(ctx context.Context, configFile string, opt block.CachingO
return err
}
st, err := storage.NewStorage(ctx, lc.Storage)
st, err := blob.NewStorage(ctx, lc.Storage)
if err != nil {
return errors.Wrap(err, "cannot open storage")
}
@@ -184,7 +184,7 @@ func SetCachingConfig(ctx context.Context, configFile string, opt block.CachingO
return nil
}
func readAndCacheFormatBlockBytes(ctx context.Context, st storage.Storage, cacheDirectory string) ([]byte, error) {
func readAndCacheFormatBlockBytes(ctx context.Context, st blob.Storage, cacheDirectory string) ([]byte, error) {
cachedFile := filepath.Join(cacheDirectory, "kopia.repository")
if cacheDirectory != "" {
b, err := ioutil.ReadFile(cachedFile)
@@ -194,7 +194,7 @@ func readAndCacheFormatBlockBytes(ctx context.Context, st storage.Storage, cache
}
}
b, err := st.GetBlock(ctx, FormatBlockID, 0, -1)
b, err := st.GetBlob(ctx, FormatBlobID, 0, -1)
if err != nil {
return nil, err
}

View File

@@ -6,17 +6,17 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/manifest"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
)
// Repository represents storage where both content-addressable and user-addressable data is kept.
type Repository struct {
Blobs blob.Storage
Blocks *block.Manager
Objects *object.Manager
Storage storage.Storage
Manifests *manifest.Manager
UniqueID []byte
@@ -35,8 +35,8 @@ func (r *Repository) Close(ctx context.Context) error {
if err := r.Blocks.Flush(ctx); err != nil {
return errors.Wrap(err, "error closing blocks")
}
if err := r.Storage.Close(ctx); err != nil {
return errors.Wrap(err, "error closing storage")
if err := r.Blobs.Close(ctx); err != nil {
return errors.Wrap(err, "error closing blob storage")
}
return nil
}

View File

@@ -15,7 +15,6 @@
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage"
)
func TestWriters(t *testing.T) {
@@ -182,7 +181,7 @@ func TestReaderStoredBlockNotFound(t *testing.T) {
t.Errorf("cannot parse object ID: %v", err)
}
reader, err := env.Repository.Objects.Open(ctx, objectID)
if err != storage.ErrBlockNotFound || reader != nil {
if err != object.ErrObjectNotFound || reader != nil {
t.Errorf("unexpected result: reader: %v err: %v", reader, err)
}
}

View File

@@ -1,2 +0,0 @@
// Package storage implements simple storage of immutable, unstructured binary large objects (BLOBs).
package storage

View File

@@ -1,120 +0,0 @@
package filesystem
import (
"context"
"io/ioutil"
"os"
"reflect"
"sort"
"testing"
"time"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/internal/storagetesting"
)
func TestFileStorage(t *testing.T) {
t.Parallel()
ctx := context.Background()
// Test varioush shard configurations.
for _, shardSpec := range [][]int{
{0},
{1},
{3, 3},
{2},
{1, 1},
{1, 2},
{2, 2, 2},
} {
path, _ := ioutil.TempDir("", "r-fs")
defer os.RemoveAll(path)
r, err := New(ctx, &Options{
Path: path,
DirectoryShards: shardSpec,
})
if r == nil || err != nil {
t.Errorf("unexpected result: %v %v", r, err)
}
storagetesting.VerifyStorage(ctx, t, r)
storagetesting.AssertConnectionInfoRoundTrips(ctx, t, r)
if err := r.Close(ctx); err != nil {
t.Fatalf("err: %v", err)
}
}
}
// TestFileStorageTouch verifies that TouchBlock moves a block's timestamp
// forward only when the existing timestamp is older than the provided
// threshold, by checking the relative timestamp ordering of three blocks.
func TestFileStorageTouch(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	t1 := "392ee1bc299db9f235e046a62625afb84902"
	t2 := "2a7ff4f29eddbcd4c18fa9e73fec20bbb71f"
	t3 := "0dae5918f83e6a24c8b3e274ca1026e43f24"

	path, err := ioutil.TempDir("", "r-fs")
	if err != nil {
		t.Fatalf("unable to create temp directory: %v", err)
	}
	defer os.RemoveAll(path) //nolint:errcheck

	r, err := New(ctx, &Options{
		Path: path,
	})
	if r == nil || err != nil {
		t.Errorf("unexpected result: %v %v", r, err)
	}

	fs := r.(*fsStorage)
	assertNoError(t, fs.PutBlock(ctx, t1, []byte{1}))
	time.Sleep(1 * time.Second) // sleep a bit to accommodate Apple filesystems with low timestamp resolution
	assertNoError(t, fs.PutBlock(ctx, t2, []byte{1}))
	time.Sleep(1 * time.Second)
	assertNoError(t, fs.PutBlock(ctx, t3, []byte{1}))

	verifyBlockTimestampOrder(t, fs, t1, t2, t3)

	assertNoError(t, fs.TouchBlock(ctx, t2, 1*time.Hour)) // has no effect, all timestamps are very new
	verifyBlockTimestampOrder(t, fs, t1, t2, t3)

	assertNoError(t, fs.TouchBlock(ctx, t1, 0)) // moves t1 to the top of the pile
	verifyBlockTimestampOrder(t, fs, t2, t3, t1)

	time.Sleep(1 * time.Second)
	assertNoError(t, fs.TouchBlock(ctx, t2, 0)) // moves t2 to the top of the pile
	verifyBlockTimestampOrder(t, fs, t3, t1, t2)

	time.Sleep(1 * time.Second)
	assertNoError(t, fs.TouchBlock(ctx, t1, 0)) // moves t1 to the top of the pile
	verifyBlockTimestampOrder(t, fs, t3, t2, t1)
}
// verifyBlockTimestampOrder lists all blocks in the provided storage, sorts
// them by timestamp (oldest first) and verifies that their IDs appear in
// exactly the order given by 'want'.
func verifyBlockTimestampOrder(t *testing.T, st storage.Storage, want ...string) {
	blocks, err := storage.ListAllBlocks(context.Background(), st, "")
	if err != nil {
		t.Errorf("error listing blocks: %v", err)
		return
	}

	sort.Slice(blocks, func(i, j int) bool {
		return blocks[i].Timestamp.Before(blocks[j].Timestamp)
	})

	var got []string
	for _, b := range blocks {
		got = append(got, b.BlockID)
	}

	if !reflect.DeepEqual(got, want) {
		// report the extracted IDs (not the raw metadata) so the message
		// is directly comparable with 'want'.
		t.Errorf("incorrect block order: %v, wanted %v", got, want)
	}
}
// assertNoError fails the current test when err is non-nil.
func assertNoError(t *testing.T, err error) {
	t.Helper()
	if err == nil {
		return
	}
	t.Errorf("err: %v", err)
}

View File

@@ -1,108 +0,0 @@
package storage
import (
"context"
"time"
"github.com/pkg/errors"
)
// CancelFunc requests cancellation of a storage operation.
// NOTE(review): nothing in the visible Storage interface returns a
// CancelFunc (ListBlocks is callback-based) - confirm whether this type
// is still referenced anywhere.
type CancelFunc func()
// Storage encapsulates API for connecting to blob storage.
//
// The underlying storage system must provide:
//
// * high durability, availability and bit-rot protection
// * read-after-write - block written using PutBlock() must be immediately readable using GetBlock() and ListBlocks()
// * atomicity - it mustn't be possible to observe partial results of PutBlock() via either GetBlock() or ListBlocks()
// * timestamps that don't go back in time (small clock skew up to minutes is allowed)
// * reasonably low latency for retrievals
//
// The required semantics are provided by existing commercial cloud storage products (Google Cloud, AWS, Azure).
type Storage interface {
	// PutBlock uploads the block with given data to the repository or replaces existing block with the provided
	// id with given contents.
	PutBlock(ctx context.Context, id string, data []byte) error

	// DeleteBlock removes the block from storage. Future GetBlock() operations will fail with ErrBlockNotFound.
	DeleteBlock(ctx context.Context, id string) error

	// GetBlock returns full or partial contents of a block with given ID.
	// If length>0, the function retrieves a range of bytes [offset,offset+length).
	// If length<0, the entire block must be fetched.
	GetBlock(ctx context.Context, id string, offset, length int64) ([]byte, error)

	// ListBlocks invokes the provided callback for each block whose ID has the given prefix.
	// Iteration continues until all matching blocks have been listed or until the callback returns an error.
	ListBlocks(ctx context.Context, prefix string, cb func(bm BlockMetadata) error) error

	// ConnectionInfo returns JSON-serializable data structure containing information required to
	// connect to storage.
	ConnectionInfo() ConnectionInfo

	// Close releases all resources associated with storage.
	Close(ctx context.Context) error
}
// BlockMetadata represents metadata about a single block in a storage.
type BlockMetadata struct {
	BlockID   string    // identifier of the block within the storage
	Length    int64     // length of the block contents, in bytes
	Timestamp time.Time // modification time of the block as reported by the storage
}
// ErrBlockNotFound is returned when a block cannot be found in storage
// (e.g. by GetBlock after the block was deleted or was never written).
var ErrBlockNotFound = errors.New("block not found")
// ListAllBlocks returns BlockMetadata for all blocks in a given storage that have the provided name prefix.
func ListAllBlocks(ctx context.Context, st Storage, prefix string) ([]BlockMetadata, error) {
	var all []BlockMetadata

	// Accumulate every listed block; any partial results gathered before an
	// error are still returned alongside it.
	cb := func(bm BlockMetadata) error {
		all = append(all, bm)
		return nil
	}

	err := st.ListBlocks(ctx, prefix, cb)
	return all, err
}
// ListAllBlocksConsistent lists all blocks with given name prefix in the provided storage until the results are
// consistent. The results are consistent if the list result fetched twice is identical. This guarantees that while
// the first scan was in progress, no new block was added or removed.
// maxAttempts specifies maximum number of list attempts (must be >= 2)
func ListAllBlocksConsistent(ctx context.Context, st Storage, prefix string, maxAttempts int) ([]BlockMetadata, error) {
	var prev []BlockMetadata

	for attempt := 0; attempt < maxAttempts; attempt++ {
		current, err := ListAllBlocks(ctx, st, prefix)
		if err != nil {
			return nil, err
		}

		// Two consecutive identical listings mean the snapshot is stable.
		if attempt > 0 && sameBlocks(current, prev) {
			return current, nil
		}

		prev = current
	}

	return nil, errors.Errorf("unable to achieve consistent snapshot despite %v attempts", maxAttempts)
}
// sameBlocks returns true if b1 & b2 contain the same blocks (ignoring order).
func sameBlocks(b1, b2 []BlockMetadata) bool {
	if len(b1) != len(b2) {
		return false
	}

	// Index the first slice by block ID, then verify every entry of the
	// second slice matches its counterpart exactly.
	byID := make(map[string]BlockMetadata, len(b1))
	for _, bm := range b1 {
		byID[bm.BlockID] = bm
	}

	for _, bm := range b2 {
		if byID[bm.BlockID] != bm {
			return false
		}
	}

	return true
}

View File

@@ -1,57 +0,0 @@
package storage_test
import (
"context"
"testing"
"time"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/repo/storage"
)
// TestListAllBlocksConsistent verifies that ListAllBlocksConsistent retries
// until two consecutive listings agree, so a block added concurrently with
// the first scan still appears in the final result.
func TestListAllBlocksConsistent(t *testing.T) {
	ctx := context.Background()
	data := map[string][]byte{}
	st := storagetesting.NewMapStorage(data, nil, time.Now)
	// Seed three blocks matching the "foo" prefix.
	st.PutBlock(ctx, "foo1", []byte{1, 2, 3}) //nolint:errcheck
	st.PutBlock(ctx, "foo2", []byte{1, 2, 3}) //nolint:errcheck
	st.PutBlock(ctx, "foo3", []byte{1, 2, 3}) //nolint:errcheck
	// set up faulty storage that will add a block while a scan is in progress.
	// The "ListBlocksItem" fault fires once during the first listing, making
	// the first and second scans disagree and forcing a retry.
	f := &storagetesting.FaultyStorage{
		Base: st,
		Faults: map[string][]*storagetesting.Fault{
			"ListBlocksItem": {
				{ErrCallback: func() error {
					st.PutBlock(ctx, "foo0", []byte{1, 2, 3}) //nolint:errcheck
					return nil
				}},
			},
		},
	}
	r, err := storage.ListAllBlocksConsistent(ctx, f, "foo", 3)
	if err != nil {
		t.Fatalf("error: %v", err)
	}
	// make sure we get the list with 4 items, not 3.
	if got, want := len(r), 4; got != want {
		t.Errorf("unexpected list result count: %v, want %v", got, want)
	}
}
// TestListAllBlocksConsistentEmpty verifies that a consistent listing of an
// empty storage succeeds and yields zero blocks.
func TestListAllBlocksConsistentEmpty(t *testing.T) {
	ctx := context.Background()

	st := storagetesting.NewMapStorage(map[string][]byte{}, nil, time.Now)

	r, err := storage.ListAllBlocksConsistent(ctx, st, "foo", 3)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	if got, want := len(r), 0; got != want {
		t.Errorf("unexpected list result count: %v, want %v", got, want)
	}
}

View File

@@ -30,5 +30,5 @@ func (r *Repository) Upgrade(ctx context.Context) error {
}
log.Infof("writing updated format block...")
return writeFormatBlock(ctx, r.Storage, f)
return writeFormatBlock(ctx, r.Blobs, f)
}

View File

@@ -16,7 +16,7 @@ The following diagram illustrates the key components of Kopia:
### Binary Large Object Storage (BLOB)
BLOB storage is the place where your data is ultimately stored. Any type that implements simple Go [API](https://godoc.org/github.com/kopia/kopia/repo/storage#Storage) can be used as Kopia's blob storage.
BLOB storage is the place where your data is ultimately stored. Any type that implements simple Go [API](https://godoc.org/github.com/kopia/kopia/repo/blob#Storage) can be used as Kopia's blob storage.
Kopia currently supports the following storage providers:
@@ -29,7 +29,7 @@ Cloud storage solutions are a great choice because they provide high availabilit
Kopia does not require low-latency storage, it uses caching and other optimizations to be able to work efficiently with high-latency backends.
The API for BLOB storage can be found in https://godoc.org/github.com/kopia/kopia/repo/storage
The API for BLOB storage can be found in https://godoc.org/github.com/kopia/kopia/repo/blob
### Content-Addressable Block Storage (CABS)

View File

@@ -12,8 +12,8 @@
"github.com/kopia/kopia/internal/mockfs"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob/filesystem"
"github.com/kopia/kopia/repo/object"
"github.com/kopia/kopia/repo/storage/filesystem"
"github.com/kopia/kopia/snapshot"
)

View File

@@ -165,7 +165,7 @@ func TestEndToEnd(t *testing.T) {
lines := e.runAndVerifyOutputLineCount(t, 2, "blockindex", "ls")
for _, l := range lines {
indexFile := strings.Split(l, " ")[0]
e.runAndExpectSuccess(t, "storage", "delete", indexFile)
e.runAndExpectSuccess(t, "blob", "delete", indexFile)
}
// there should be no index files at this point
@@ -186,7 +186,7 @@ func TestEndToEnd(t *testing.T) {
t.Run("RepairFormatBlock", func(t *testing.T) {
// remove kopia.repository
e.runAndExpectSuccess(t, "storage", "rm", "kopia.repository")
e.runAndExpectSuccess(t, "blob", "rm", "kopia.repository")
e.runAndExpectSuccess(t, "repo", "disconnect")
// this will fail because the format block in the repository is not found

View File

@@ -17,9 +17,8 @@
"github.com/pkg/errors"
"github.com/kopia/kopia/repo"
"github.com/kopia/kopia/repo/blob/filesystem"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
"github.com/kopia/kopia/repo/storage/filesystem"
)
const masterPassword = "foo-bar-baz-1234"
@@ -233,7 +232,7 @@ func readKnownBlock(ctx context.Context, t *testing.T, r *repo.Repository) error
knownBlocksMutex.Unlock()
_, err := r.Blocks.GetBlock(ctx, blockID)
if err == nil || err == storage.ErrBlockNotFound {
if err == nil || err == block.ErrBlockNotFound {
return nil
}
@@ -254,7 +253,7 @@ func listAndReadAllBlocks(ctx context.Context, t *testing.T, r *repo.Repository)
for _, bi := range blocks {
_, err := r.Blocks.GetBlock(ctx, bi)
if err != nil {
if err == storage.ErrBlockNotFound && strings.HasPrefix(bi, "m") {
if err == block.ErrBlockNotFound && strings.HasPrefix(bi, "m") {
// this is ok, sometimes manifest manager will perform compaction and 'm' blocks will be marked as deleted
continue
}

View File

@@ -9,9 +9,9 @@
"testing"
"time"
"github.com/kopia/kopia/internal/storagetesting"
"github.com/kopia/kopia/internal/blobtesting"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/block"
"github.com/kopia/kopia/repo/storage"
)
const goroutineCount = 16
@@ -21,9 +21,9 @@ func TestStressBlockManager(t *testing.T) {
t.Skip("skipping stress test during short tests")
}
data := map[string][]byte{}
keyTimes := map[string]time.Time{}
memst := storagetesting.NewMapStorage(data, keyTimes, time.Now)
data := blobtesting.DataMap{}
keyTimes := map[blob.ID]time.Time{}
memst := blobtesting.NewMapStorage(data, keyTimes, time.Now)
var duration = 3 * time.Second
if os.Getenv("KOPIA_LONG_STRESS_TEST") != "" {
@@ -33,7 +33,7 @@ func TestStressBlockManager(t *testing.T) {
stressTestWithStorage(t, memst, duration)
}
func stressTestWithStorage(t *testing.T, st storage.Storage, duration time.Duration) {
func stressTestWithStorage(t *testing.T, st blob.Storage, duration time.Duration) {
ctx := context.Background()
openMgr := func() (*block.Manager, error) {