more renames

This commit is contained in:
Jarek Kowalski
2016-04-01 19:50:50 -07:00
parent 0a6cb9173c
commit 8a02f23fbd
27 changed files with 279 additions and 276 deletions

View File

@@ -23,3 +23,6 @@ test:
vtest:
go test -v -timeout 30s github.com/kopia/kopia/...
doc:
godoc -http=:33333

View File

@@ -1,17 +1,17 @@
package storage
package blob
import (
"encoding/json"
)
// RepositoryConfiguration is a JSON-serializable description of Repository and its configuration.
type RepositoryConfiguration struct {
// StorageConfiguration is a JSON-serializable description of Storage and its configuration.
type StorageConfiguration struct {
Type string
Config interface{}
}
// UnmarshalJSON parses the JSON-encoded data into RepositoryConfiguration.
func (c *RepositoryConfiguration) UnmarshalJSON(b []byte) error {
// UnmarshalJSON parses the JSON-encoded data into StorageConfiguration.
func (c *StorageConfiguration) UnmarshalJSON(b []byte) error {
raw := struct {
Type string `json:"type"`
Data json.RawMessage `json:"config"`
@@ -30,8 +30,8 @@ func (c *RepositoryConfiguration) UnmarshalJSON(b []byte) error {
return nil
}
// MarshalJSON returns JSON-encoded repository configuration.
func (c *RepositoryConfiguration) MarshalJSON() ([]byte, error) {
// MarshalJSON returns JSON-encoded storage configuration.
func (c *StorageConfiguration) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string `json:"type"`
Data interface{} `json:"config"`

1
blob/constants.go Normal file
View File

@@ -0,0 +1 @@
package blob

2
blob/doc.go Normal file
View File

@@ -0,0 +1,2 @@
// Package blob implements simple storage of immutable, unstructured binary large objects (BLOBs).
package blob

View File

@@ -1,16 +1,16 @@
package storage
package blob
import "errors"
var (
// ErrBlockNotFound is returned when a block cannot be found in repository.
// ErrBlockNotFound is returned when a block cannot be found in storage.
ErrBlockNotFound = errors.New("block not found")
// ErrInvalidChecksum is returned when a repository block is invalid, which may indicate
// ErrInvalidChecksum is returned when a storage block is invalid, which may indicate
// that decryption has failed.
ErrInvalidChecksum = errors.New("invalid checksum")
// ErrWriteLimitExceeded is returned when the maximum amount of data has already been written
// to the repository.
// to the blob.
ErrWriteLimitExceeded = errors.New("write limit exceeded")
)

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"fmt"
@@ -12,9 +12,9 @@
)
const (
fsRepositoryType = "fs"
fsStorageType = "fs"
fsRepositoryChunkSuffix = ".f"
fsStorageChunkSuffix = ".f"
)
var (
@@ -23,12 +23,12 @@
fsDefaultDirMode os.FileMode = 0775
)
type fsRepository struct {
FSRepositoryOptions
type fsStorage struct {
FSStorageOptions
}
// FSRepositoryOptions defines options for Filesystem-backed repository.
type FSRepositoryOptions struct {
// FSStorageOptions defines options for Filesystem-backed blob.
type FSStorageOptions struct {
Path string `json:"path"`
DirectoryShards []int `json:"dirShards"`
@@ -40,7 +40,7 @@ type FSRepositoryOptions struct {
FileGID *int `json:"gid,omitempty"`
}
func (fs *fsRepository) BlockExists(blockID BlockID) (bool, error) {
func (fs *fsStorage) BlockExists(blockID BlockID) (bool, error) {
_, path := fs.getShardedPathAndFilePath(blockID)
_, err := os.Stat(path)
if err == nil {
@@ -54,7 +54,7 @@ func (fs *fsRepository) BlockExists(blockID BlockID) (bool, error) {
return false, err
}
func (fs *fsRepository) GetBlock(blockID BlockID) ([]byte, error) {
func (fs *fsStorage) GetBlock(blockID BlockID) ([]byte, error) {
_, path := fs.getShardedPathAndFilePath(blockID)
d, err := ioutil.ReadFile(path)
if err == nil {
@@ -69,18 +69,18 @@ func (fs *fsRepository) GetBlock(blockID BlockID) ([]byte, error) {
}
func getBlockIDFromFileName(name string) (BlockID, bool) {
if strings.HasSuffix(name, fsRepositoryChunkSuffix) {
return BlockID(name[0 : len(name)-len(fsRepositoryChunkSuffix)]), true
if strings.HasSuffix(name, fsStorageChunkSuffix) {
return BlockID(name[0 : len(name)-len(fsStorageChunkSuffix)]), true
}
return BlockID(""), false
}
func makeFileName(blockID BlockID) string {
return string(blockID) + fsRepositoryChunkSuffix
return string(blockID) + fsStorageChunkSuffix
}
func (fs *fsRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
func (fs *fsStorage) ListBlocks(prefix BlockID) chan (BlockMetadata) {
result := make(chan (BlockMetadata))
prefixString := string(prefix)
@@ -127,7 +127,7 @@ func (fs *fsRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
return result
}
func (fs *fsRepository) PutBlock(blockID BlockID, data io.ReadCloser, options PutOptions) error {
func (fs *fsStorage) PutBlock(blockID BlockID, data io.ReadCloser, options PutOptions) error {
// Close the data reader regardless of whether we use it or not.
defer data.Close()
@@ -165,7 +165,7 @@ func (fs *fsRepository) PutBlock(blockID BlockID, data io.ReadCloser, options Pu
return nil
}
func (fs *fsRepository) DeleteBlock(blockID BlockID) error {
func (fs *fsStorage) DeleteBlock(blockID BlockID) error {
_, path := fs.getShardedPathAndFilePath(blockID)
err := os.Remove(path)
if err == nil || os.IsNotExist(err) {
@@ -175,11 +175,11 @@ func (fs *fsRepository) DeleteBlock(blockID BlockID) error {
return err
}
func (fs *fsRepository) Flush() error {
func (fs *fsStorage) Flush() error {
return nil
}
func (fs *fsRepository) getShardDirectory(blockID BlockID) (string, BlockID) {
func (fs *fsStorage) getShardDirectory(blockID BlockID) (string, BlockID) {
shardPath := fs.Path
blockIDString := string(blockID)
if len(blockIDString) < 20 {
@@ -193,7 +193,7 @@ func (fs *fsRepository) getShardDirectory(blockID BlockID) (string, BlockID) {
return shardPath, BlockID(blockIDString)
}
func (fs *fsRepository) getShardedPathAndFilePath(blockID BlockID) (string, string) {
func (fs *fsStorage) getShardedPathAndFilePath(blockID BlockID) (string, string) {
shardPath, blockID := fs.getShardDirectory(blockID)
result := filepath.Join(shardPath, makeFileName(blockID))
return shardPath, result
@@ -216,23 +216,23 @@ func parseShardString(shardString string) ([]int, error) {
return result, nil
}
func (fs *fsRepository) Configuration() RepositoryConfiguration {
return RepositoryConfiguration{
fsRepositoryType,
&fs.FSRepositoryOptions,
func (fs *fsStorage) Configuration() StorageConfiguration {
return StorageConfiguration{
fsStorageType,
&fs.FSStorageOptions,
}
}
// NewFSRepository creates new fs-backed repository in a specified directory.
func NewFSRepository(options *FSRepositoryOptions) (Repository, error) {
// NewFSStorage creates new fs-backed storage in a specified directory.
func NewFSStorage(options *FSStorageOptions) (Storage, error) {
var err error
if _, err = os.Stat(options.Path); err != nil {
return nil, fmt.Errorf("cannot access repository path: %v", err)
return nil, fmt.Errorf("cannot access storage path: %v", err)
}
r := &fsRepository{
FSRepositoryOptions: *options,
r := &fsStorage{
FSStorageOptions: *options,
}
if r.DirectoryShards == nil {
@@ -251,10 +251,10 @@ func NewFSRepository(options *FSRepositoryOptions) (Repository, error) {
}
func init() {
AddSupportedRepository(
fsRepositoryType,
func() interface{} { return &FSRepositoryOptions{} },
func(cfg interface{}) (Repository, error) {
return NewFSRepository(cfg.(*FSRepositoryOptions))
AddSupportedStorage(
fsStorageType,
func() interface{} { return &FSStorageOptions{} },
func(cfg interface{}) (Storage, error) {
return NewFSStorage(cfg.(*FSStorageOptions))
})
}

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"encoding/json"
@@ -22,16 +22,16 @@
)
const (
gcsRepositoryType = "gcs"
gcsTokenCacheDir = ".kopia"
gcsStorageType = "gcs"
gcsTokenCacheDir = ".kopia"
// Those are not really secret, since the app is installed.
googleCloudClientID = "194841383482-nmn10h4mnllnsvou7qr55tfh5jsmtkap.apps.googleusercontent.com"
googleCloudClientSecret = "ZL52E96Q7iRCD9YXVA7U6UaI"
)
// GCSRepositoryOptions defines options Google Cloud Storage-backed repository.
type GCSRepositoryOptions struct {
// GCSStorageOptions defines options for Google Cloud Storage-backed storage.
type GCSStorageOptions struct {
// BucketName is the name of the GCS bucket where data is stored.
BucketName string `json:"bucket"`
@@ -39,13 +39,13 @@ type GCSRepositoryOptions struct {
Prefix string `json:"prefix,omitempty"`
// TokenCacheFile is the name of the file that will persist the OAuth2 token.
// If not specified, the token will be persisted in GCSRepositoryOptions.
// If not specified, the token will be persisted in GCSStorageOptions.
TokenCacheFile string `json:"tokenCacheFile,omitempty"`
// Token stored the OAuth2 token (when TokenCacheFile is empty)
Token *oauth2.Token `json:"token,omitempty"`
// ReadOnly causes the repository to be configured without write permissions, to prevent accidental
// ReadOnly causes the storage to be configured without write permissions, to prevent accidental
// modifications to the data.
ReadOnly bool `json:"readonly"`
@@ -53,12 +53,12 @@ type GCSRepositoryOptions struct {
IgnoreDefaultCredentials bool `json:"ignoreDefaultCredentials"`
}
type gcsRepository struct {
GCSRepositoryOptions
type gcsStorage struct {
GCSStorageOptions
objectsService *gcsclient.ObjectsService
}
func (gcs *gcsRepository) BlockExists(b BlockID) (bool, error) {
func (gcs *gcsStorage) BlockExists(b BlockID) (bool, error) {
_, err := gcs.objectsService.Get(gcs.BucketName, gcs.getObjectNameString(b)).Do()
if err == nil {
@@ -68,7 +68,7 @@ func (gcs *gcsRepository) BlockExists(b BlockID) (bool, error) {
return false, err
}
func (gcs *gcsRepository) GetBlock(b BlockID) ([]byte, error) {
func (gcs *gcsStorage) GetBlock(b BlockID) ([]byte, error) {
v, err := gcs.objectsService.Get(gcs.BucketName, gcs.getObjectNameString(b)).Download()
if err != nil {
if err, ok := err.(*googleapi.Error); ok {
@@ -85,7 +85,7 @@ func (gcs *gcsRepository) GetBlock(b BlockID) ([]byte, error) {
return ioutil.ReadAll(v.Body)
}
func (gcs *gcsRepository) PutBlock(b BlockID, data io.ReadCloser, options PutOptions) error {
func (gcs *gcsStorage) PutBlock(b BlockID, data io.ReadCloser, options PutOptions) error {
object := gcsclient.Object{
Name: gcs.getObjectNameString(b),
}
@@ -98,7 +98,7 @@ func (gcs *gcsRepository) PutBlock(b BlockID, data io.ReadCloser, options PutOpt
return err
}
func (gcs *gcsRepository) DeleteBlock(b BlockID) error {
func (gcs *gcsStorage) DeleteBlock(b BlockID) error {
err := gcs.objectsService.Delete(gcs.BucketName, string(b)).Do()
if err != nil {
return fmt.Errorf("unable to delete block %s: %v", b, err)
@@ -107,11 +107,11 @@ func (gcs *gcsRepository) DeleteBlock(b BlockID) error {
return nil
}
func (gcs *gcsRepository) getObjectNameString(b BlockID) string {
func (gcs *gcsStorage) getObjectNameString(b BlockID) string {
return gcs.Prefix + string(b)
}
func (gcs *gcsRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
func (gcs *gcsStorage) ListBlocks(prefix BlockID) chan (BlockMetadata) {
ch := make(chan BlockMetadata, 100)
go func() {
@@ -148,14 +148,14 @@ func (gcs *gcsRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
return ch
}
func (gcs *gcsRepository) Flush() error {
func (gcs *gcsStorage) Flush() error {
return nil
}
func (gcs *gcsRepository) Configuration() RepositoryConfiguration {
return RepositoryConfiguration{
gcsRepositoryType,
&gcs.GCSRepositoryOptions,
func (gcs *gcsStorage) Configuration() StorageConfiguration {
return StorageConfiguration{
gcsStorageType,
&gcs.GCSStorageOptions,
}
}
@@ -180,17 +180,17 @@ func saveToken(file string, token *oauth2.Token) {
json.NewEncoder(f).Encode(token)
}
// NewGCSRepository creates new Google Cloud Storage-backed repository with specified options:
// NewGCSStorage creates new Google Cloud Storage-backed storage with specified options:
//
// - the 'BucketName' field is required and all other parameters are optional.
//
// By default the connection reuses credentials managed by (https://cloud.google.com/sdk/),
// but this can be disabled by setting IgnoreDefaultCredentials to true.
func NewGCSRepository(options *GCSRepositoryOptions) (Repository, error) {
func NewGCSStorage(options *GCSStorageOptions) (Storage, error) {
ctx := context.TODO()
gcs := &gcsRepository{
GCSRepositoryOptions: *options,
gcs := &gcsStorage{
GCSStorageOptions: *options,
}
if gcs.BucketName == "" {
@@ -227,7 +227,7 @@ func NewGCSRepository(options *GCSRepositoryOptions) (Repository, error) {
token = gcs.Token
} else {
if gcs.TokenCacheFile == "" {
// Cache file not provided, token will be saved in repository configuration.
// Cache file not provided, token will be saved in storage configuration.
token, err = tokenFromWeb(ctx, config)
if err != nil {
return nil, fmt.Errorf("cannot retrieve OAuth2 token: %v", err)
@@ -373,12 +373,12 @@ func authPrompt(url string, state string) (authenticationCode string, err error)
}
func init() {
AddSupportedRepository(
gcsRepositoryType,
AddSupportedStorage(
gcsStorageType,
func() interface{} {
return &GCSRepositoryOptions{}
return &GCSStorageOptions{}
},
func(cfg interface{}) (Repository, error) {
return NewGCSRepository(cfg.(*GCSRepositoryOptions))
func(cfg interface{}) (Storage, error) {
return NewGCSStorage(cfg.(*GCSStorageOptions))
})
}

View File

@@ -1,19 +1,19 @@
package storage
package blob
import (
"io"
"sync/atomic"
)
type writeLimitRepository struct {
Repository
type writeLimitStorage struct {
Storage
remainingBytes int64
}
type writeLimitReadCloser struct {
io.ReadCloser
repo *writeLimitRepository
repo *writeLimitStorage
}
func (s *writeLimitReadCloser) Read(b []byte) (int, error) {
@@ -22,24 +22,24 @@ func (s *writeLimitReadCloser) Read(b []byte) (int, error) {
return n, err
}
func (s *writeLimitRepository) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
func (s *writeLimitStorage) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
if !options.IgnoreLimits {
if atomic.LoadInt64(&s.remainingBytes) <= 0 {
return ErrWriteLimitExceeded
}
}
return s.Repository.PutBlock(id, &writeLimitReadCloser{
return s.Storage.PutBlock(id, &writeLimitReadCloser{
ReadCloser: data,
repo: s,
}, options)
}
// NewWriteLimitWrapper returns a Repository wrapper that limits the number of bytes written to a cas.
// NewWriteLimitWrapper returns a Storage wrapper that limits the number of bytes written to a cas.
// Once reached, the writes will return ErrWriteLimitExceeded
func NewWriteLimitWrapper(wrapped Repository, bytes int64) Repository {
return &writeLimitRepository{
Repository: wrapped,
func NewWriteLimitWrapper(wrapped Storage, bytes int64) Storage {
return &writeLimitStorage{
Storage: wrapped,
remainingBytes: bytes,
}
}

53
blob/logging.go Normal file
View File

@@ -0,0 +1,53 @@
package blob
import (
"io"
"log"
)
// loggingStorage is a Storage wrapper that logs every call made to the
// underlying (embedded) Storage implementation. Methods not overridden
// here are delegated directly via embedding, without logging.
type loggingStorage struct {
	Storage
}
// BlockExists delegates the existence check to the wrapped Storage and
// logs the block ID together with the result and error.
func (s *loggingStorage) BlockExists(id BlockID) (bool, error) {
	exists, err := s.Storage.BlockExists(id)
	log.Printf("BlockExists(%#v)=%#v,%#v", id, exists, err)
	return exists, err
}
// GetBlock delegates the read to the wrapped Storage and logs the outcome.
// Payloads shorter than 20 bytes are logged verbatim; longer payloads are
// logged by length only, to keep log output bounded.
func (s *loggingStorage) GetBlock(id BlockID) ([]byte, error) {
	data, err := s.Storage.GetBlock(id)
	if len(data) < 20 {
		log.Printf("GetBlock(%#v)=(%#v, %#v)", id, data, err)
	} else {
		log.Printf("GetBlock(%#v)=({%#v bytes}, %#v)", id, len(data), err)
	}
	return data, err
}
// PutBlock delegates the write to the wrapped Storage, then logs the block
// ID, the PutOptions used and the resulting error (if any).
func (s *loggingStorage) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
	putErr := s.Storage.PutBlock(id, data, options)
	log.Printf("PutBlock(%#v, %#v)=%#v", id, options, putErr)
	return putErr
}
// DeleteBlock delegates the deletion to the wrapped Storage and logs the
// block ID together with the resulting error (if any).
func (s *loggingStorage) DeleteBlock(id BlockID) error {
	delErr := s.Storage.DeleteBlock(id)
	log.Printf("DeleteBlock(%#v)=%#v", id, delErr)
	return delErr
}
// ListBlocks logs the requested prefix and delegates to the wrapped Storage.
// Only the start of the listing is logged; individual results streaming
// through the returned channel are not.
func (s *loggingStorage) ListBlocks(prefix BlockID) chan (BlockMetadata) {
	log.Printf("ListBlocks(%#v)", prefix)
	ch := s.Storage.ListBlocks(prefix)
	return ch
}
// Flush logs the call and delegates the flush to the wrapped Storage.
func (s *loggingStorage) Flush() error {
	log.Printf("Flush()")
	flushErr := s.Storage.Flush()
	return flushErr
}
// NewLoggingWrapper returns a Storage wrapper that logs all storage commands.
func NewLoggingWrapper(wrapped Storage) Storage {
	return &loggingStorage{Storage: wrapped}
}

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"io"
@@ -9,23 +9,23 @@
"time"
)
type mapRepository struct {
type mapStorage struct {
data map[string][]byte
mutex sync.RWMutex
}
func (s *mapRepository) Configuration() RepositoryConfiguration {
return RepositoryConfiguration{}
func (s *mapStorage) Configuration() StorageConfiguration {
return StorageConfiguration{}
}
func (s *mapRepository) BlockExists(id BlockID) (bool, error) {
func (s *mapStorage) BlockExists(id BlockID) (bool, error) {
s.mutex.RLock()
defer s.mutex.RUnlock()
_, ok := s.data[string(id)]
return ok, nil
}
func (s *mapRepository) GetBlock(id BlockID) ([]byte, error) {
func (s *mapStorage) GetBlock(id BlockID) ([]byte, error) {
s.mutex.RLock()
defer s.mutex.RUnlock()
@@ -37,7 +37,7 @@ func (s *mapRepository) GetBlock(id BlockID) ([]byte, error) {
return nil, ErrBlockNotFound
}
func (s *mapRepository) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
func (s *mapStorage) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
s.mutex.Lock()
defer s.mutex.Unlock()
@@ -51,7 +51,7 @@ func (s *mapRepository) PutBlock(id BlockID, data io.ReadCloser, options PutOpti
return nil
}
func (s *mapRepository) DeleteBlock(id BlockID) error {
func (s *mapStorage) DeleteBlock(id BlockID) error {
s.mutex.Lock()
defer s.mutex.Unlock()
@@ -59,7 +59,7 @@ func (s *mapRepository) DeleteBlock(id BlockID) error {
return nil
}
func (s *mapRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
func (s *mapStorage) ListBlocks(prefix BlockID) chan (BlockMetadata) {
ch := make(chan (BlockMetadata))
fixedTime := time.Now()
go func() {
@@ -88,12 +88,12 @@ func (s *mapRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
return ch
}
func (s *mapRepository) Flush() error {
func (s *mapStorage) Flush() error {
return nil
}
// NewMapRepository returns an implementation of Repository backed by the contents of given map.
// NewMapStorage returns an implementation of Storage backed by the contents of given map.
// Used primarily for testing.
func NewMapRepository(data map[string][]byte) Repository {
return &mapRepository{data: data}
func NewMapStorage(data map[string][]byte) Storage {
return &mapStorage{data: data}
}

35
blob/registry.go Normal file
View File

@@ -0,0 +1,35 @@
package blob
import "fmt"
var (
	// factories maps a storage type name (e.g. "fs", "gcs") to the
	// factory used to construct that kind of Storage. Populated via
	// AddSupportedStorage, typically from package init() functions.
	factories = map[string]storageFactory{}
)
// storageFactory allows creation of storage backends in a generic way.
type storageFactory struct {
	// defaultConfigFunc returns a fresh, zero-valued configuration struct
	// for this storage type (used as an unmarshaling target).
	defaultConfigFunc func() interface{}
	// createStorageFunc builds a Storage from a configuration previously
	// produced by defaultConfigFunc.
	createStorageFunc func(interface{}) (Storage, error)
}
// AddSupportedStorage registers factory function to create storage with a given type name.
func AddSupportedStorage(
	storageType string,
	defaultConfigFunc func() interface{},
	createStorageFunc func(interface{}) (Storage, error)) {
	f := storageFactory{
		defaultConfigFunc: defaultConfigFunc,
		createStorageFunc: createStorageFunc,
	}
	factories[storageType] = f
}
// NewStorage creates new storage based on StorageConfiguration.
// The storage type must be previously registered using AddSupportedStorage;
// otherwise an "unknown storage type" error is returned.
func NewStorage(cfg StorageConfiguration) (Storage, error) {
	factory, ok := factories[cfg.Type]
	if !ok {
		return nil, fmt.Errorf("unknown storage type: %s", cfg.Type)
	}
	return factory.createStorageFunc(cfg.Config)
}

View File

@@ -1,21 +1,21 @@
package storage
package blob
import (
"io"
"time"
)
// BlockID represents the identifier of a block stored in a Repository.
// BlockID represents the identifier of a block stored in BLOB Storage.
type BlockID string
// PutOptions controls the behavior of Repository.PutBlock()
// PutOptions controls the behavior of Storage.PutBlock()
type PutOptions struct {
Overwrite bool
IgnoreLimits bool
}
// Repository encapsulates API for connecting to blob storage
type Repository interface {
// Storage encapsulates API for connecting to blob storage
type Storage interface {
// BlockExists determines whether the specified block exists.
PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error
DeleteBlock(id BlockID) error
@@ -23,10 +23,10 @@ type Repository interface {
BlockExists(id BlockID) (bool, error)
GetBlock(id BlockID) ([]byte, error)
ListBlocks(prefix BlockID) chan (BlockMetadata)
Configuration() RepositoryConfiguration
Configuration() StorageConfiguration
}
// BlockMetadata represents metadata about a single block in a repository.
// BlockMetadata represents metadata about a single block in a blob.
// If Error field is set, no other field values should be assumed to be correct.
type BlockMetadata struct {
BlockID BlockID

View File

@@ -1,4 +1,4 @@
package storage
package blob
import (
"bytes"
@@ -7,25 +7,25 @@
"testing"
)
func TestLoggingRepository(t *testing.T) {
func TestLoggingStorage(t *testing.T) {
data := map[string][]byte{}
r := NewLoggingWrapper(NewMapRepository(data))
r := NewLoggingWrapper(NewMapStorage(data))
if r == nil {
t.Errorf("unexpected result: %v", r)
}
verifyRepository(t, r)
verifyStorage(t, r)
}
func TestMapRepository(t *testing.T) {
func TestMapStorage(t *testing.T) {
data := map[string][]byte{}
r := NewMapRepository(data)
r := NewMapStorage(data)
if r == nil {
t.Errorf("unexpected result: %v", r)
}
verifyRepository(t, r)
verifyStorage(t, r)
}
func TestFileRepository(t *testing.T) {
func TestFileStorage(t *testing.T) {
// Test various shard configurations.
for _, shardSpec := range [][]int{
[]int{0},
@@ -39,18 +39,18 @@ func TestFileRepository(t *testing.T) {
path, _ := ioutil.TempDir("", "r-fs")
defer os.RemoveAll(path)
r, err := NewFSRepository(&FSRepositoryOptions{
r, err := NewFSStorage(&FSStorageOptions{
Path: path,
DirectoryShards: shardSpec,
})
if r == nil || err != nil {
t.Errorf("unexpected result: %v %v", r, err)
}
verifyRepository(t, r)
verifyStorage(t, r)
}
}
func verifyRepository(t *testing.T, r Repository) {
func verifyStorage(t *testing.T, r Storage) {
blocks := []struct {
blk BlockID
contents []byte

View File

@@ -1,7 +1,7 @@
// Wrapper which implements asynchronous (write-back) PutBlock and DeleteBlock operation
// useful for slower backends (cloud).
package storage
package blob
import (
"fmt"
@@ -10,8 +10,8 @@
"sync/atomic"
)
type writeBackRepository struct {
Repository
type writeBackStorage struct {
Storage
channel chan writeBackRequest
deferredError atomic.Value
@@ -25,7 +25,7 @@ type writeBackRequest struct {
debugInfo string
}
func (wb *writeBackRepository) PutBlock(blockID BlockID, data io.ReadCloser, options PutOptions) error {
func (wb *writeBackStorage) PutBlock(blockID BlockID, data io.ReadCloser, options PutOptions) error {
err := wb.getDeferredError()
if err != nil {
data.Close()
@@ -34,14 +34,14 @@ func (wb *writeBackRepository) PutBlock(blockID BlockID, data io.ReadCloser, opt
wb.channel <- writeBackRequest{
action: func() error {
return wb.Repository.PutBlock(blockID, data, options)
return wb.Storage.PutBlock(blockID, data, options)
},
debugInfo: fmt.Sprintf("Put(%s)", blockID),
}
return nil
}
func (wb *writeBackRepository) getDeferredError() error {
func (wb *writeBackStorage) getDeferredError() error {
deferredError := wb.deferredError.Load()
if deferredError != nil {
return deferredError.(error)
@@ -50,17 +50,17 @@ func (wb *writeBackRepository) getDeferredError() error {
return nil
}
func (wb *writeBackRepository) DeleteBlock(blockID BlockID) error {
func (wb *writeBackStorage) DeleteBlock(blockID BlockID) error {
wb.channel <- writeBackRequest{
action: func() error {
return wb.Repository.DeleteBlock(blockID)
return wb.Storage.DeleteBlock(blockID)
},
debugInfo: fmt.Sprintf("Delete(%s)", blockID),
}
return nil
}
func (wb *writeBackRepository) Flush() error {
func (wb *writeBackStorage) Flush() error {
rwg := sync.WaitGroup{}
rwg.Add(1)
@@ -82,10 +82,10 @@ func (wb *writeBackRepository) Flush() error {
// Now release them all.
rwg.Done()
return wb.Repository.Flush()
return wb.Storage.Flush()
}
func (wb *writeBackRepository) processRequest(req writeBackRequest) {
func (wb *writeBackStorage) processRequest(req writeBackRequest) {
if req.workerPaused != nil {
req.workerPaused.Done()
req.workerRelease.Wait()
@@ -101,12 +101,12 @@ func (wb *writeBackRepository) processRequest(req writeBackRequest) {
}
}
// NewWriteBackWrapper returns a Repository wrapper that processes writes asynchronously using the specified
// NewWriteBackWrapper returns a Storage wrapper that processes writes asynchronously using the specified
// number of worker goroutines. This wrapper is best used with Storage implementations that exhibit high latency.
func NewWriteBackWrapper(wrapped Repository, workerCount int) Repository {
func NewWriteBackWrapper(wrapped Storage, workerCount int) Storage {
ch := make(chan writeBackRequest, workerCount)
result := &writeBackRepository{
Repository: wrapped,
result := &writeBackStorage{
Storage: wrapped,
channel: ch,
workerCount: workerCount,
}

View File

@@ -1,2 +1,2 @@
// Package cas implements Content-Addressable Storage on top of blob stores.
// Package cas implements Content-Addressable Storage on top of BLOB storage.
package cas

View File

@@ -17,23 +17,23 @@
"strings"
"sync/atomic"
"github.com/kopia/kopia/storage"
"github.com/kopia/kopia/blob"
)
// Since we never share keys, using constant IV is fine.
// Instead of using all-zero, we use this one.
var constantIV = []byte("kopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopiakopia")
// ObjectManager manages objects stored in a repository and allows reading and writing them.
// ObjectManager manages objects stored in a storage and allows reading and writing them.
type ObjectManager interface {
// NewWriter opens an ObjectWriter for writing new content to the repository.
// NewWriter opens an ObjectWriter for writing new content to the blob.
NewWriter(options ...WriterOption) ObjectWriter
// Open creates an io.ReadSeeker for reading object with a specified ID.
Open(objectID ObjectID) (io.ReadSeeker, error)
Flush() error
Repository() storage.Repository
Storage() blob.Storage
Close()
Stats() ObjectManagerStats
@@ -49,7 +49,7 @@ type ObjectManagerStats struct {
type keygenFunc func([]byte) (key []byte, locator []byte)
type objectManager struct {
repository storage.Repository
storage blob.Storage
verbose bool
bufferManager *bufferManager
stats ObjectManagerStats
@@ -68,22 +68,22 @@ func (mgr *objectManager) Close() {
}
func (mgr *objectManager) Flush() error {
return mgr.repository.Flush()
return mgr.storage.Flush()
}
func (mgr *objectManager) Stats() ObjectManagerStats {
return mgr.stats
}
func (mgr *objectManager) Repository() storage.Repository {
return mgr.repository
func (mgr *objectManager) Storage() blob.Storage {
return mgr.storage
}
func (mgr *objectManager) NewWriter(options ...WriterOption) ObjectWriter {
result := newObjectWriter(
objectWriterConfig{
mgr: mgr,
putOptions: storage.PutOptions{},
putOptions: blob.PutOptions{},
},
ObjectIDTypeStored)
@@ -111,7 +111,7 @@ func (mgr *objectManager) Open(objectID ObjectID) (io.ReadSeeker, error) {
totalLength := seekTable[len(seekTable)-1].endOffset()
return &objectReader{
repository: mgr.repository,
storage: mgr.storage,
seekTable: seekTable,
totalLength: totalLength,
}, nil
@@ -122,44 +122,44 @@ func (mgr *objectManager) Open(objectID ObjectID) (io.ReadSeeker, error) {
// ObjectManagerOption controls the behavior of ObjectManager.
type ObjectManagerOption func(o *objectManager) error
// WriteBack is an ObjectManagerOption that enables asynchronous writes to the repository using the pool
// WriteBack is an ObjectManagerOption that enables asynchronous writes to the storage using the pool
// of goroutines.
func WriteBack(workerCount int) ObjectManagerOption {
return func(o *objectManager) error {
o.repository = storage.NewWriteBackWrapper(o.repository, workerCount)
o.storage = blob.NewWriteBackWrapper(o.storage, workerCount)
return nil
}
}
// WriteLimit is an ObjectManagerOption that sets the limit on the number of bytes that can be written
// to the repository in this ObjectManager session. Once the limit is reached, the repository will
// to the storage in this ObjectManager session. Once the limit is reached, the storage will
// return ErrWriteLimitExceeded.
func WriteLimit(maxBytes int64) ObjectManagerOption {
return func(o *objectManager) error {
o.repository = storage.NewWriteLimitWrapper(o.repository, maxBytes)
o.storage = blob.NewWriteLimitWrapper(o.storage, maxBytes)
return nil
}
}
// EnableLogging is an ObjectManagerOption that causes all repository access to be logged.
// EnableLogging is an ObjectManagerOption that causes all storage access to be logged.
func EnableLogging() ObjectManagerOption {
return func(o *objectManager) error {
o.repository = storage.NewLoggingWrapper(o.repository)
o.storage = blob.NewLoggingWrapper(o.storage)
return nil
}
}
// NewObjectManager creates new ObjectManager with the specified repository, options, and key provider.
// NewObjectManager creates new ObjectManager with the specified storage, options, and key provider.
func NewObjectManager(
r storage.Repository,
r blob.Storage,
f Format,
options ...ObjectManagerOption,
) (ObjectManager, error) {
if f.Version != "1" {
return nil, fmt.Errorf("unsupported repository version: %v", f.Version)
return nil, fmt.Errorf("unsupported storage version: %v", f.Version)
}
mgr := &objectManager{
repository: r,
storage: r,
maxInlineBlobSize: f.MaxInlineBlobSize,
maxBlobSize: f.MaxBlobSize,
}

View File

@@ -12,7 +12,7 @@
"strings"
"testing"
"github.com/kopia/kopia/storage"
"github.com/kopia/kopia/blob"
)
func testFormat() Format {
@@ -40,7 +40,7 @@ func getMd5LObjectID(data []byte) string {
func setupTest(t *testing.T) (data map[string][]byte, mgr ObjectManager) {
data = map[string][]byte{}
st := storage.NewMapRepository(data)
st := blob.NewMapStorage(data)
mgr, err := NewObjectManager(st, testFormat())
if err != nil {
@@ -87,11 +87,11 @@ func TestWriters(t *testing.T) {
if !c.objectID.Type().IsStored() {
if len(data) != 0 {
t.Errorf("unexpected data written to the repository: %v", data)
t.Errorf("unexpected data written to the storage: %v", data)
}
} else {
if len(data) != 1 {
t.Errorf("unexpected data written to the repository: %v", data)
t.Errorf("unexpected data written to the storage: %v", data)
}
}
}
@@ -139,7 +139,7 @@ func TestWriterListChunk(t *testing.T) {
contentMd5Sum50: contentBytes[0:50],
getMd5Digest(listChunkContent): listChunkContent,
}) {
t.Errorf("invalid repository contents: %v", data)
t.Errorf("invalid storage contents: %v", data)
}
}
@@ -172,7 +172,7 @@ func TestWriterListOfListsChunk(t *testing.T) {
getMd5Digest(list2ChunkContent): list2ChunkContent,
getMd5Digest(listOfListsChunkContent): listOfListsChunkContent,
}) {
t.Errorf("invalid repository contents: %v", data)
t.Errorf("invalid storage contents: %v", data)
}
}
@@ -226,7 +226,7 @@ func TestWriterListOfListsOfListsChunk(t *testing.T) {
getMd5Digest(list2ChunkContent): list2ChunkContent,
getMd5Digest(list3ChunkContent): list3ChunkContent,
}) {
t.Errorf("invalid repository contents: %v", data)
t.Errorf("invalid storage contents: %v", data)
}
}
@@ -238,7 +238,7 @@ func TestHMAC(t *testing.T) {
s.Hash = "hmac-md5"
s.Secret = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}
mgr, err := NewObjectManager(storage.NewMapRepository(data), s)
mgr, err := NewObjectManager(blob.NewMapStorage(data), s)
if err != nil {
t.Errorf("cannot create manager: %v", err)
}
@@ -300,7 +300,7 @@ func TestReaderStoredBlockNotFound(t *testing.T) {
t.Errorf("cannot parse object ID: %v", err)
}
reader, err := mgr.Open(objectID)
if err != storage.ErrBlockNotFound || reader != nil {
if err != blob.ErrBlockNotFound || reader != nil {
t.Errorf("unexpected result: reader: %v err: %v", reader, err)
}
}
@@ -454,7 +454,7 @@ func TestFormats(t *testing.T) {
for _, c := range cases {
data := map[string][]byte{}
st := storage.NewMapRepository(data)
st := blob.NewMapStorage(data)
mgr, err := NewObjectManager(st, c.format)
if err != nil {

View File

@@ -8,13 +8,13 @@
"strconv"
"strings"
"github.com/kopia/kopia/storage"
"github.com/kopia/kopia/blob"
)
type seekTableEntry struct {
startOffset int64
length int64
blockID storage.BlockID
blockID blob.BlockID
}
func (r *seekTableEntry) endOffset() int64 {
@@ -30,7 +30,7 @@ func (r *seekTableEntry) String() string {
}
type objectReader struct {
repository storage.Repository
storage blob.Storage
seekTable []seekTableEntry
@@ -85,7 +85,7 @@ func (r *objectReader) Read(buffer []byte) (int, error) {
func (r *objectReader) openCurrentChunk() error {
blockID := r.seekTable[r.currentChunkIndex].blockID
blockData, err := r.repository.GetBlock(blockID)
blockData, err := r.storage.GetBlock(blockID)
if err != nil {
return err
}
@@ -164,7 +164,7 @@ func (mgr *objectManager) newRawReader(objectID ObjectID) (io.ReadSeeker, error)
}
blockID := objectID.BlockID()
payload, err := mgr.repository.GetBlock(blockID)
payload, err := mgr.storage.GetBlock(blockID)
if err != nil {
return nil, err
}

View File

@@ -5,10 +5,10 @@
"fmt"
"io"
"github.com/kopia/kopia/storage"
"github.com/kopia/kopia/blob"
)
// ObjectWriter allows writing content to the repository and supports automatic deduplication and encryption
// ObjectWriter allows writing content to the storage and supports automatic deduplication and encryption
// of written data.
type ObjectWriter interface {
io.WriteCloser
@@ -19,7 +19,7 @@ type ObjectWriter interface {
// objectWriterConfig
type objectWriterConfig struct {
mgr *objectManager
putOptions storage.PutOptions
putOptions blob.PutOptions
}
type objectWriter struct {
@@ -99,7 +99,7 @@ func (w *objectWriter) flushBuffer(force bool) error {
objectID, readCloser := w.mgr.hashBufferForWriting(w.buffer, string(w.objectType)+w.prefix)
w.buffer = nil
if err := w.mgr.repository.PutBlock(objectID.BlockID(), readCloser, storage.PutOptions{}); err != nil {
if err := w.mgr.storage.PutBlock(objectID.BlockID(), readCloser, blob.PutOptions{}); err != nil {
return fmt.Errorf(
"error when flushing chunk %d of %s to %#v: %#v",
w.flushedObjectCount,
@@ -158,7 +158,7 @@ func (w *objectWriter) Result(forceStored bool) (ObjectID, error) {
// WriterOption is an option that can be passed to ObjectManager.NewWriter()
type WriterOption func(*objectWriter)
// WithBlockNamePrefix causes the ObjectWriter to prefix any blocks emitted to the repository with a given string.
// WithBlockNamePrefix causes the ObjectWriter to prefix any blocks emitted to the storage with a given string.
func WithBlockNamePrefix(prefix string) WriterOption {
return func(w *objectWriter) {
w.prefix = prefix
@@ -172,8 +172,8 @@ func WithDescription(description string) WriterOption {
}
}
// WithPutOptions causes the ObjectWriter to use the specified options when writing blocks to the repository.
func WithPutOptions(options storage.PutOptions) WriterOption {
// WithPutOptions causes the ObjectWriter to use the specified options when writing blocks to the blob.
func WithPutOptions(options blob.PutOptions) WriterOption {
return func(w *objectWriter) {
w.putOptions = options
}

View File

@@ -8,7 +8,7 @@
"strings"
"unicode/utf8"
"github.com/kopia/kopia/storage"
"github.com/kopia/kopia/blob"
)
var (
@@ -71,10 +71,10 @@ func (oei ObjectEncryptionInfo) Mode() EncryptionMode {
// ObjectIDTypeBinary represents binary inline object ID
ObjectIDTypeBinary ObjectIDType = "B"
// ObjectIDTypeStored represents ID of object whose data is stored directly in a single repository block indicated by BlockID.
// ObjectIDTypeStored represents ID of object whose data is stored directly in a single storage block indicated by BlockID.
ObjectIDTypeStored ObjectIDType = "C"
// ObjectIDTypeList represents ID of an object whose data is stored in mutliple repository blocks.
// ObjectIDTypeList represents ID of an object whose data is stored in mutliple storage blocks.
// The value of the ObjectID is the list chunk, which lists object IDs that need to be concatenated
// to form the contents.
ObjectIDTypeList ObjectIDType = "L" // list chunk
@@ -85,7 +85,7 @@ func (oei ObjectEncryptionInfo) Mode() EncryptionMode {
NullObjectID ObjectID = ""
)
// IsStored determines whether data for the given chunk type is stored in the repository
// IsStored determines whether data for the given chunk type is stored in the storage
// (as opposed to being stored inline as part of ObjectID itself).
func (ct ObjectIDType) IsStored() bool {
switch ct {
@@ -98,7 +98,7 @@ func (ct ObjectIDType) IsStored() bool {
}
// ObjectID represents the identifier of a chunk.
// Identifiers can either refer to data block stored in a Repository, or contain small amounts
// Identifiers can either refer to data block stored in a Storage, or contain small amounts
// of payload directly (useful for short ASCII or binary files).
type ObjectID string
@@ -125,16 +125,16 @@ func (c ObjectID) InlineData() []byte {
return nil
}
// BlockID returns identifier of the repository block. For inline chunk IDs, an empty string is returned.
func (c ObjectID) BlockID() storage.BlockID {
// BlockID returns identifier of the storage block. For inline chunk IDs, an empty string is returned.
func (c ObjectID) BlockID() blob.BlockID {
if c.Type().IsStored() {
content := string(c[1:])
firstColon := strings.Index(content, ":")
if firstColon > 0 {
return storage.BlockID(content[0:firstColon])
return blob.BlockID(content[0:firstColon])
}
return storage.BlockID(content)
return blob.BlockID(content)
}
return ""

View File

@@ -1,2 +1,2 @@
// Package fs implements filesystem layer on top content-addressable storage.
// Package fs implements filesystem layer on top Content-Addressable Storage layer.
package fs

View File

@@ -15,7 +15,7 @@
// ErrUploadCancelled is returned when the upload gets cancelled.
var ErrUploadCancelled = errors.New("upload cancelled")
// Uploader supports efficient uploading files and directories to CAS storage.
// Uploader supports efficient uploading files and directories to CAS.
type Uploader interface {
UploadFile(path string) (cas.ObjectID, error)
UploadDir(path string, previousObjectID cas.ObjectID) (cas.ObjectID, error)

View File

@@ -6,8 +6,8 @@
"os"
"path/filepath"
"github.com/kopia/kopia/blob"
"github.com/kopia/kopia/cas"
"github.com/kopia/kopia/storage"
"testing"
)
@@ -52,7 +52,7 @@ func TestUpload(t *testing.T) {
Hash: "md5",
}
repo, err := storage.NewFSRepository(&storage.FSRepositoryOptions{
repo, err := blob.NewFSStorage(&blob.FSStorageOptions{
Path: repoDir,
})

View File

@@ -1 +0,0 @@
package storage

View File

@@ -1,2 +0,0 @@
// Package storage implements repositories for connecting to various types of storage backends.
package storage

View File

@@ -1,53 +0,0 @@
package storage
import (
"io"
"log"
)
type loggingRepository struct {
Repository
}
func (s *loggingRepository) BlockExists(id BlockID) (bool, error) {
result, err := s.Repository.BlockExists(id)
log.Printf("BlockExists(%#v)=%#v,%#v", id, result, err)
return result, err
}
func (s *loggingRepository) GetBlock(id BlockID) ([]byte, error) {
result, err := s.Repository.GetBlock(id)
if len(result) < 20 {
log.Printf("GetBlock(%#v)=(%#v, %#v)", id, result, err)
} else {
log.Printf("GetBlock(%#v)=({%#v bytes}, %#v)", id, len(result), err)
}
return result, err
}
func (s *loggingRepository) PutBlock(id BlockID, data io.ReadCloser, options PutOptions) error {
err := s.Repository.PutBlock(id, data, options)
log.Printf("PutBlock(%#v, %#v)=%#v", id, options, err)
return err
}
func (s *loggingRepository) DeleteBlock(id BlockID) error {
err := s.Repository.DeleteBlock(id)
log.Printf("DeleteBlock(%#v)=%#v", id, err)
return err
}
func (s *loggingRepository) ListBlocks(prefix BlockID) chan (BlockMetadata) {
log.Printf("ListBlocks(%#v)", prefix)
return s.Repository.ListBlocks(prefix)
}
func (s *loggingRepository) Flush() error {
log.Printf("Flush()")
return s.Repository.Flush()
}
// NewLoggingWrapper returns a Repository wrapper that logs all repository commands.
func NewLoggingWrapper(wrapped Repository) Repository {
return &loggingRepository{wrapped}
}

View File

@@ -1,35 +0,0 @@
package storage
import "fmt"
var (
factories = map[string]repositoryFactory{}
)
// RepositoryFactory allows creation of repositories in a generic way.
type repositoryFactory struct {
defaultConfigFunc func() interface{}
createRepositoryFunc func(interface{}) (Repository, error)
}
// AddSupportedRepository registers factory function to create repository with a given type name.
func AddSupportedRepository(
repositoryType string,
defaultConfigFunc func() interface{},
createRepositoryFunc func(interface{}) (Repository, error)) {
factories[repositoryType] = repositoryFactory{
defaultConfigFunc: defaultConfigFunc,
createRepositoryFunc: createRepositoryFunc,
}
}
// NewRepository creates new repository based on RepositoryConfiguration.
// The repository type must be previously registered using AddSupportedRepository.
func NewRepository(cfg RepositoryConfiguration) (Repository, error) {
if factory, ok := factories[cfg.Type]; ok {
return factory.createRepositoryFunc(cfg.Config)
}
return nil, fmt.Errorf("unknown repository type: %s", cfg.Type)
}