Files
kopia/repo/content/content_cache_metadata.go
Jarek Kowalski e03971fc59 Upgraded linter to v1.33.0 (#734)
* linter: upgraded to 1.33, disabled some linters

* lint: fixed 'errorlint' errors

This ensures that all error comparisons use errors.Is() or errors.As().
We will be wrapping more errors going forward so it's important that
error checks are not strict everywhere.

Verified that there are no exceptions for errorlint linter which
guarantees that.

* lint: fixed or suppressed wrapcheck errors

* lint: nolintlint and misc cleanups

Co-authored-by: Julio López <julio+gh@kasten.io>
2020-12-21 22:39:22 -08:00

131 lines
3.2 KiB
Go

package content
import (
"context"
"github.com/pkg/errors"
"go.opencensus.io/stats"
"golang.org/x/sync/errgroup"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/repo/blob"
)
// metadataCacheSyncParallelism bounds the number of concurrent blob fetches
// performed by contentCacheForMetadata.sync.
const metadataCacheSyncParallelism = 16

// contentCacheForMetadata is a contentCache implementation that caches
// entire metadata blobs fetched from the underlying storage 'st'.
type contentCacheForMetadata struct {
*cacheBase
st blob.Storage
}
// sync synchronizes metadata cache with all blobs found in the storage.
func (c *contentCacheForMetadata) sync(ctx context.Context) error {
// semaphore limiting the number of in-flight fetches; acquiring it in the
// list callback also backpressures blob listing itself.
slots := make(chan struct{}, metadataCacheSyncParallelism)
log(ctx).Debugf("synchronizing metadata cache...")
defer log(ctx).Debugf("finished synchronizing metadata cache.")
var workers errgroup.Group
// walk all metadata blobs and warm the cache for each one in parallel.
err := c.st.ListBlobs(ctx, PackBlobIDPrefixSpecial, func(bm blob.Metadata) error {
slots <- struct{}{} // acquire a slot before spawning the worker
workers.Go(func() error {
defer func() {
<-slots // release the slot when done
}()
_, ferr := c.getContent(ctx, "dummy", bm.BlobID, 0, 1)
return ferr
})
return nil
})
if err != nil {
return errors.Wrap(err, "error listing blobs")
}
return workers.Wait()
}
// getContent returns a section of the given blob, preferring the local cache
// and falling back to (and populating the cache from) underlying storage.
// A request with offset==0 and length==-1 returns the entire blob; any other
// negative length or out-of-range (offset, length) pair is an error.
func (c *contentCacheForMetadata) getContent(ctx context.Context, cacheKey cacheKey, blobID blob.ID, offset, length int64) ([]byte, error) {
// serialize concurrent fetches of the same blob.
m := c.perItemMutex(blobID)
m.Lock()
defer m.Unlock()
useCache := shouldUseContentCache(ctx)
if useCache {
if v, err := c.cacheBase.cacheStorage.GetBlob(ctx, blobID, offset, length); err == nil {
// cache hit
stats.Record(ctx,
metricContentCacheHitCount.M(1),
metricContentCacheHitBytes.M(int64(len(v))),
)
return v, nil
}
}
stats.Record(ctx, metricContentCacheMissCount.M(1))
// cache miss: read the entire blob so it can be cached whole.
log(ctx).Debugf("fetching metadata blob %q", blobID)
blobData, err := c.st.GetBlob(ctx, blobID, 0, -1)
if err != nil {
stats.Record(ctx, metricContentCacheMissErrors.M(1))
} else {
stats.Record(ctx, metricContentCacheMissBytes.M(int64(len(blobData))))
}
if err != nil {
// propagate unmodified, including blob.ErrBlobNotFound, so callers
// can detect "not found in underlying storage" via errors.Is().
// nolint:wrapcheck
return nil, err
}
if useCache {
// store the whole blob in the cache, do not report cache writes as uploads.
if puterr := c.cacheStorage.PutBlob(
blob.WithUploadProgressCallback(ctx, nil),
blobID,
gather.FromSlice(blobData),
); puterr != nil {
// cache write failures are non-fatal; the fetched data is still returned.
stats.Record(ctx, metricContentCacheStoreErrors.M(1))
log(ctx).Warningf("unable to write cache item %v: %v", blobID, puterr)
}
}
if offset == 0 && length == -1 {
return blobData, nil
}
// reject negative lengths here as well: previously only the special case
// above handled length==-1, and e.g. (offset=5, length=-1) would slice
// blobData[5:4] and panic instead of returning an error.
if offset < 0 || length < 0 || offset+length > int64(len(blobData)) {
return nil, errors.Errorf("invalid (offset=%v,length=%v) for blob %q of size %v", offset, length, blobID, len(blobData))
}
return blobData[offset : offset+length], nil
}
// newContentCacheForMetadata creates a new metadata content cache backed by
// cacheStorage, limited to maxSizeBytes. When cacheStorage is nil, a
// passthrough cache reading directly from st is returned instead.
func newContentCacheForMetadata(ctx context.Context, st, cacheStorage blob.Storage, maxSizeBytes int64) (contentCache, error) {
// no cache storage configured - fall back to reading straight from storage.
if cacheStorage == nil {
return passthroughContentCache{st}, nil
}
base, err := newContentCacheBase(ctx, cacheStorage, maxSizeBytes, defaultTouchThreshold, defaultSweepFrequency)
if err != nil {
return nil, errors.Wrap(err, "unable to create base cache")
}
return &contentCacheForMetadata{
cacheBase: base,
st:        st,
}, nil
}