chore(general): typos (#4659)

Nathan Baulch
2025-06-11 10:24:14 +10:00
committed by GitHub
parent f44da2eec0
commit 19d92613a6
75 changed files with 115 additions and 115 deletions

View File

@@ -77,7 +77,7 @@ jobs:
run: make ci-build
timeout-minutes: 40
env:
# Apple credentials for notarizaton, used by Electron Builder
# Apple credentials for notarization, used by Electron Builder
APPLE_API_ISSUER: ${{ secrets.APPLE_API_ISSUER }}
APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }}
APPLE_API_KEY_BASE64: ${{ secrets.APPLE_API_KEY_BASE64 }}

View File

@@ -34,7 +34,7 @@ linters:
- github.com/aws/aws-sdk-go:
recommendations:
- github.com/minio/minio-go
reason: "github.com/aws/aws-sdk-go is not activily developed any longer"
reason: "github.com/aws/aws-sdk-go is not actively developed any longer"
- github.com/rs/zerolog/log:
recommendations:
- "use kopia's logging packages"

View File

@@ -41,7 +41,7 @@ export default async function (configuration) {
for (let attempt = 0; attempt < 10; attempt++) {
console.log("Signing ", configuration.path, "attempt", attempt);
if (attempt > 0) {
console.log("Sleping for ", nextSleepTime);
console.log("Sleeping for ", nextSleepTime);
await new Promise((r) => setTimeout(r, nextSleepTime));
}
nextSleepTime *= 2;

View File

@@ -80,7 +80,7 @@ func (c *commandBenchmarkCompression) readInputFile(ctx context.Context) ([]byte
return data, nil
}
type compressionBechmarkResult struct {
type compressionBenchmarkResult struct {
compression compression.Name
throughput float64
compressedSize uint64
@@ -166,7 +166,7 @@ func (c *commandBenchmarkCompression) run(ctx context.Context) error {
}
func (c *commandBenchmarkCompression) runCompression(ctx context.Context, data []byte, repeatCount int, algorithms map[compression.Name]compression.Compressor) error {
var results []compressionBechmarkResult
var results []compressionBenchmarkResult
log(ctx).Infof("Compressing input file %q (%v) using %v compression methods.", c.dataFile, units.BytesString(len(data)), len(algorithms))
@@ -225,7 +225,7 @@ func (c *commandBenchmarkCompression) runCompression(ctx context.Context, data [
_, perSecond := tt.Completed(float64(c.parallel) * float64(len(data)) * float64(cnt))
results = append(results,
compressionBechmarkResult{
compressionBenchmarkResult{
compression: name,
throughput: perSecond,
compressedSize: compressedSize,
@@ -241,7 +241,7 @@ func (c *commandBenchmarkCompression) runCompression(ctx context.Context, data [
}
func (c *commandBenchmarkCompression) runDecompression(ctx context.Context, data []byte, repeatCount int, algorithms map[compression.Name]compression.Compressor) error {
var results []compressionBechmarkResult
var results []compressionBenchmarkResult
log(ctx).Infof("Decompressing input file %q (%v) using %v compression methods.", c.dataFile, units.BytesString(len(data)), len(algorithms))
@@ -294,7 +294,7 @@ func (c *commandBenchmarkCompression) runDecompression(ctx context.Context, data
_, perSecond := tt.Completed(float64(c.parallel) * float64(len(data)) * float64(cnt))
results = append(results,
compressionBechmarkResult{
compressionBenchmarkResult{
compression: name,
throughput: perSecond,
compressedSize: compressedSize,
@@ -309,7 +309,7 @@ func (c *commandBenchmarkCompression) runDecompression(ctx context.Context, data
return nil
}
func (c *commandBenchmarkCompression) sortResults(results []compressionBechmarkResult) {
func (c *commandBenchmarkCompression) sortResults(results []compressionBenchmarkResult) {
switch {
case c.bySize:
sort.Slice(results, func(i, j int) bool {
@@ -326,7 +326,7 @@ func (c *commandBenchmarkCompression) sortResults(results []compressionBechmarkR
}
}
func (c *commandBenchmarkCompression) printResults(results []compressionBechmarkResult) {
func (c *commandBenchmarkCompression) printResults(results []compressionBenchmarkResult) {
c.out.printStdout(" %-26v %-12v %-12v %v\n", "Compression", "Compressed", "Throughput", "Allocs Memory Usage")
c.out.printStdout("------------------------------------------------------------------------------------------------\n")

View File

@@ -37,7 +37,7 @@ func TestCommandBenchmarkHashing(t *testing.T) {
e.RunAndExpectSuccess(t, "benchmark", "hashing", "--repeat=1", "--block-size=1KB", "--print-options")
}
func TestCommandBenchmarkSpliter(t *testing.T) {
func TestCommandBenchmarkSplitter(t *testing.T) {
t.Parallel()
runner := testenv.NewInProcRunner(t)

View File

@@ -24,19 +24,19 @@ func TestMaintenanceSetExtendObjectLocks(t *testing.T) {
testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
require.False(t, mi.ExtendObjectLocks, "ExtendOjectLocks should not default to enabled.")
require.False(t, mi.ExtendObjectLocks, "ExtendObjectLocks should not default to enabled.")
e.RunAndExpectSuccess(t, "maintenance", "set", "--extend-object-locks", "true")
testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
require.True(t, mi.ExtendObjectLocks, "ExtendOjectLocks should be enabled.")
require.True(t, mi.ExtendObjectLocks, "ExtendObjectLocks should be enabled.")
e.RunAndExpectSuccess(t, "maintenance", "set", "--extend-object-locks", "false")
testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
require.False(t, mi.ExtendObjectLocks, "ExtendOjectLocks should be disabled.")
require.False(t, mi.ExtendObjectLocks, "ExtendObjectLocks should be disabled.")
}
func TestMaintenanceSetListParallelism(t *testing.T) {
@@ -78,7 +78,7 @@ func (s *formatSpecificTestSuite) TestInvalidExtendRetainOptions(t *testing.T) {
testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
require.False(t, mi.ExtendObjectLocks, "ExtendOjectLocks should be disabled.")
require.False(t, mi.ExtendObjectLocks, "ExtendObjectLocks should be disabled.")
// Enable extend object locks when retention_period-full_maintenance_interval > 24h
e.RunAndExpectSuccess(t, "maintenance", "set", "--full-interval", "23h59m")
@@ -90,7 +90,7 @@ func (s *formatSpecificTestSuite) TestInvalidExtendRetainOptions(t *testing.T) {
testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
require.True(t, mi.ExtendObjectLocks, "ExtendOjectLocks should be enabled.")
require.True(t, mi.ExtendObjectLocks, "ExtendObjectLocks should be enabled.")
require.Equal(t, mi.FullCycle.Interval, time.Duration(86340000000000), "maintenance-interval should be unchanged.")
// Cannot change retention_period when retention_period-full_maintenance_interval < 24h

View File

@@ -73,7 +73,7 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
opt.PermissiveCacheLoading = true
anyChange = true
log(ctx).Info("Setting to load indicies into cache permissively.")
log(ctx).Info("Setting to load indices into cache permissively.")
}
}

View File

@@ -98,7 +98,7 @@ func assign(iif content.Info, i int, m map[content.ID][2]content.Info) {
m[iif.ContentID] = v
}
// loadIndexBlobs load index blobs into indexEntries map. indexEntries map will allow comparison betweel two indexes (index at which == 0 and index at which == 1).
// loadIndexBlobs load index blobs into indexEntries map. indexEntries map will allow comparison between two indexes (index at which == 0 and index at which == 1).
func loadIndexBlobs(ctx context.Context, indexEntries map[content.ID][2]content.Info, sm *content.SharedManager, which int, indexBlobInfos []indexblob.Metadata) error {
d := gather.WriteBuffer{}
@@ -157,7 +157,7 @@ func (c *commandRepositoryUpgrade) validateAction(ctx context.Context, rep repo.
var zeroInfo content.Info
// both indexes will have matching contentiDs with matching indexInfo structures.
// both indexes will have matching contentIDs with matching indexInfo structures.
//nolint:gocritic
for contentID, indexEntryPairs := range indexEntries {
iep0 := indexEntryPairs[0] // first entry of index entry pair

View File

@@ -21,8 +21,8 @@ func TestRestoreSnapshotMaxTime(t *testing.T) {
return time.Date(y, time.Month(mo), d, h, m, s, 0, now.Location())
}
requireTime := func(expected time.Time, timespect string) {
mt, err := computeMaxTime(timespect)
requireTime := func(expected time.Time, timespec string) {
mt, err := computeMaxTime(timespec)
require.NoError(t, err)
require.Equal(t, expected, mt)
}

View File

@@ -124,7 +124,7 @@ func (c *commandServerStart) startServerWithOptionalTLSAndListener(ctx context.C
return checkErrServerClosed(ctx, httpServer.ServeTLS(listener, c.serverStartTLSCertFile, c.serverStartTLSKeyFile), "error starting TLS server")
case c.serverStartTLSGenerateCert:
// PEM files not provided, generate in-memory TLS cert/key but don't persit.
// PEM files not provided, generate in-memory TLS cert/key but don't persist.
cert, key, err := c.generateServerCertificate(ctx)
if err != nil {
return errors.Wrap(err, "unable to generate server cert")

View File

@@ -29,7 +29,7 @@ type commandSnapshotList struct {
snapshotListShowItemID bool
snapshotListShowRetentionReasons bool
snapshotListShowModTime bool
shapshotListShowOwner bool
snapshotListShowOwner bool
snapshotListShowIdentical bool
snapshotListShowAll bool
maxResultsPerPath int
@@ -50,7 +50,7 @@ func (c *commandSnapshotList) setup(svc appServices, parent commandParent) {
cmd.Flag("manifest-id", "Include manifest item ID.").Short('m').BoolVar(&c.snapshotListShowItemID)
cmd.Flag("retention", "Include retention reasons.").Default("true").BoolVar(&c.snapshotListShowRetentionReasons)
cmd.Flag("mtime", "Include file mod time").BoolVar(&c.snapshotListShowModTime)
cmd.Flag("owner", "Include owner").BoolVar(&c.shapshotListShowOwner)
cmd.Flag("owner", "Include owner").BoolVar(&c.snapshotListShowOwner)
cmd.Flag("show-identical", "Show identical snapshots").Short('l').BoolVar(&c.snapshotListShowIdentical)
cmd.Flag("storage-stats", "Compute and show storage statistics").BoolVar(&c.storageStats)
cmd.Flag("reverse", "Reverse sort order").BoolVar(&c.reverseSort)
@@ -410,7 +410,7 @@ func (c *commandSnapshotList) entryBits(ctx context.Context, m *snapshot.Manifes
bits = append(bits,
maybeHumanReadableBytes(c.snapshotListShowHumanReadable, totalBytes),
ent.Mode().String())
if c.shapshotListShowOwner {
if c.snapshotListShowOwner {
bits = append(bits,
fmt.Sprintf("uid:%v", ent.Owner().UserID),
fmt.Sprintf("gid:%v", ent.Owner().GroupID))

View File

@@ -310,7 +310,7 @@ func setupFilesystem(skipDefaultFiles bool) *mockfs.Directory {
},
},
{
desc: "absolut match",
desc: "absolute match",
setup: func(root *mockfs.Directory) {
root.Subdir("src").AddFileLines(".extraignore", []string{
"/sub/*.foo",

View File

@@ -226,7 +226,7 @@ func NewKopiaAPIClient(options Options) (*KopiaAPIClient, error) {
tp, _ = transport.(*http.Transport)
tp.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
dial, err := net.Dial("unix", u.Path)
return dial, errors.Wrap(err, "Failed to conect to socket: "+options.BaseURL)
return dial, errors.Wrap(err, "Failed to connect to socket: "+options.BaseURL)
}
}

View File

@@ -14,7 +14,7 @@
"github.com/kopia/kopia/repo/hashing"
)
// Crypter ecapsulates hashing and encryption.
// Crypter encapsulates hashing and encryption.
type Crypter interface {
HashFunc() hashing.HashFunc
Encryptor() encryption.Encryptor

View File

@@ -18,7 +18,7 @@
// ConcurrentAccessOptions encapsulates parameters for VerifyConcurrentAccess.
type ConcurrentAccessOptions struct {
NumBlobs int // number of shared blos in the pool
NumBlobs int // number of shared blobs in the pool
Getters int
Putters int

View File

@@ -10,7 +10,7 @@
"github.com/kopia/kopia/internal/gather"
)
func TestNoStorageProection(t *testing.T) {
func TestNoStorageProtection(t *testing.T) {
testStorageProtection(t, cacheprot.NoProtection(), false)
}

View File

@@ -209,7 +209,7 @@ func (c *Comparer) compareEntry(ctx context.Context, e1, e2 fs.Entry, path strin
}
// Checks for changes in e1's and e2's metadata when they have the same content,
// and upates the stats accordingly.
// and updates the stats accordingly.
// The function is not concurrency safe, as it updates st without any locking.
func compareMetadata(ctx context.Context, e1, e2 fs.Entry, path string, st *EntryTypeStats) {
var changed bool

View File

@@ -504,9 +504,9 @@ func getManifests(t *testing.T) map[string]*snapshot.Manifest {
// Tests GetPrecedingSnapshot function
// - GetPrecedingSnapshot with an invalid snapshot id and expect an error;
// - Add a snapshot, expect an error from GetPreceedingSnapshot since there is
// - Add a snapshot, expect an error from GetPrecedingSnapshot since there is
// only a single snapshot in the repo;
// - Subsequently add more snapshots and GetPreceedingSnapshot theimmediately
// - Subsequently add more snapshots and GetPrecedingSnapshot the immediately
// preceding with no error.
func TestGetPrecedingSnapshot(t *testing.T) {
ctx, env := repotesting.NewEnvironment(t, repotesting.FormatNotImportant)

View File

@@ -23,7 +23,7 @@
const LatestEpoch = -1
const (
initiaRefreshAttemptSleep = 100 * time.Millisecond
initialRefreshAttemptSleep = 100 * time.Millisecond
maxRefreshAttemptSleep = 15 * time.Second
maxRefreshAttemptSleepExponent = 1.5
)
@@ -448,7 +448,7 @@ func (e *Manager) refreshLocked(ctx context.Context) error {
return err
}
nextDelayTime := initiaRefreshAttemptSleep
nextDelayTime := initialRefreshAttemptSleep
if !p.Enabled {
return errors.New("epoch manager not enabled")

View File

@@ -17,7 +17,7 @@ func Frozen(t time.Time) func() time.Time {
}
// AutoAdvance returns a time source function that returns a time equal to
// 'start + ((n - 1) * dt)' wheren n is the number of serialized invocations of
// 'start + ((n - 1) * dt)' where n is the number of serialized invocations of
// the returned function. The returned function will generate a time series of
// the form [start, start+dt, start+2dt, start+3dt, ...].
func AutoAdvance(start time.Time, dt time.Duration) func() time.Time {
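
For reference, the corrected comment describes a simple deterministic time series. A minimal standalone sketch of that behavior (an illustration of the series the comment describes, not the repository's actual implementation):

```go
package main

import (
	"fmt"
	"time"
)

// autoAdvance mimics the documented behavior: the n-th call to the
// returned function yields start + (n-1)*dt, producing the series
// [start, start+dt, start+2dt, ...].
func autoAdvance(start time.Time, dt time.Duration) func() time.Time {
	calls := 0
	return func() time.Time {
		t := start.Add(time.Duration(calls) * dt)
		calls++
		return t
	}
}

func main() {
	now := autoAdvance(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), time.Minute)
	fmt.Println(now()) // 2020-01-01 00:00:00 +0000 UTC
	fmt.Println(now()) // 2020-01-01 00:01:00 +0000 UTC
	fmt.Println(now()) // 2020-01-01 00:02:00 +0000 UTC
}
```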

View File

@@ -25,7 +25,7 @@ func (w failingWriter) Write(buf []byte) (int, error) {
}
func TestGatherBytes(t *testing.T) {
// split the 'whole' into equivalent Bytes slicings in some interesting ways
// split the 'whole' into equivalent Bytes slices in some interesting ways
cases := []struct {
whole []byte
sliced Bytes
@@ -113,7 +113,7 @@ func() {
t.Errorf("unexpected data from GetBytes() %v, want %v", string(all), string(tc.whole))
}
// AppendSectionTo - test exhaustively all combinationf os start, length
// AppendSectionTo - test exhaustively all combinations of start, length
var tmp WriteBuffer
defer tmp.Close()
@@ -316,7 +316,7 @@ type testCase struct {
// write the generated data
n, err := preWrt.Write(buf)
require.NoErrorf(t, err, "Write() faiiled, inputBufferSize: %v", tc.inputBufferSize)
require.NoErrorf(t, err, "Write() failed, inputBufferSize: %v", tc.inputBufferSize)
require.Equalf(t, defaultAllocator.chunkSize, preWrt.alloc.chunkSize,
"this test expects that the default-allocator will be used, but we are using: %#v", preWrt.alloc)

View File

@@ -33,7 +33,7 @@ func verifyMapping(t *testing.T, mapping *metricid.Mapping) {
require.Equal(t, k, mapping.IndexToName[v])
}
// make sure we use consecurive numbers
// make sure we use consecutive numbers
require.Len(t, id2name, maxv)
require.Equal(t, mapping.MaxIndex, maxv)
}

View File

@@ -434,7 +434,7 @@ type ErrorEntry struct {
err error
}
// ErrorInfo implements fs.ErrorErntry.
// ErrorInfo implements fs.ErrorEntry.
func (e *ErrorEntry) ErrorInfo() error {
return e.err
}

View File

@@ -20,7 +20,7 @@ type BlobWriter struct {
}
// EncryptAndWriteBlobAsync encrypts given content and writes it to the repository asynchronously,
// folllowed by calling the provided closeFunc.
// followed by calling the provided closeFunc.
func (w *BlobWriter) EncryptAndWriteBlobAsync(ctx context.Context, prefix blob.ID, data gather.Bytes, closeFunc func()) {
encrypted := gather.NewWriteBuffer()
// Close happens in a goroutine

View File

@@ -7,7 +7,7 @@
)
// ScrubSensitiveData returns a copy of a given value with sensitive fields scrubbed.
// Fields are marked as sensitive with truct field tag `kopia:"sensitive"`.
// Fields are marked as sensitive with struct field tag `kopia:"sensitive"`.
func ScrubSensitiveData(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr:

View File

@@ -102,7 +102,7 @@ func handleDeleteSnapshots(ctx context.Context, rc requestContext) (any, *apiErr
for _, m := range manifestIDs {
if err := w.DeleteManifest(ctx, m); err != nil {
return errors.Wrap(err, "uanble to delete snapshot")
return errors.Wrap(err, "unable to delete snapshot")
}
}

View File

@@ -122,7 +122,7 @@ func maybeStartMaintenanceManager(
// - not direct; or
// - read only.
// Note: the repo owner is not checked here since the repo owner can be externally
// changed while the server is running. The server would pick up the new onwer
// changed while the server is running. The server would pick up the new owner
// the next time a maintenance task executes.
dr, ok := rep.(repo.DirectRepository)
if !ok {

View File

@@ -174,14 +174,14 @@ func ListPolicies(ctx context.Context, c *apiclient.KopiaAPIClient, match *snaps
return resp, nil
}
func policyTargetURLParamters(si snapshot.SourceInfo) string {
func policyTargetURLParameters(si snapshot.SourceInfo) string {
return fmt.Sprintf("userName=%v&host=%v&path=%v", si.UserName, si.Host, si.Path)
}
// SetPolicy sets the policy.
func SetPolicy(ctx context.Context, c *apiclient.KopiaAPIClient, si snapshot.SourceInfo, pol *policy.Policy) error {
resp := &Empty{}
if err := c.Put(ctx, "policy?"+policyTargetURLParamters(si), pol, resp); err != nil {
if err := c.Put(ctx, "policy?"+policyTargetURLParameters(si), pol, resp); err != nil {
return errors.Wrap(err, "SetPolicy")
}
@@ -192,7 +192,7 @@ func SetPolicy(ctx context.Context, c *apiclient.KopiaAPIClient, si snapshot.Sou
func ResolvePolicy(ctx context.Context, c *apiclient.KopiaAPIClient, si snapshot.SourceInfo, req *ResolvePolicyRequest) (*ResolvePolicyResponse, error) {
resp := &ResolvePolicyResponse{}
if err := c.Post(ctx, "policy/resolve?"+policyTargetURLParamters(si), req, resp); err != nil {
if err := c.Post(ctx, "policy/resolve?"+policyTargetURLParameters(si), req, resp); err != nil {
return nil, errors.Wrap(err, "ResolvePolicy")
}

View File

@@ -51,7 +51,7 @@ func decodeHashedPassword(encodedHash string) (*passwordHash, error) {
}
if err := json.Unmarshal(passwordHashJSON, &h); err != nil {
return nil, errors.Wrap(err, "unmarshalling password hash")
return nil, errors.Wrap(err, "unmarshaling password hash")
}
return &h, nil

View File

@@ -5,8 +5,8 @@
// getPasswordHashAlgorithm returns the password hash algorithm given a version.
func getPasswordHashAlgorithm(passwordHashVersion int) (string, error) {
switch passwordHashVersion {
// when the version is unsetDefaulHashVersion, map it to ScryptHashVersion
case unsetDefaulHashVersion, ScryptHashVersion:
// when the version is unsetDefaultHashVersion, map it to ScryptHashVersion
case unsetDefaultHashVersion, ScryptHashVersion:
return scryptHashAlgorithm, nil
case Pbkdf2HashVersion:
return pbkdf2HashAlgorithm, nil

View File

@@ -7,7 +7,7 @@
const (
// default password hash version when it is not explicitly set in the user
// profile, this always maps to ScryptHashVersion.
unsetDefaulHashVersion = 0
unsetDefaultHashVersion = 0
// ScryptHashVersion is the version representation of the scrypt algorithm.
ScryptHashVersion = 1

View File

@@ -52,7 +52,7 @@ func NewStdConsoleEncoder(ec StdConsoleEncoderConfig) zapcore.Encoder {
}), ec}
}
// StdConsoleEncoderConfig provides configurationfor NewStdConsoleEncoder.
// StdConsoleEncoderConfig provides configuration for NewStdConsoleEncoder.
type StdConsoleEncoderConfig struct {
TimeLayout string
LocalTime bool

View File

@@ -12,7 +12,7 @@
// ManifestWithError represents information about the snapshot manifest with optional error.
type ManifestWithError struct {
Manifest snapshot.Manifest `json:"manifest"` // may not be filled out if there was an error, Manifst.Source is always set.
Manifest snapshot.Manifest `json:"manifest"` // may not be filled out if there was an error, Manifest.Source is always set.
Previous *snapshot.Manifest `json:"previous"` // may not be filled out
Error string `json:"error"` // will be present if there was an error

View File

@@ -89,7 +89,7 @@ func TestWebhook(t *testing.T) {
require.Equal(t, "This is a test.\n\n* one\n* two\n* three", requestBodies[1].String())
p3, err := sender.GetSender(ctx, "my-profile", "webhook", &webhook.Options{
Endpoint: server.URL + "/nonexixtent-path",
Endpoint: server.URL + "/nonexistent-path",
})
require.NoError(t, err)

View File

@@ -24,7 +24,7 @@ func TestFileStorage(t *testing.T) {
ctx := testlogging.Context(t)
// Test varioush shard configurations.
// Test various shard configurations.
for _, shardSpec := range [][]int{
{0},
{1},

View File

@@ -48,6 +48,6 @@ func TestLoggingStorage(t *testing.T) {
}
if got, want := st.ConnectionInfo().Type, underlying.ConnectionInfo().Type; got != want {
t.Errorf("unexpected connection infor %v, want %v", got, want)
t.Errorf("unexpected connection info %v, want %v", got, want)
}
}

View File

@@ -203,7 +203,7 @@ func (s *sftpImpl) PutBlobInPath(ctx context.Context, dirPath, fullPath string,
defer contig.Close()
if _, err := data.WriteTo(contig); err != nil {
return errors.Wrap(err, "can't write to comtiguous buffer")
return errors.Wrap(err, "can't write to contiguous buffer")
}
//nolint:wrapcheck

View File

@@ -179,7 +179,7 @@ func TestDeleteMultiple(t *testing.T) {
}, data)
}
func TestMetataJSONString(t *testing.T) {
func TestMetadataJSONString(t *testing.T) {
bm := blob.Metadata{
BlobID: "foo",
Length: 12345,

View File

@@ -228,7 +228,7 @@ func(ci Info) error {
}
// IterateUnreferencedBlobs returns the list of unreferenced storage blobs.
func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefixes []blob.ID, parallellism int, callback func(blob.Metadata) error) error {
func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefixes []blob.ID, parallelism int, callback func(blob.Metadata) error) error {
usedPacks, err := bigmap.NewSet(ctx)
if err != nil {
return errors.Wrap(err, "new set")
@@ -261,7 +261,7 @@ func(pi PackInfo) error {
var prefixes []blob.ID
if parallellism <= len(blobPrefixes) {
if parallelism <= len(blobPrefixes) {
prefixes = append(prefixes, blobPrefixes...)
} else {
// iterate {p,q}[0-9,a-f]
@@ -274,7 +274,7 @@ func(pi PackInfo) error {
bm.log.Debugf("scanning prefixes %v", prefixes)
if err := blob.IterateAllPrefixesInParallel(ctx, parallellism, bm.st, prefixes,
if err := blob.IterateAllPrefixesInParallel(ctx, parallelism, bm.st, prefixes,
func(bm blob.Metadata) error {
if usedPacks.Contains([]byte(bm.BlobID)) {
return nil

View File

@@ -1260,7 +1260,7 @@ func (s *contentManagerSuite) TestHandleWriteErrors(t *testing.T) {
// count how many times we retried writes/flushes
// also, verify that all the data is durable
cases := []struct {
faults []*fault.Fault // failures to similuate
faults []*fault.Fault // failures to simulate
contentSizes []int // sizes of contents to write
expectedWriteRetries []int
expectedFlushRetries int

View File

@@ -30,8 +30,8 @@
v2MaxFormatCount = invalidFormatVersion
v2MaxUniquePackIDCount = 1 << 24 // max number of packs that can be stored
v2MaxShortPackIDCount = 1 << 16 // max number that can be represented using 2 bytes
v2MaxContentLength = 1 << 28 // max supported content length (representible using 3.5 bytes)
v2MaxShortContentLength = 1 << 24 // max content length representible using 3 bytes
v2MaxContentLength = 1 << 28 // max supported content length (representable using 3.5 bytes)
v2MaxShortContentLength = 1 << 24 // max content length representable using 3 bytes
v2MaxPackOffset = 1 << 30 // max pack offset 1GiB to leave 2 bits for flags
v2DeletedMarker = 0x80000000
v2MaxEntrySize = 256 // maximum length of content ID + per-entry data combined

View File

@@ -204,14 +204,14 @@ func (m *ManagerV0) registerCompaction(ctx context.Context, inputs, outputs []bl
return nil
}
// WriteIndexBlobs writes the provided data shards into new index blobs oprionally appending the provided suffix.
// WriteIndexBlobs writes the provided data shards into new index blobs optionally appending the provided suffix.
func (m *ManagerV0) WriteIndexBlobs(ctx context.Context, dataShards []gather.Bytes, suffix blob.ID) ([]blob.Metadata, error) {
var result []blob.Metadata
for _, data := range dataShards {
bm, err := m.enc.EncryptAndWriteBlob(ctx, data, V0IndexBlobPrefix, suffix)
if err != nil {
return nil, errors.Wrap(err, "error writing index blbo")
return nil, errors.Wrap(err, "error writing index blob")
}
result = append(result, bm)
@@ -323,7 +323,7 @@ func (m *ManagerV0) findIndexBlobsToDelete(latestServerBlobTime time.Time, entri
tmp := map[blob.ID]bool{}
for _, cl := range entries {
// are the input index blobs in this compaction eligble for deletion?
// are the input index blobs in this compaction eligible for deletion?
if age := latestServerBlobTime.Sub(cl.metadata.Timestamp); age < maxEventualConsistencySettleTime {
m.log.Debugf("not deleting compacted index blob used as inputs for compaction %v, because it's too recent: %v < %v", cl.metadata.BlobID, age, maxEventualConsistencySettleTime)
continue

View File

@@ -159,7 +159,7 @@ func NewMemoryBlobCache(timeNow func() time.Time) blobCache {
}
}
// NewFormatBlobCache creates an implementationof blobCache for particular cache settings.
// NewFormatBlobCache creates an implementation of blobCache for particular cache settings.
func NewFormatBlobCache(cacheDir string, validDuration time.Duration, timeNow func() time.Time) blobCache {
if cacheDir != "" {
return NewDiskCache(cacheDir)

View File

@@ -249,7 +249,7 @@ func (m *Manager) RepositoryFormatBytes(ctx context.Context) ([]byte, error) {
return f.RepositoryFormatBytes(ctx)
}
// GetMutableParameters gets mutable paramers of the repository.
// GetMutableParameters gets mutable parameters of the repository.
// This function blocks to refresh the format blob if necessary.
func (m *Manager) GetMutableParameters(ctx context.Context) (MutableParameters, error) {
f, err := m.getOrRefreshFormat(ctx)
@@ -261,7 +261,7 @@ func (m *Manager) GetMutableParameters(ctx context.Context) (MutableParameters,
return f.GetMutableParameters(ctx)
}
// GetCachedMutableParameters gets mutable paramers of the repository without blocking.
// GetCachedMutableParameters gets mutable parameters of the repository without blocking.
func (m *Manager) GetCachedMutableParameters() MutableParameters {
m.mu.RLock()
defer m.mu.RUnlock()

View File

@@ -23,7 +23,7 @@
// on a repository that is already using the latest format version.
var ErrFormatUptoDate = errors.New("repository format is up to date") // +checklocksignore
// BackupBlobID gets the upgrade backu pblob-id fro mthe lock.
// BackupBlobID gets the upgrade backup blob-id from the lock.
func BackupBlobID(l UpgradeLockIntent) blob.ID {
return blob.ID(BackupBlobIDPrefix + l.OwnerID)
}

View File

@@ -56,7 +56,7 @@ func TestLocalConfig_noCaching(t *testing.T) {
require.NoError(t, err)
if got, want := loadedLC.Caching, originalLC.Caching; got != want {
t.Fatalf("cacheing did not round trip: %v, want %v", got, want)
t.Fatalf("caching did not round trip: %v, want %v", got, want)
}
}

View File

@@ -131,7 +131,7 @@ func (b *Buffer) AppendUint(val uint64, base int) *Buffer {
return b.AppendBytes(strconv.AppendUint(buf[:0], val, base))
}
// String returns a string value of a buffer. The value is valud as long as
// String returns a string value of a buffer. The value is valid as long as
// string remains allocated and no Append*() methods have been called.
func (b *Buffer) String() string {
if b.validLen == 0 {

View File

@@ -93,7 +93,7 @@ func GetParams(ctx context.Context, rep repo.Repository) (*Params, error) {
return &p, nil
}
// arbitrality pick first pick ID to return in case there's more than one
// arbitrarily pick first pick ID to return in case there's more than one
// this is possible when two repository clients independently create manifests at approximately the same time
// so it should not really matter which one we pick.
// see https://github.com/kopia/kopia/issues/391

View File

@@ -46,7 +46,7 @@ type Schedule struct {
Runs map[TaskType][]RunInfo `json:"runs"`
}
// ReportRun adds the provided run information to the history and discards oldest entried.
// ReportRun adds the provided run information to the history and discards oldest entries.
func (s *Schedule) ReportRun(taskType TaskType, info RunInfo) {
if s.Runs == nil {
s.Runs = map[TaskType][]RunInfo{}

View File

@@ -152,7 +152,7 @@ func (m *committedManifestManager) loadCommittedContentsLocked(ctx context.Conte
}, func(ci content.Info) error {
man, err := loadManifestContent(ctx, m.b, ci.ContentID)
if err != nil {
// this can be used to allow corrupterd repositories to still open and see the
// this can be used to allow corrupted repositories to still open and see the
// (incomplete) list of manifests.
if os.Getenv("KOPIA_IGNORE_MALFORMED_MANIFEST_CONTENTS") != "" {
log(ctx).Warnf("ignoring malformed manifest content %v: %v", ci.ContentID, err)

View File

@@ -39,7 +39,7 @@ func TestPickLatestID(t *testing.T) {
want: "id3",
},
{
// pick lexicographically latests ID if all times are the same.
// pick lexicographically latest ID if all times are the same.
input: []*manifest.EntryMetadata{
{ID: "idx", ModTime: t0},
{ID: "ida", ModTime: t0},

View File

@@ -37,7 +37,7 @@ type fakeContentManager struct {
// +checklocks:mu
data map[content.ID][]byte
// +checklocks:mu
compresionIDs map[content.ID]compression.HeaderID
compressionIDs map[content.ID]compression.HeaderID
supportsContentCompression bool
writeContentError error
@@ -72,8 +72,8 @@ func (f *fakeContentManager) WriteContent(ctx context.Context, data gather.Bytes
defer f.mu.Unlock()
f.data[contentID] = data.ToByteSlice()
if f.compresionIDs != nil {
f.compresionIDs[contentID] = comp
if f.compressionIDs != nil {
f.compressionIDs[contentID] = comp
}
return contentID, nil
@@ -88,7 +88,7 @@ func (f *fakeContentManager) ContentInfo(ctx context.Context, contentID content.
defer f.mu.Unlock()
if d, ok := f.data[contentID]; ok {
return content.Info{ContentID: contentID, PackedLength: uint32(len(d)), CompressionHeaderID: f.compresionIDs[contentID]}, nil
return content.Info{ContentID: contentID, PackedLength: uint32(len(d)), CompressionHeaderID: f.compressionIDs[contentID]}, nil
}
return content.Info{}, blob.ErrBlobNotFound
@@ -106,7 +106,7 @@ func setupTest(t *testing.T, compressionHeaderID map[content.ID]compression.Head
fcm := &fakeContentManager{
data: data,
supportsContentCompression: compressionHeaderID != nil,
compresionIDs: compressionHeaderID,
compressionIDs: compressionHeaderID,
}
r, err := NewObjectManager(testlogging.Context(t), fcm, format.ObjectFormat{

View File

@@ -78,7 +78,7 @@ Some cloud storage solutions provide the ability to generate restricted access k
So far, we have secured your access such that even if a bad actor gets access to your Kopia configuration, they can't do irreparable harm to your cloud backup. However, what if they get access to your login credentials? Your login credentials provide the ability to delete your data and even your entire buckets for all the buckets in your account. But the cloud providers have protection from that too.
Multi-factor-authentication (MFA) is one option. With MFA enabled, an attacker would need access to your password as well as your security device to be able to manipulate your account. All major providers support MFA, and it is recommended to use it to secure your account. Note that it is important to eliminate and root/global acess keys as well, since they can generally be used to execute nearly any task you can do when logged in (effectively bypassing MFA).
Multi-factor-authentication (MFA) is one option. With MFA enabled, an attacker would need access to your password as well as your security device to be able to manipulate your account. All major providers support MFA, and it is recommended to use it to secure your account. Note that it is important to eliminate and root/global access keys as well, since they can generally be used to execute nearly any task you can do when logged in (effectively bypassing MFA).
An additional layer of protection is `Object Locking' that can be enabled in AWS (and S3 compatible providers). An Object Lock is applied when a file is created (or on an existing file), and it provides a specific retention date. Until that retention date occurs, there is no way to delete the locked file. Even using your login credentials, the file is protected from deletion. It can still be overwritten with a new version or hidden such that it doesn't appear in a file list. But it will always be accessible until its retention date occurs. While Kopia supports applying Object Locks, there are some caveats:

View File

@@ -81,7 +81,7 @@ kopia policy set </path/to/source/directory/> --compression=<none|deflate-best-c
```shell
kopia policy set --global --compression=<none|deflate-best-compression|deflate-best-speed|deflate-default|gzip|gzip-best-compression|gzip-best-speed|pgzip|pgzip-best-compression|pgzip-best-speed|s2-better|s2-default|s2-parallel-4|s2-parallel-8|zstd|zstd-better-compression|zstd-fastest>
```
If you enable or disable compression or change the compression algorithm, the new setting is applied going forward and not reteroactively. In other words, Kopia will not modify the compression for files/directories already uploaded to your repository.
If you enable or disable compression or change the compression algorithm, the new setting is applied going forward and not retroactively. In other words, Kopia will not modify the compression for files/directories already uploaded to your repository.
If you are unclear about what compression algorithm to use, `zstd` is considered one of the top algorithms currently.
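
For instance, following the command template above, enabling the recommended algorithm globally would be `kopia policy set --global --compression=zstd`; as noted above, this only affects data snapshotted after the change.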

View File

@@ -310,7 +310,7 @@ services:
- /path/to/data/dir:/data:ro
# Mount repository location
- /path/to/repository/dir:/repository
# Mount path for browsing mounted snaphots
# Mount path for browsing mounted snapshots
- /path/to/tmp/dir:/tmp:shared
```

View File

@@ -2,7 +2,7 @@
import "github.com/kopia/kopia/snapshot"
// ErrorHandlingPolicy controls error hadnling behavior when taking snapshots.
// ErrorHandlingPolicy controls error handling behavior when taking snapshots.
type ErrorHandlingPolicy struct {
// IgnoreFileErrors controls whether or not snapshot operation should fail when a file throws an error on being read
IgnoreFileErrors *OptionalBool `json:"ignoreFileErrors,omitempty"`

View File

@@ -39,14 +39,14 @@
var log = logging.Module("kopia/snapshot/policy")
// GetEffectivePolicy calculates effective snapshot policy for a given source by combining the source-specifc policy (if any)
// GetEffectivePolicy calculates effective snapshot policy for a given source by combining the source-specific policy (if any)
// with parent policies. The source must contain a path.
// Returns the effective policies and all source policies that contributed to that (most specific first).
func GetEffectivePolicy(ctx context.Context, rep repo.Repository, si snapshot.SourceInfo) (effective *Policy, definition *Definition, sources []*Policy, e error) {
return GetEffectivePolicyWithOverride(ctx, rep, si, nil)
}
// GetEffectivePolicyWithOverride calculates effective snapshot policy for a given source by combining the source-specifc policy (if any)
// GetEffectivePolicyWithOverride calculates effective snapshot policy for a given source by combining the source-specific policy (if any)
// with parent policies. The source must contain a path.
// Returns the effective policies and all source policies that contributed to that (most specific first).
func GetEffectivePolicyWithOverride(ctx context.Context, rep repo.Repository, si snapshot.SourceInfo, optionalPolicyOverride *Policy) (effective *Policy, definition *Definition, sources []*Policy, e error) {
@@ -142,7 +142,7 @@ func GetDefinedPolicy(ctx context.Context, rep repo.Repository, si snapshot.Sour
return nil, ErrPolicyNotFound
}
// arbitrality pick first pick ID to return in case there's more than one
// arbitrarily pick first pick ID to return in case there's more than one
// this is possible when two repository clients independently create manifests at approximately the same time
// so it should not really matter which one we pick.
// see https://github.com/kopia/kopia/issues/391

View File

@@ -116,7 +116,7 @@ func (t *Tree) EffectivePolicy() *Policy {
return t.effective
}
// IsInherited returns true if the policy inherited to the given tree hode has been inherited from its parent.
// IsInherited returns true if the policy inherited to the given tree node has been inherited from its parent.
func (t *Tree) IsInherited() bool {
if t == nil {
return true

View File

@@ -226,7 +226,7 @@ func TestCompactPins(t *testing.T) {
}))
}
func TestCompactRetentionrRasons(t *testing.T) {
func TestCompactRetentionReasons(t *testing.T) {
cases := []struct {
input []string
want []string

View File

@@ -204,7 +204,7 @@ func (p *SchedulingPolicy) checkMissedSnapshot(now, previousSnapshotTime, nextSn
}
nextSnapshot := nextSnapshotTime
// We add a second to ensure that the next possible snapshot is > the last snaphot
// We add a second to ensure that the next possible snapshot is > the last snapshot
todSnapshot, todOk := p.getNextTimeOfDaySnapshot(momentAfterSnapshot)
cronSnapshot, cronOk := p.getNextCronSnapshot(momentAfterSnapshot)

View File

@@ -66,7 +66,7 @@ func (b *DirManifestBuilder) AddEntry(de *snapshot.DirEntry) {
}
}
// AddFailedEntry adds a failed directory entry to the builder and increments enither ignored or fatal error count.
// AddFailedEntry adds a failed directory entry to the builder and increments either ignored or fatal error count.
func (b *DirManifestBuilder) AddFailedEntry(relPath string, isIgnoredError bool, err error) {
b.mu.Lock()
defer b.mu.Unlock()

View File

@@ -76,7 +76,7 @@ func parseNestedObjectID(ctx context.Context, startingDir fs.Entry, parts []stri
}
// FindSnapshotByRootObjectIDOrManifestID returns the list of matching snapshots for a given rootID.
// which can be either snapshot manifst ID (which matches 0 or 1 snapshots)
// which can be either snapshot manifest ID (which matches 0 or 1 snapshots)
// or the root object ID (which can match arbitrary number of snapshots).
// If multiple snapshots match and they don't agree on root object attributes and consistentAttributes==true
// the function fails, otherwise it returns the latest of the snapshots.

View File

@@ -325,7 +325,7 @@ func (p *CountingUploadProgress) UITaskCounters(final bool) map[string]uitask.Co
"Hashed Bytes": uitask.BytesCounter(hashedBytes),
"Processed Bytes": uitask.BytesCounter(hashedBytes + cachedBytes),
// bytes actually ploaded to the server (non-deduplicated)
// bytes actually uploaded to the server (non-deduplicated)
"Uploaded Bytes": uitask.BytesCounter(atomic.LoadInt64(&p.counters.TotalUploadedBytes)),
"Excluded Files": uitask.SimpleCounter(int64(atomic.LoadInt32(&p.counters.TotalExcludedFiles))),

View File

@@ -44,7 +44,7 @@ func MustParseSnapshots(t *testing.T, lines []string) []SourceInfo {
return nil
}
currentSource.Snapshots = append(currentSource.Snapshots, mustParseSnaphotInfo(t, l[2:]))
currentSource.Snapshots = append(currentSource.Snapshots, mustParseSnapshotInfo(t, l[2:]))
continue
}
@@ -57,7 +57,7 @@ func MustParseSnapshots(t *testing.T, lines []string) []SourceInfo {
return result
}
func mustParseSnaphotInfo(t *testing.T, l string) SnapshotInfo {
func mustParseSnapshotInfo(t *testing.T, l string) SnapshotInfo {
t.Helper()
incomplete := strings.Contains(l, "incomplete")

View File

@@ -32,7 +32,7 @@ func TestACL(t *testing.T) {
// reduce default access to snapshots to APPEND - this will fail because exactly identical rule already exists and grants FULL access.
serverEnvironment.RunAndExpectFailure(t, "server", "acl", "add", "--user", "*@*", "--target", "type=snapshot,username=OWN_USER,hostname=OWN_HOST", "--access=APPEND")
// reduce default access to snapshots to APPEND with --overwrite, this wil succeed.
// reduce default access to snapshots to APPEND with --overwrite, this will succeed.
serverEnvironment.RunAndExpectSuccess(t, "server", "acl", "add", "--user", "*@*", "--target", "type=snapshot,username=OWN_USER,hostname=OWN_HOST", "--access=APPEND", "--overwrite")
// add read access to all snapshots and policies for user foo@bar

View File

@@ -109,7 +109,7 @@ func TestRestoreCommand(t *testing.T) {
// Attempt to restore using snapshot ID
restoreFailDir := testutil.TempDirectory(t)
// Remember original app cusomization
// Remember original app customization
origCustomizeApp := runner.CustomizeApp
// Prepare fake restore progress and set it when needed

View File

@@ -722,7 +722,7 @@ func TestSnapshotCreateAllFlushPerSource(t *testing.T) {
require.Len(t, indexList2, len(indexList1)+1)
require.Len(t, metadataBlobList2, len(metadataBlobList1)+1)
// snapshot with --flush-per-source, since there are 3 soufces, we'll have 3 index blobs
// snapshot with --flush-per-source, since there are 3 sources, we'll have 3 index blobs
e.RunAndExpectSuccess(t, "snapshot", "create", "--all", "--flush-per-source")
indexList3 := e.RunAndExpectSuccess(t, "index", "ls")

View File

@@ -183,10 +183,10 @@ func TestEndToEndTest(t *testing.T) {
downloadDir := testutil.TempDirectory(t)
snap1Path := testutil.TempDirectory(t)
// create a test snaphot
// create a test snapshot
createTestSnapshot(t, ctx, sp, tc, repoPath, snap1Path)
// navigate to the base page, wait unti we're redirected to 'Repository' page
// navigate to the base page, wait until we're redirected to 'Repository' page
require.NoError(t, chromedp.Run(ctx,
tc.log("clicking on snapshot source"),
chromedp.Click(`a[href*='/snapshots/single-source']`),
@@ -227,7 +227,7 @@ func TestConnectDisconnectReconnect(t *testing.T) {
runInBrowser(t, func(ctx context.Context, sp *testutil.ServerParameters, tc *TestContext) {
repoPath := testutil.TempDirectory(t)
// navigate to the base page, wait unti we're redirected to 'Repository' page
// navigate to the base page, wait until we're redirected to 'Repository' page
require.NoError(t, chromedp.Run(ctx,
chromedp.Navigate(sp.BaseURL),
chromedp.WaitVisible("button[data-testid='provider-filesystem']"),
@@ -338,7 +338,7 @@ func TestByteRepresentation(t *testing.T) {
var base10 string
// create a test snaphot
// create a test snapshot
createTestSnapshot(t, ctx, sp, tc, repoPath, snap1Path)
// begin test
@@ -386,7 +386,7 @@ func TestPagination(t *testing.T) {
repoPath := testutil.TempDirectory(t)
snap1Path := testutil.TempDirectory(t)
// create a test snaphot
// create a test snapshot
createTestSnapshot(t, ctx, sp, tc, repoPath, snap1Path)
})
}

View File

@@ -30,7 +30,7 @@
// BlobManipulator provides a way to run a kopia command.
type BlobManipulator struct {
KopiaCommandRunner *kopiarunner.KopiaSnapshotter
DirCreater *snapmeta.KopiaSnapshotter
DirCreator *snapmeta.KopiaSnapshotter
fileWriter *fiofilewriter.FileWriter
DataRepoPath string
@@ -52,7 +52,7 @@ func NewBlobManipulator(baseDirPath, dataRepoPath string) (*BlobManipulator, err
return &BlobManipulator{
KopiaCommandRunner: runner,
DirCreater: ks,
DirCreator: ks,
}, nil
}
@@ -84,7 +84,7 @@ func (bm *BlobManipulator) ConnectOrCreateRepo(dataRepoPath string) error {
return errKopiaRepoNotFound
}
return bm.DirCreater.ConnectOrCreateRepo(bm.DataRepoPath)
return bm.DirCreator.ConnectOrCreateRepo(bm.DataRepoPath)
}
// DeleteBlob deletes the provided blob or a random blob, in kopia repo.

View File

@@ -24,7 +24,7 @@ type OpenRepository struct {
openID string
}
// Refresh refreshes the set of committed Contents and manifest from repositor.
// Refresh refreshes the set of committed Contents and manifest from repository.
func (o *OpenRepository) Refresh(ctx context.Context, cids *TrackingSet[content.ID], mids *TrackingSet[manifest.ID]) {
o.ReadableContents.Replace(ctx, cids.ids)
o.ReadableManifests.Replace(ctx, mids.ids)

View File

@@ -25,7 +25,7 @@ func (s *RepositorySession) WriteManifest(ctx context.Context, mid manifest.ID)
s.WrittenManifests.Add(ctx, mid)
}
// Refresh refreshes the set of committed contents and manifest from repositor.
// Refresh refreshes the set of committed contents and manifest from repository.
func (s *RepositorySession) Refresh(ctx context.Context, cids *TrackingSet[content.ID], mids *TrackingSet[manifest.ID]) {
s.OpenRepo.Refresh(ctx, cids, mids)
}

View File

@@ -50,7 +50,7 @@
s3DataRepoPath = filepath.Join(s3RepoBaseDirPath, dataRepoPath)
)
func TestEngineWritefilesBasicFS(t *testing.T) {
func TestEngineWriteFilesBasicFS(t *testing.T) {
t.Setenv(snapmeta.EngineModeEnvKey, snapmeta.EngineModeBasic)
t.Setenv(snapmeta.S3BucketNameEnvKey, "")

View File

@@ -185,7 +185,7 @@ func isRootDirectoryRename(diffItem string, mod fswalker.ActionData) bool {
return mod.Before.GetInfo().GetIsDir() && filepath.Clean(mod.Before.GetPath()) == "."
}
// Directory size changes with underlying file system setups. Ignote the dir size during data consistency check to make it robust.
// Directory size changes with underlying file system setups. Ignore the dir size during data consistency check to make it robust.
// Remove this filter from GlobalFilterFuncs to detect the size difference in a directory.
func filterDirSizeCheck(str string, mod fswalker.ActionData) bool {
return mod.Before.GetInfo().GetIsDir() && strings.Contains(str, "size: ")

View File

@@ -57,7 +57,7 @@ func Walk(ctx context.Context, policy *fspb.Policy) (*fspb.Walk, error) { //noli
return retWalk, nil
}
// WalkPathHash performs a walk at the path prvided and returns a pointer
// WalkPathHash performs a walk at the path provided and returns a pointer
// to the Walk result.
func WalkPathHash(ctx context.Context, path string) (*fspb.Walk, error) {
return Walk(ctx, &fspb.Policy{

View File

@@ -161,7 +161,7 @@ func unzip(dir string, r io.Reader, stripPathComponents int) error {
}
// Download downloads the provided URL and extracts it to the provided directory, retrying
// exponentionally until succeeded.
// exponentially until succeeded.
func Download(url, dir string, checksum map[string]string, stripPathComponents int) error {
const (
// sleep durations 5, 10, 20, 40, 80, 160, 320