diff --git a/.github/workflows/htmlui-tests.yml b/.github/workflows/htmlui-tests.yml
index 6acb82240..8d536ccfb 100644
--- a/.github/workflows/htmlui-tests.yml
+++ b/.github/workflows/htmlui-tests.yml
@@ -16,8 +16,8 @@ env:
UNIX_SHELL_ON_WINDOWS: true
# set (to any value other than false) to trigger random unicode filenames testing (logs may be difficult to read)
ENABLE_UNICODE_FILENAMES: ${{ secrets.ENABLE_UNICODE_FILENAMES }}
- # set (to any value other than false) to trigger very long filenames testing
- ENABLE_LONG_FILENAMES: ${{ secrets.ENABLE_LONG_FILENAMES }}
+ # disable long filenames since they sometimes get messed up when simulating input keystrokes
+ ENABLE_LONG_FILENAMES: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
diff --git a/Makefile b/Makefile
index bf0083a40..c3bf268f3 100644
--- a/Makefile
+++ b/Makefile
@@ -76,6 +76,13 @@ ifneq ($(GOOS)/$(GOARCH),linux/arm)
endif
endif
+lint-fix: $(linter)
+ifneq ($(GOOS)/$(GOARCH),linux/arm64)
+ifneq ($(GOOS)/$(GOARCH),linux/arm)
+ $(linter) --deadline $(LINTER_DEADLINE) run --fix $(linter_flags)
+endif
+endif
+
lint-and-log: $(linter)
$(linter) --deadline $(LINTER_DEADLINE) run $(linter_flags) | tee .linterr.txt
diff --git a/cli/app.go b/cli/app.go
index b07cad4f2..fd150b92e 100644
--- a/cli/app.go
+++ b/cli/app.go
@@ -30,7 +30,7 @@
var tracer = otel.Tracer("cli")
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
defaultColor = color.New()
warningColor = color.New(color.FgYellow)
@@ -164,7 +164,7 @@ type App struct {
stdinReader io.Reader
stdoutWriter io.Writer
stderrWriter io.Writer
- rootctx context.Context // nolint:containedctx
+ rootctx context.Context //nolint:containedctx
loggerFactory logging.LoggerFactory
simulatedCtrlC chan bool
envNamePrefix string
@@ -429,7 +429,7 @@ func assertDirectRepository(act func(ctx context.Context, rep repo.DirectReposit
func (c *App) directRepositoryWriteAction(act func(ctx context.Context, rep repo.DirectRepositoryWriter) error) func(ctx *kingpin.ParseContext) error {
return c.maybeRepositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.DirectWriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "cli:" + c.currentActionName(),
OnUpload: c.progress.UploadedBytes,
@@ -460,7 +460,7 @@ func (c *App) repositoryReaderAction(act func(ctx context.Context, rep repo.Repo
func (c *App) repositoryWriterAction(act func(ctx context.Context, rep repo.RepositoryWriter) error) func(ctx *kingpin.ParseContext) error {
return c.maybeRepositoryAction(func(ctx context.Context, rep repo.Repository) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.WriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "cli:" + c.currentActionName(),
OnUpload: c.progress.UploadedBytes,
@@ -575,7 +575,7 @@ func (c *App) maybeRunMaintenance(ctx context.Context, rep repo.Repository) erro
Purpose: "maybeRunMaintenance",
OnUpload: c.progress.UploadedBytes,
}, func(ctx context.Context, w repo.DirectRepositoryWriter) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshotmaintenance.Run(ctx, w, maintenance.ModeAuto, false, maintenance.SafetyFull)
})
diff --git a/cli/auto_upgrade.go b/cli/auto_upgrade.go
index d217ee2a1..11f684b5a 100644
--- a/cli/auto_upgrade.go
+++ b/cli/auto_upgrade.go
@@ -27,7 +27,7 @@ func maybeAutoUpgradeRepository(ctx context.Context, r repo.Repository) error {
log(ctx).Debugf("Setting default maintenance parameters...")
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.DirectWriteSession(ctx, dr, repo.WriteSessionOptions{
Purpose: "setDefaultMaintenanceParameters",
}, func(ctx context.Context, w repo.DirectRepositoryWriter) error {
diff --git a/cli/cli_progress.go b/cli/cli_progress.go
index 4afb17640..606c3b555 100644
--- a/cli/cli_progress.go
+++ b/cli/cli_progress.go
@@ -169,7 +169,7 @@ func (p *cliProgress) output(col *color.Color, msg string) {
prefix = ""
}
- col.Fprintf(p.out.stderr(), "%v%v", prefix, msg) // nolint:errcheck
+ col.Fprintf(p.out.stderr(), "%v%v", prefix, msg) //nolint:errcheck
}
if !p.enableProgress {
diff --git a/cli/command_acl_add.go b/cli/command_acl_add.go
index e2e150683..a7048ffb8 100644
--- a/cli/command_acl_add.go
+++ b/cli/command_acl_add.go
@@ -28,7 +28,7 @@ func (c *commandACLAdd) run(ctx context.Context, rep repo.RepositoryWriter) erro
r := acl.TargetRule{}
for _, v := range strings.Split(c.target, ",") {
- parts := strings.SplitN(v, "=", 2) // nolint:gomnd
+ parts := strings.SplitN(v, "=", 2) //nolint:gomnd
if len(parts) != 2 { //nolint:gomnd
return errors.Errorf("invalid target labels %q, must be key=value", v)
}
diff --git a/cli/command_benchmark_compression.go b/cli/command_benchmark_compression.go
index 2ba751aad..7aecc8cd4 100644
--- a/cli/command_benchmark_compression.go
+++ b/cli/command_benchmark_compression.go
@@ -51,7 +51,7 @@ func (c *commandBenchmarkCompression) readInputFile(ctx context.Context) ([]byte
return nil, errors.Wrap(err, "error opening input file")
}
- defer f.Close() // nolint:errcheck,gosec
+ defer f.Close() //nolint:errcheck,gosec
st, err := f.Stat()
if err != nil {
diff --git a/cli/command_benchmark_crypto.go b/cli/command_benchmark_crypto.go
index a476203b0..4e611779d 100644
--- a/cli/command_benchmark_crypto.go
+++ b/cli/command_benchmark_crypto.go
@@ -70,8 +70,8 @@ func (c *commandBenchmarkCrypto) runBenchmark(ctx context.Context) []cryptoBench
fo := &format.ContentFormat{
Encryption: ea,
Hash: ha,
- MasterKey: make([]byte, 32), // nolint:gomnd
- HMACSecret: make([]byte, 32), // nolint:gomnd
+ MasterKey: make([]byte, 32), //nolint:gomnd
+ HMACSecret: make([]byte, 32), //nolint:gomnd
}
hf, err := hashing.CreateHashFunc(fo)
diff --git a/cli/command_benchmark_encryption.go b/cli/command_benchmark_encryption.go
index 8def87d46..83a8c4e9c 100644
--- a/cli/command_benchmark_encryption.go
+++ b/cli/command_benchmark_encryption.go
@@ -69,8 +69,8 @@ func (c *commandBenchmarkEncryption) runBenchmark(ctx context.Context) []cryptoB
enc, err := encryption.CreateEncryptor(&format.ContentFormat{
Encryption: ea,
Hash: hashing.DefaultAlgorithm,
- MasterKey: make([]byte, 32), // nolint:gomnd
- HMACSecret: make([]byte, 32), // nolint:gomnd
+ MasterKey: make([]byte, 32), //nolint:gomnd
+ HMACSecret: make([]byte, 32), //nolint:gomnd
})
if err != nil {
continue
diff --git a/cli/command_benchmark_hashing.go b/cli/command_benchmark_hashing.go
index 0645b050d..2439ca0b2 100644
--- a/cli/command_benchmark_hashing.go
+++ b/cli/command_benchmark_hashing.go
@@ -65,7 +65,7 @@ func (c *commandBenchmarkHashing) runBenchmark(ctx context.Context) []cryptoBenc
for _, ha := range hashing.SupportedAlgorithms() {
hf, err := hashing.CreateHashFunc(&format.ContentFormat{
Hash: ha,
- HMACSecret: make([]byte, 32), // nolint:gomnd
+ HMACSecret: make([]byte, 32), //nolint:gomnd
})
if err != nil {
continue
diff --git a/cli/command_blob_list.go b/cli/command_blob_list.go
index 0e460fcd1..2b2fc6d10 100644
--- a/cli/command_blob_list.go
+++ b/cli/command_blob_list.go
@@ -39,7 +39,7 @@ func (c *commandBlobList) run(ctx context.Context, rep repo.DirectRepository) er
jl.begin(&c.jo)
defer jl.end()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.BlobReader().ListBlobs(ctx, blob.ID(c.blobListPrefix), func(b blob.Metadata) error {
if !c.shouldInclude(b) {
return nil
diff --git a/cli/command_blob_shards_modify.go b/cli/command_blob_shards_modify.go
index 9b18d7b9c..bf9651519 100644
--- a/cli/command_blob_shards_modify.go
+++ b/cli/command_blob_shards_modify.go
@@ -43,7 +43,7 @@ func (c *commandBlobShardsModify) setup(svc appServices, parent commandParent) {
}
func (c *commandBlobShardsModify) getParameters(dotShardsFile string) (*sharded.Parameters, error) {
- // nolint:gosec
+ //nolint:gosec
f, err := os.Open(dotShardsFile)
if err != nil {
return nil, errors.Wrap(err, "unable to open shards file")
@@ -192,7 +192,7 @@ func (c *commandBlobShardsModify) removeEmptyDirs(ctx context.Context, dir strin
isEmpty := true
for _, ent := range entries {
- // nolint:nestif
+ //nolint:nestif
if ent.IsDir() {
childPath := path.Join(dir, ent.Name())
@@ -231,7 +231,7 @@ func (c *commandBlobShardsModify) renameBlobs(ctx context.Context, dir, prefix s
}
for _, ent := range entries {
- // nolint:nestif
+ //nolint:nestif
if ent.IsDir() {
if err := c.renameBlobs(ctx, path.Join(dir, ent.Name()), prefix+ent.Name(), params, numMoved, numUnchanged); err != nil {
return err
@@ -253,7 +253,7 @@ func (c *commandBlobShardsModify) renameBlobs(ctx context.Context, dir, prefix s
if !c.dryRun {
err := os.Rename(srcFile, destFile)
if os.IsNotExist(err) {
- // nolint:gomnd
+ //nolint:gomnd
if err2 := os.MkdirAll(destDir, 0o700); err2 != nil {
return errors.Wrap(err2, "error creating directory")
}
diff --git a/cli/command_blob_stats.go b/cli/command_blob_stats.go
index 63c25d44c..c9b7bf581 100644
--- a/cli/command_blob_stats.go
+++ b/cli/command_blob_stats.go
@@ -65,7 +65,7 @@ func(b blob.Metadata) error {
sizeToString := units.BytesStringBase10
if c.raw {
sizeToString = func(l int64) string {
- // nolint:gomnd
+ //nolint:gomnd
return strconv.FormatInt(l, 10)
}
}
diff --git a/cli/command_cache_clear.go b/cli/command_cache_clear.go
index ddc71a44c..b9c55b40c 100644
--- a/cli/command_cache_clear.go
+++ b/cli/command_cache_clear.go
@@ -53,7 +53,7 @@ func clearCacheDirectory(ctx context.Context, d string) error {
log(ctx).Infof("Clearing cache directory: %v.", d)
err := retry.WithExponentialBackoffNoValue(ctx, "delete cache", func() error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.RemoveAll(d)
}, retry.Always)
if err != nil {
diff --git a/cli/command_cache_set.go b/cli/command_cache_set.go
index 47403e7a4..ab507b410 100644
--- a/cli/command_cache_set.go
+++ b/cli/command_cache_set.go
@@ -98,6 +98,6 @@ func (c *commandCacheSetParams) run(ctx context.Context, rep repo.RepositoryWrit
return errors.Errorf("no changes")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.SetCachingOptions(ctx, c.svc.repositoryConfigFileName(), opts)
}
diff --git a/cli/command_cache_sync.go b/cli/command_cache_sync.go
index f5cfd1d4a..06cd3b595 100644
--- a/cli/command_cache_sync.go
+++ b/cli/command_cache_sync.go
@@ -43,7 +43,7 @@ func (c *commandCacheSync) run(ctx context.Context, rep repo.DirectRepositoryWri
eg.Go(func() error {
defer close(ch)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.BlobReader().ListBlobs(ctx, content.PackBlobIDPrefixSpecial, func(bm blob.Metadata) error {
ch <- bm.BlobID
diff --git a/cli/command_content_rewrite.go b/cli/command_content_rewrite.go
index bf217795e..456565ee4 100644
--- a/cli/command_content_rewrite.go
+++ b/cli/command_content_rewrite.go
@@ -48,7 +48,7 @@ func (c *commandContentRewrite) runContentRewriteCommand(ctx context.Context, re
return err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return maintenance.RewriteContents(ctx, rep, &maintenance.RewriteContentsOptions{
ContentIDRange: c.contentRange.contentIDRange(),
ContentIDs: contentIDs,
diff --git a/cli/command_content_stats.go b/cli/command_content_stats.go
index a089c6f61..6813322ef 100644
--- a/cli/command_content_stats.go
+++ b/cli/command_content_stats.go
@@ -51,7 +51,7 @@ func (c *commandContentStats) run(ctx context.Context, rep repo.DirectRepository
sizeToString := units.BytesStringBase10
if c.raw {
sizeToString = func(l int64) string {
- return strconv.FormatInt(l, 10) // nolint:gomnd
+ return strconv.FormatInt(l, 10) //nolint:gomnd
}
}
@@ -153,6 +153,6 @@ func(b content.Info) error {
return nil
})
- // nolint:wrapcheck
+ //nolint:wrapcheck
return grandTotal, byCompressionTotal, countMap, totalSizeOfContentsUnder, err
}
diff --git a/cli/command_content_verify.go b/cli/command_content_verify.go
index d445d241b..79e8cefed 100644
--- a/cli/command_content_verify.go
+++ b/cli/command_content_verify.go
@@ -155,7 +155,7 @@ func (c *commandContentVerify) contentVerify(ctx context.Context, r content.Read
return errors.Errorf("content %v out of bounds of its pack blob %v", ci.GetContentID(), ci.GetPackBlobID())
}
- // nolint:gosec
+ //nolint:gosec
if 100*rand.Float64() < downloadPercent {
if _, err := r.GetContent(ctx, ci.GetContentID()); err != nil {
return errors.Wrapf(err, "content %v is invalid", ci.GetContentID())
diff --git a/cli/command_index_inspect.go b/cli/command_index_inspect.go
index 7c753feb2..d878b8275 100644
--- a/cli/command_index_inspect.go
+++ b/cli/command_index_inspect.go
@@ -102,7 +102,7 @@ func (c *commandIndexInspect) inspectAllBlobs(ctx context.Context, rep repo.Dire
})
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return eg.Wait()
}
diff --git a/cli/command_index_optimize.go b/cli/command_index_optimize.go
index 8399e835a..b65b761d2 100644
--- a/cli/command_index_optimize.go
+++ b/cli/command_index_optimize.go
@@ -46,6 +46,6 @@ func (c *commandIndexOptimize) runOptimizeCommand(ctx context.Context, rep repo.
opt.DropDeletedBefore = rep.Time().Add(-age)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.ContentManager().CompactIndexes(ctx, opt)
}
diff --git a/cli/command_index_recover.go b/cli/command_index_recover.go
index 16ec4b27a..6fb99186c 100644
--- a/cli/command_index_recover.go
+++ b/cli/command_index_recover.go
@@ -105,7 +105,7 @@ func (c *commandIndexRecover) recoverIndexesFromAllPacks(ctx context.Context, re
go func() {
for _, prefix := range prefixes {
- // nolint:errcheck
+ //nolint:errcheck
rep.BlobStorage().ListBlobs(ctx, prefix, func(bm blob.Metadata) error {
atomic.AddInt32(discoveringBlobCount, 1)
return nil
diff --git a/cli/command_logs_session.go b/cli/command_logs_session.go
index 7a62f6287..88361898c 100644
--- a/cli/command_logs_session.go
+++ b/cli/command_logs_session.go
@@ -73,7 +73,7 @@ func getLogSessions(ctx context.Context, st blob.Reader) ([]*logSessionInfo, err
if err := st.ListBlobs(ctx, content.TextLogBlobPrefix, func(bm blob.Metadata) error {
parts := strings.Split(string(bm.BlobID), "_")
- // nolint:gomnd
+ //nolint:gomnd
if len(parts) < 8 {
log(ctx).Errorf("invalid part count: %v skipping unrecognized log: %v", len(parts), bm.BlobID)
return nil
@@ -81,21 +81,21 @@ func getLogSessions(ctx context.Context, st blob.Reader) ([]*logSessionInfo, err
id := parts[2] + "_" + parts[3]
- // nolint:gomnd
+ //nolint:gomnd
startTime, err := strconv.ParseInt(parts[4], 10, 64)
if err != nil {
log(ctx).Errorf("invalid start time - skipping unrecognized log: %v", bm.BlobID)
- // nolint:nilerr
+ //nolint:nilerr
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
endTime, err := strconv.ParseInt(parts[5], 10, 64)
if err != nil {
log(ctx).Errorf("invalid end time - skipping unrecognized log: %v", bm.BlobID)
- // nolint:nilerr
+ //nolint:nilerr
return nil
}
diff --git a/cli/command_ls.go b/cli/command_ls.go
index c650015e6..7c4e53a77 100644
--- a/cli/command_ls.go
+++ b/cli/command_ls.go
@@ -57,7 +57,7 @@ func (c *commandList) listDirectory(ctx context.Context, d fs.Directory, prefix,
if err := d.IterateEntries(ctx, func(innerCtx context.Context, e fs.Entry) error {
return c.printDirectoryEntry(innerCtx, e, prefix, indent)
}); err != nil {
- return err // nolint:wrapcheck
+ return err //nolint:wrapcheck
}
if dws, ok := d.(fs.DirectoryWithSummary); ok && c.errorSummary {
diff --git a/cli/command_maintenance_run.go b/cli/command_maintenance_run.go
index a01c5e4d0..85fa91281 100644
--- a/cli/command_maintenance_run.go
+++ b/cli/command_maintenance_run.go
@@ -37,6 +37,6 @@ func (c *commandMaintenanceRun) run(ctx context.Context, rep repo.DirectReposito
mode = maintenance.ModeFull
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshotmaintenance.Run(ctx, rep, mode, c.maintenanceRunForce, c.safety)
}
diff --git a/cli/command_maintenance_set.go b/cli/command_maintenance_set.go
index e80fc3f93..463a43c59 100644
--- a/cli/command_maintenance_set.go
+++ b/cli/command_maintenance_set.go
@@ -76,7 +76,7 @@ func (c *commandMaintenanceSet) setLogCleanupParametersFromFlags(ctx context.Con
if v := c.maxTotalRetainedLogSizeMB; v != -1 {
cl := p.LogRetention.OrDefault()
- cl.MaxTotalSize = v << 20 // nolint:gomnd
+ cl.MaxTotalSize = v << 20 //nolint:gomnd
p.LogRetention = cl
*changed = true
diff --git a/cli/command_mount.go b/cli/command_mount.go
index 9fde7eee7..d6e236623 100644
--- a/cli/command_mount.go
+++ b/cli/command_mount.go
@@ -68,11 +68,11 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error {
}
if c.mountTraceFS {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
entry = loggingfs.Wrap(entry, log(ctx).Debugf).(fs.Directory)
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
entry = cachefs.Wrap(entry, c.newFSCache()).(fs.Directory)
ctrl, mountErr := mount.Directory(ctx, entry, c.mountPoint,
diff --git a/cli/command_policy_edit.go b/cli/command_policy_edit.go
index 94349a7b1..80b11c967 100644
--- a/cli/command_policy_edit.go
+++ b/cli/command_policy_edit.go
@@ -88,7 +88,7 @@ func (c *commandPolicyEdit) run(ctx context.Context, rep repo.RepositoryWriter)
updated = &policy.Policy{}
d := json.NewDecoder(bytes.NewBufferString(edited))
d.DisallowUnknownFields()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return d.Decode(updated)
}); err != nil {
return errors.Wrap(err, "unable to launch editor")
diff --git a/cli/command_policy_set.go b/cli/command_policy_set.go
index 9671fd7cf..0b0fd8f33 100644
--- a/cli/command_policy_set.go
+++ b/cli/command_policy_set.go
@@ -44,7 +44,7 @@ func (c *commandPolicySet) setup(svc appServices, parent commandParent) {
cmd.Action(svc.repositoryWriterAction(c.run))
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var booleanEnumValues = []string{"true", "false", "inherit"}
const (
@@ -187,7 +187,7 @@ func applyOptionalInt(ctx context.Context, desc string, val **policy.OptionalInt
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(str, 10, 32)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
@@ -218,14 +218,14 @@ func applyOptionalInt64MiB(ctx context.Context, desc string, val **policy.Option
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(str, 10, 32)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
}
// convert MiB to bytes
- v *= 1 << 20 // nolint:gomnd
+ v *= 1 << 20 //nolint:gomnd
i := policy.OptionalInt64(v)
*changeCount++
@@ -253,7 +253,7 @@ func applyPolicyNumber64(ctx context.Context, desc string, val *int64, str strin
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %q %q", desc, str)
diff --git a/cli/command_repository_connect.go b/cli/command_repository_connect.go
index 5cbde067f..36e4503b8 100644
--- a/cli/command_repository_connect.go
+++ b/cli/command_repository_connect.go
@@ -31,14 +31,14 @@ func (c *commandRepositoryConnect) setup(svc advancedAppServices, parent command
cc := cmd.Command(prov.Name, "Connect to repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {
return errors.Wrap(err, "can't connect to storage")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return svc.runConnectCommandWithStorage(ctx, &c.co, st)
})
})
diff --git a/cli/command_repository_connect_from_config.go b/cli/command_repository_connect_from_config.go
index 1c83ea317..b91448e9a 100644
--- a/cli/command_repository_connect_from_config.go
+++ b/cli/command_repository_connect_from_config.go
@@ -50,7 +50,7 @@ func (c *storageFromConfigFlags) connectToStorageFromConfigFile(ctx context.Cont
return nil, errors.Errorf("connection file does not specify blob storage connection parameters, kopia server connections are not supported")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.NewStorage(ctx, *cfg.Storage, false)
}
@@ -64,6 +64,6 @@ func (c *storageFromConfigFlags) connectToStorageFromConfigToken(ctx context.Con
c.sps.setPasswordFromToken(pass)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.NewStorage(ctx, ci, false)
}
diff --git a/cli/command_repository_create.go b/cli/command_repository_create.go
index e03baf31e..a743c055c 100644
--- a/cli/command_repository_create.go
+++ b/cli/command_repository_create.go
@@ -61,7 +61,7 @@ func (c *commandRepositoryCreate) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Create repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, true, c.createFormatVersion)
if err != nil {
@@ -146,7 +146,7 @@ func (c *commandRepositoryCreate) runCreateCommandWithStorage(ctx context.Contex
return errors.Wrap(err, "error populating repository")
}
- noteColor.Fprintf(c.out.stdout(), runValidationNote) // nolint:errcheck
+ noteColor.Fprintf(c.out.stdout(), runValidationNote) //nolint:errcheck
return nil
}
@@ -158,7 +158,7 @@ func (c *commandRepositoryCreate) populateRepository(ctx context.Context, passwo
}
defer rep.Close(ctx) //nolint:errcheck
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.WriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "populate repository",
}, func(ctx context.Context, w repo.RepositoryWriter) error {
diff --git a/cli/command_repository_repair.go b/cli/command_repository_repair.go
index 93175cc0b..1c30b5b3b 100644
--- a/cli/command_repository_repair.go
+++ b/cli/command_repository_repair.go
@@ -30,7 +30,7 @@ func (c *commandRepositoryRepair) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Repair repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {
diff --git a/cli/command_repository_set_client.go b/cli/command_repository_set_client.go
index dd7c3f3ec..94dce0b46 100644
--- a/cli/command_repository_set_client.go
+++ b/cli/command_repository_set_client.go
@@ -103,6 +103,6 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
return errors.Errorf("no changes")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.SetClientOptions(ctx, c.svc.repositoryConfigFileName(), opt)
}
diff --git a/cli/command_repository_set_parameters.go b/cli/command_repository_set_parameters.go
index 99d49a0fa..6bbafed33 100644
--- a/cli/command_repository_set_parameters.go
+++ b/cli/command_repository_set_parameters.go
@@ -75,7 +75,7 @@ func (c *commandRepositorySetParameters) setSizeMBParameter(ctx context.Context,
*dst = v << 20 //nolint:gomnd
*anyChange = true
- log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(int64(v)<<20)) // nolint:gomnd
+ log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(int64(v)<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Context, v int64, desc string, dst *int64, anyChange *bool) {
@@ -86,7 +86,7 @@ func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Con
*dst = v << 20 //nolint:gomnd
*anyChange = true
- log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(v<<20)) // nolint:gomnd
+ log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(v<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setIntParameter(ctx context.Context, v int, desc string, dst *int, anyChange *bool) {
diff --git a/cli/command_repository_status.go b/cli/command_repository_status.go
index b1f55bb67..1b8cd2be2 100644
--- a/cli/command_repository_status.go
+++ b/cli/command_repository_status.go
@@ -64,8 +64,8 @@ func (c *commandRepositoryStatus) outputJSON(ctx context.Context, r repo.Reposit
s.UniqueIDHex = hex.EncodeToString(dr.UniqueID())
s.ObjectFormat = dr.ObjectFormat()
s.BlobRetention = dr.BlobCfg()
- s.Storage = scrubber.ScrubSensitiveData(reflect.ValueOf(ci)).Interface().(blob.ConnectionInfo) // nolint:forcetypeassert
- s.ContentFormat = scrubber.ScrubSensitiveData(reflect.ValueOf(dr.ContentReader().ContentFormat().Struct())).Interface().(format.ContentFormat) // nolint:forcetypeassert
+ s.Storage = scrubber.ScrubSensitiveData(reflect.ValueOf(ci)).Interface().(blob.ConnectionInfo) //nolint:forcetypeassert
+ s.ContentFormat = scrubber.ScrubSensitiveData(reflect.ValueOf(dr.ContentReader().ContentFormat().Struct())).Interface().(format.ContentFormat) //nolint:forcetypeassert
switch cp, err := dr.BlobVolume().GetCapacity(ctx); {
case err == nil:
@@ -127,7 +127,7 @@ func (c *commandRepositoryStatus) dumpRetentionStatus(dr repo.DirectRepository)
}
}
-// nolint: funlen,gocyclo
+//nolint:funlen,gocyclo
func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository) error {
if c.jo.jsonOutput {
return c.outputJSON(ctx, rep)
diff --git a/cli/command_repository_sync.go b/cli/command_repository_sync.go
index a33b395ac..2d52ae489 100644
--- a/cli/command_repository_sync.go
+++ b/cli/command_repository_sync.go
@@ -57,7 +57,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Synchronize repository data to another repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {
@@ -69,7 +69,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
return errors.Wrap(err, "open repository")
}
- defer rep.Close(ctx) // nolint:errcheck
+ defer rep.Close(ctx) //nolint:errcheck
dr, ok := rep.(repo.DirectRepository)
if !ok {
diff --git a/cli/command_repository_upgrade.go b/cli/command_repository_upgrade.go
index b8dae36ae..cf5111b53 100644
--- a/cli/command_repository_upgrade.go
+++ b/cli/command_repository_upgrade.go
@@ -42,7 +42,7 @@ type commandRepositoryUpgrade struct {
// own constants so that they do not have to wait for the default clock-drift to
// settle.
//
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var MaxPermittedClockDrift = func() time.Duration { return maxPermittedClockDrift }
func (c *commandRepositoryUpgrade) setup(svc advancedAppServices, parent commandParent) {
diff --git a/cli/command_server_flush.go b/cli/command_server_flush.go
index 9767b234a..4479ca1b7 100644
--- a/cli/command_server_flush.go
+++ b/cli/command_server_flush.go
@@ -18,6 +18,6 @@ func (c *commandServerFlush) setup(svc appServices, parent commandParent) {
}
func (c *commandServerFlush) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cli.Post(ctx, "control/flush", &serverapi.Empty{}, &serverapi.Empty{})
}
diff --git a/cli/command_server_refresh.go b/cli/command_server_refresh.go
index bc719d876..b49d69c8e 100644
--- a/cli/command_server_refresh.go
+++ b/cli/command_server_refresh.go
@@ -18,6 +18,6 @@ func (c *commandServerRefresh) setup(svc appServices, parent commandParent) {
}
func (c *commandServerRefresh) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cli.Post(ctx, "control/refresh", &serverapi.Empty{}, &serverapi.Empty{})
}
diff --git a/cli/command_server_shutdown.go b/cli/command_server_shutdown.go
index 59e86cc23..156cfab5d 100644
--- a/cli/command_server_shutdown.go
+++ b/cli/command_server_shutdown.go
@@ -21,6 +21,6 @@ func (c *commandServerShutdown) setup(svc appServices, parent commandParent) {
}
func (c *commandServerShutdown) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cli.Post(ctx, "control/shutdown", &serverapi.Empty{}, &serverapi.Empty{})
}
diff --git a/cli/command_server_start.go b/cli/command_server_start.go
index b253d4c89..97621c427 100644
--- a/cli/command_server_start.go
+++ b/cli/command_server_start.go
@@ -152,7 +152,7 @@ func (c *commandServerStart) serverStartOptions(ctx context.Context) (*server.Op
func (c *commandServerStart) initRepositoryPossiblyAsync(ctx context.Context, srv *server.Server) error {
initialize := func(ctx context.Context) (repo.Repository, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.svc.openRepository(ctx, false)
}
@@ -184,7 +184,7 @@ func (c *commandServerStart) run(ctx context.Context) error {
}
httpServer := &http.Server{
- ReadHeaderTimeout: 15 * time.Second, // nolint:gomnd
+ ReadHeaderTimeout: 15 * time.Second, //nolint:gomnd
Addr: stripProtocol(c.sf.serverAddress),
BaseContext: func(l net.Listener) context.Context {
return ctx
diff --git a/cli/command_snapshot_create.go b/cli/command_snapshot_create.go
index 4620b7f14..74e3156ea 100644
--- a/cli/command_snapshot_create.go
+++ b/cli/command_snapshot_create.go
@@ -82,7 +82,7 @@ func (c *commandSnapshotCreate) setup(svc appServices, parent commandParent) {
cmd.Action(svc.repositoryWriterAction(c.run))
}
-// nolint:gocyclo
+//nolint:gocyclo
func (c *commandSnapshotCreate) run(ctx context.Context, rep repo.RepositoryWriter) error {
sources := c.snapshotCreateSources
@@ -249,7 +249,7 @@ func parseTimestamp(timestamp string) (time.Time, error) {
return time.Time{}, nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return time.Parse(timeFormat, timestamp)
}
@@ -259,7 +259,7 @@ func startTimeAfterEndTime(startTime, endTime time.Time) bool {
startTime.After(endTime)
}
-// nolint:gocyclo
+//nolint:gocyclo
func (c *commandSnapshotCreate) snapshotSingleSource(ctx context.Context, rep repo.RepositoryWriter, u *snapshotfs.Uploader, sourceInfo snapshot.SourceInfo, tags map[string]string) error {
log(ctx).Infof("Snapshotting %v ...", sourceInfo)
diff --git a/cli/command_snapshot_expire.go b/cli/command_snapshot_expire.go
index 46cceb27c..103b68892 100644
--- a/cli/command_snapshot_expire.go
+++ b/cli/command_snapshot_expire.go
@@ -28,7 +28,7 @@ func (c *commandSnapshotExpire) setup(svc appServices, parent commandParent) {
func (c *commandSnapshotExpire) getSnapshotSourcesToExpire(ctx context.Context, rep repo.Repository) ([]snapshot.SourceInfo, error) {
if c.snapshotExpireAll {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshot.ListSources(ctx, rep)
}
diff --git a/cli/command_snapshot_fix_test.go b/cli/command_snapshot_fix_test.go
index a8688872d..c7cf904f3 100644
--- a/cli/command_snapshot_fix_test.go
+++ b/cli/command_snapshot_fix_test.go
@@ -19,7 +19,7 @@
"github.com/kopia/kopia/tests/testenv"
)
-// nolint:maintidx
+//nolint:maintidx
func TestSnapshotFix(t *testing.T) {
srcDir1 := testutil.TempDirectory(t)
diff --git a/cli/command_snapshot_list.go b/cli/command_snapshot_list.go
index 811cd67e8..a4892cb56 100644
--- a/cli/command_snapshot_list.go
+++ b/cli/command_snapshot_list.go
@@ -239,7 +239,7 @@ type snapshotListRow struct {
func (c *commandSnapshotList) iterateSnapshotsMaybeWithStorageStats(ctx context.Context, rep repo.Repository, manifests []*snapshot.Manifest, callback func(m *snapshot.Manifest) error) error {
if c.storageStats {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshotfs.CalculateStorageStats(ctx, rep, manifests, callback)
}
diff --git a/cli/command_snapshot_migrate.go b/cli/command_snapshot_migrate.go
index ad9170a41..0fdb24a99 100644
--- a/cli/command_snapshot_migrate.go
+++ b/cli/command_snapshot_migrate.go
@@ -50,7 +50,7 @@ func (c *commandSnapshotMigrate) run(ctx context.Context, destRepo repo.Reposito
return errors.Wrap(err, "can't open source repository")
}
- defer sourceRepo.Close(ctx) // nolint:errcheck
+ defer sourceRepo.Close(ctx) //nolint:errcheck
sources, err := c.getSourcesToMigrate(ctx, sourceRepo)
if err != nil {
@@ -323,7 +323,7 @@ func (c *commandSnapshotMigrate) getSourcesToMigrate(ctx context.Context, rep re
}
if c.migrateAll {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshot.ListSources(ctx, rep)
}
diff --git a/cli/command_snapshot_verify.go b/cli/command_snapshot_verify.go
index d97201d26..e7830e351 100644
--- a/cli/command_snapshot_verify.go
+++ b/cli/command_snapshot_verify.go
@@ -70,7 +70,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
v := snapshotfs.NewVerifier(ctx, rep, opts)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return v.InParallel(ctx, func(tw *snapshotfs.TreeWalker) error {
manifests, err := c.loadSourceManifests(ctx, rep, c.verifyCommandSources)
if err != nil {
@@ -90,7 +90,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
- // nolint:errcheck
+ //nolint:errcheck
tw.Process(ctx, root, rootPath)
}
@@ -101,7 +101,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
- // nolint:errcheck
+ //nolint:errcheck
tw.Process(ctx, snapshotfs.DirectoryEntry(rep, oid, nil), oidStr)
}
@@ -112,7 +112,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
- // nolint:errcheck
+ //nolint:errcheck
tw.Process(ctx, snapshotfs.AutoDetectEntryFromObjectID(ctx, rep, oid, oidStr), oidStr)
}
@@ -144,6 +144,6 @@ func (c *commandSnapshotVerify) loadSourceManifests(ctx context.Context, rep rep
}
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshot.LoadSnapshots(ctx, rep, manifestIDs)
}
diff --git a/cli/config.go b/cli/config.go
index 8e017c7cc..35b9d92e7 100644
--- a/cli/config.go
+++ b/cli/config.go
@@ -114,7 +114,7 @@ func resolveSymlink(path string) (string, error) {
return path, nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return filepath.EvalSymlinks(path)
}
diff --git a/cli/observability_flags.go b/cli/observability_flags.go
index 372e3c6cb..3f0c00267 100644
--- a/cli/observability_flags.go
+++ b/cli/observability_flags.go
@@ -24,7 +24,7 @@
"github.com/kopia/kopia/repo"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var metricsPushFormats = map[string]expfmt.Format{
"text": expfmt.FmtText,
"proto-text": expfmt.FmtProtoText,
@@ -92,7 +92,7 @@ func (c *observabilityFlags) startMetrics(ctx context.Context) error {
log(ctx).Infof("starting prometheus metrics on %v", c.metricsListenAddr)
- go http.ListenAndServe(c.metricsListenAddr, m) // nolint:errcheck
+ go http.ListenAndServe(c.metricsListenAddr, m) //nolint:errcheck
}
if c.metricsPushAddr != "" {
diff --git a/cli/storage_azure.go b/cli/storage_azure.go
index bec226022..fd6bc30b8 100644
--- a/cli/storage_azure.go
+++ b/cli/storage_azure.go
@@ -25,6 +25,6 @@ func (c *storageAzureFlags) Setup(svc StorageProviderServices, cmd *kingpin.CmdC
}
func (c *storageAzureFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return azure.New(ctx, &c.azOptions)
}
diff --git a/cli/storage_b2.go b/cli/storage_b2.go
index bc4a2c014..f7d6ff07d 100644
--- a/cli/storage_b2.go
+++ b/cli/storage_b2.go
@@ -22,6 +22,6 @@ func (c *storageB2Flags) Setup(svc StorageProviderServices, cmd *kingpin.CmdClau
}
func (c *storageB2Flags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return b2.New(ctx, &c.b2options)
}
diff --git a/cli/storage_filesystem.go b/cli/storage_filesystem.go
index 0f6d70aa6..10f23f876 100644
--- a/cli/storage_filesystem.go
+++ b/cli/storage_filesystem.go
@@ -50,12 +50,12 @@ func (c *storageFilesystemFlags) Connect(ctx context.Context, isCreate bool, for
}
if v := c.connectOwnerUID; v != "" {
- // nolint:gomnd
+ //nolint:gomnd
fso.FileUID = getIntPtrValue(v, 10)
}
if v := c.connectOwnerGID; v != "" {
- // nolint:gomnd
+ //nolint:gomnd
fso.FileGID = getIntPtrValue(v, 10)
}
@@ -63,7 +63,7 @@ func (c *storageFilesystemFlags) Connect(ctx context.Context, isCreate bool, for
fso.DirectoryMode = getFileModeValue(c.connectDirMode, defaultDirMode)
fso.DirectoryShards = initialDirectoryShards(c.connectFlat, formatVersion)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return filesystem.New(ctx, &fso, isCreate)
}
@@ -82,7 +82,7 @@ func initialDirectoryShards(flat bool, formatVersion int) []int {
}
func getIntPtrValue(value string, base int) *int {
- // nolint:gomnd
+ //nolint:gomnd
if int64Val, err := strconv.ParseInt(value, base, 32); err == nil {
intVal := int(int64Val)
return &intVal
@@ -92,7 +92,7 @@ func getIntPtrValue(value string, base int) *int {
}
func getFileModeValue(value string, def os.FileMode) os.FileMode {
- // nolint:gomnd
+ //nolint:gomnd
if uint32Val, err := strconv.ParseUint(value, 8, 32); err == nil {
return os.FileMode(uint32Val)
}
diff --git a/cli/storage_gcs.go b/cli/storage_gcs.go
index 54f880524..b679194e5 100644
--- a/cli/storage_gcs.go
+++ b/cli/storage_gcs.go
@@ -39,6 +39,6 @@ func (c *storageGCSFlags) Connect(ctx context.Context, isCreate bool, formatVers
c.options.ServiceAccountCredentialsFile = ""
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return gcs.New(ctx, &c.options)
}
diff --git a/cli/storage_gdrive.go b/cli/storage_gdrive.go
index f9c8d4871..d6dddbff1 100644
--- a/cli/storage_gdrive.go
+++ b/cli/storage_gdrive.go
@@ -38,6 +38,6 @@ func (c *storageGDriveFlags) Connect(ctx context.Context, isCreate bool, formatV
c.options.ServiceAccountCredentialsFile = ""
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return gdrive.New(ctx, &c.options)
}
diff --git a/cli/storage_rclone.go b/cli/storage_rclone.go
index 27eaaebdb..58f860597 100644
--- a/cli/storage_rclone.go
+++ b/cli/storage_rclone.go
@@ -44,6 +44,6 @@ func (c *storageRcloneFlags) Connect(ctx context.Context, isCreate bool, formatV
c.opt.EmbeddedConfig = string(cfg)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rclone.New(ctx, &c.opt, isCreate)
}
diff --git a/cli/storage_s3.go b/cli/storage_s3.go
index c3a1e1680..ebe4026e6 100644
--- a/cli/storage_s3.go
+++ b/cli/storage_s3.go
@@ -51,6 +51,6 @@ func (c *storageS3Flags) Connect(ctx context.Context, isCreate bool, formatVersi
return nil, errors.New("Cannot specify a 'point-in-time' option when creating a repository")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s3.New(ctx, &c.s3options)
}
diff --git a/cli/storage_sftp.go b/cli/storage_sftp.go
index 971a4d4fb..0c85c74e4 100644
--- a/cli/storage_sftp.go
+++ b/cli/storage_sftp.go
@@ -48,7 +48,7 @@ func (c *storageSFTPFlags) Setup(_ StorageProviderServices, cmd *kingpin.CmdClau
func (c *storageSFTPFlags) getOptions(formatVersion int) (*sftp.Options, error) {
sftpo := c.options
- // nolint:nestif
+ //nolint:nestif
if !sftpo.ExternalSSH {
if c.embedCredentials {
if sftpo.KeyData == "" {
@@ -115,6 +115,6 @@ func (c *storageSFTPFlags) Connect(ctx context.Context, isCreate bool, formatVer
return nil, err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return sftp.New(ctx, opt, isCreate)
}
diff --git a/cli/storage_webdav.go b/cli/storage_webdav.go
index 5996696c9..7b8d88fe0 100644
--- a/cli/storage_webdav.go
+++ b/cli/storage_webdav.go
@@ -40,6 +40,6 @@ func (c *storageWebDAVFlags) Connect(ctx context.Context, isCreate bool, formatV
wo.DirectoryShards = initialDirectoryShards(c.connectFlat, formatVersion)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return webdav.New(ctx, &wo, isCreate)
}
diff --git a/cli/throttle_get.go b/cli/throttle_get.go
index 39d5d453c..57d198a8c 100644
--- a/cli/throttle_get.go
+++ b/cli/throttle_get.go
@@ -45,5 +45,5 @@ func (c *commonThrottleGet) printValueOrUnlimited(label string, v float64, conve
}
func (c *commonThrottleGet) floatToString(v float64) string {
- return strconv.FormatFloat(v, 'f', 0, 64) // nolint:gomnd
+ return strconv.FormatFloat(v, 'f', 0, 64) //nolint:gomnd
}
diff --git a/cli/throttle_set.go b/cli/throttle_set.go
index e148b0551..b17481ae9 100644
--- a/cli/throttle_set.go
+++ b/cli/throttle_set.go
@@ -79,7 +79,7 @@ func (c *commonThrottleSet) setThrottleFloat64(ctx context.Context, desc string,
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseFloat(str, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
@@ -114,7 +114,7 @@ func (c *commonThrottleSet) setThrottleInt(ctx context.Context, desc string, val
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
diff --git a/cli/update_check.go b/cli/update_check.go
index c237e01c1..508468f66 100644
--- a/cli/update_check.go
+++ b/cli/update_check.go
@@ -64,7 +64,7 @@ func (c *App) writeUpdateState(us *updateState) error {
}
func (c *App) removeUpdateState() {
- os.Remove(c.updateStateFilename()) // nolint:errcheck
+ os.Remove(c.updateStateFilename()) //nolint:errcheck
}
// getUpdateState reads the update state file if available.
@@ -107,7 +107,7 @@ func getLatestReleaseNameFromGitHub(ctx context.Context) (string, error) {
ctx, cancel := context.WithTimeout(ctx, githubTimeout)
defer cancel()
- req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(latestReleaseGitHubURLFormat, repo.BuildGitHubRepo), http.NoBody)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(latestReleaseGitHubURLFormat, repo.BuildGitHubRepo), http.NoBody)
if err != nil {
return "", errors.Wrap(err, "unable to get latest release from github")
}
@@ -138,7 +138,7 @@ func verifyGitHubReleaseIsComplete(ctx context.Context, releaseName string) erro
ctx, cancel := context.WithTimeout(ctx, githubTimeout)
defer cancel()
- req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(checksumsURLFormat, repo.BuildGitHubRepo, releaseName), http.NoBody)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(checksumsURLFormat, repo.BuildGitHubRepo, releaseName), http.NoBody)
if err != nil {
return errors.Wrap(err, "unable to download releases checksum")
}
diff --git a/fs/cachefs/cache.go b/fs/cachefs/cache.go
index c46b7c400..770233156 100644
--- a/fs/cachefs/cache.go
+++ b/fs/cachefs/cache.go
@@ -112,7 +112,7 @@ func(innerCtx context.Context) ([]fs.Entry, error) {
return nil
}
- return d.IterateEntries(ctx, callback) // nolint:wrapcheck
+ return d.IterateEntries(ctx, callback) //nolint:wrapcheck
}
func (c *Cache) getEntriesFromCacheLocked(ctx context.Context, id string) []fs.Entry {
@@ -200,7 +200,7 @@ type Options struct {
MaxCachedEntries int
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var defaultOptions = &Options{
MaxCachedDirectories: 1000, //nolint:gomnd
MaxCachedEntries: 100000, //nolint:gomnd
diff --git a/fs/cachefs/cachefs.go b/fs/cachefs/cachefs.go
index b6045ce7b..46159d5ff 100644
--- a/fs/cachefs/cachefs.go
+++ b/fs/cachefs/cachefs.go
@@ -24,7 +24,7 @@ type directory struct {
func (d *directory) Child(ctx context.Context, name string) (fs.Entry, error) {
e, err := d.Directory.Child(ctx, name)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -41,7 +41,7 @@ func(e fs.Entry) fs.Entry {
callback,
)
- return err // nolint:wrapcheck
+ return err //nolint:wrapcheck
}
type file struct {
diff --git a/fs/entry.go b/fs/entry.go
index 459fe1cd4..4a6fda7a0 100644
--- a/fs/entry.go
+++ b/fs/entry.go
@@ -85,7 +85,7 @@ func GetAllEntries(ctx context.Context, d Directory) ([]Entry, error) {
return nil
})
- return entries, err // nolint:wrapcheck
+ return entries, err //nolint:wrapcheck
}
// ErrEntryNotFound is returned when an entry is not found.
diff --git a/fs/ignorefs/ignorefs.go b/fs/ignorefs/ignorefs.go
index 857448dfb..ec6dd30a3 100644
--- a/fs/ignorefs/ignorefs.go
+++ b/fs/ignorefs/ignorefs.go
@@ -140,7 +140,7 @@ func (d *ignoreDirectory) skipCacheDirectory(ctx context.Context, relativePath s
func (d *ignoreDirectory) DirEntryOrNil(ctx context.Context) (*snapshot.DirEntry, error) {
if defp, ok := d.Directory.(snapshot.HasDirEntryOrNil); ok {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return defp.DirEntryOrNil(ctx)
}
// Ignored directories do not have DirEntry objects.
@@ -157,7 +157,7 @@ func (d *ignoreDirectory) IterateEntries(ctx context.Context, callback func(ctx
return err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return d.Directory.IterateEntries(ctx, func(ctx context.Context, e fs.Entry) error {
if wrapped, ok := d.maybeWrappedChildEntry(ctx, thisContext, e); ok {
return callback(ctx, wrapped)
@@ -194,7 +194,7 @@ func (d *ignoreDirectory) Child(ctx context.Context, name string) (fs.Entry, err
e, err := d.Directory.Child(ctx, name)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
diff --git a/fs/localfs/local_fs.go b/fs/localfs/local_fs.go
index c9259132a..2aa9ba86c 100644
--- a/fs/localfs/local_fs.go
+++ b/fs/localfs/local_fs.go
@@ -186,7 +186,7 @@ func (fsd *filesystemDirectory) IterateEntries(ctx context.Context, cb func(cont
return errors.Wrap(err, "error listing directory")
}
-// nolint:gocognit,gocyclo
+//nolint:gocognit,gocyclo
func (fsd *filesystemDirectory) iterateEntriesInParallel(ctx context.Context, f *os.File, childPrefix string, batch []os.DirEntry, cb func(context.Context, fs.Entry) error) error {
inputCh := make(chan os.DirEntry, dirListingPrefetch)
outputCh := make(chan entryWithError, dirListingPrefetch)
@@ -248,7 +248,7 @@ func (fsd *filesystemDirectory) iterateEntriesInParallel(ctx context.Context, f
nextBatch, err := f.ReadDir(numEntriesToRead)
if err != nil && !errors.Is(err, io.EOF) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -295,7 +295,7 @@ func (fsf *filesystemFile) Open(ctx context.Context) (fs.Reader, error) {
}
func (fsl *filesystemSymlink) Readlink(ctx context.Context) (string, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Readlink(fsl.fullPath())
}
diff --git a/fs/localfs/local_fs_pool.go b/fs/localfs/local_fs_pool.go
index 4258801ff..06268129d 100644
--- a/fs/localfs/local_fs_pool.go
+++ b/fs/localfs/local_fs_pool.go
@@ -2,54 +2,54 @@
import "github.com/kopia/kopia/internal/freepool"
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
filesystemFilePool = freepool.New(
func() interface{} { return &filesystemFile{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*filesystemFile) = filesystemFile{}
},
)
filesystemDirectoryPool = freepool.New(
func() interface{} { return &filesystemDirectory{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*filesystemDirectory) = filesystemDirectory{}
},
)
filesystemSymlinkPool = freepool.New(
func() interface{} { return &filesystemSymlink{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*filesystemSymlink) = filesystemSymlink{}
},
)
filesystemErrorEntryPool = freepool.New(
func() interface{} { return &filesystemErrorEntry{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*filesystemErrorEntry) = filesystemErrorEntry{}
},
)
shallowFilesystemFilePool = freepool.New(
func() interface{} { return &shallowFilesystemFile{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*shallowFilesystemFile) = shallowFilesystemFile{}
},
)
shallowFilesystemDirectoryPool = freepool.New(
func() interface{} { return &shallowFilesystemDirectory{} },
func(v interface{}) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
*v.(*shallowFilesystemDirectory) = shallowFilesystemDirectory{}
},
)
)
func newFilesystemFile(e filesystemEntry) *filesystemFile {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fsf := filesystemFilePool.Take().(*filesystemFile)
fsf.filesystemEntry = e
@@ -61,7 +61,7 @@ func (fsf *filesystemFile) Close() {
}
func newFilesystemDirectory(e filesystemEntry) *filesystemDirectory {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fsd := filesystemDirectoryPool.Take().(*filesystemDirectory)
fsd.filesystemEntry = e
@@ -73,7 +73,7 @@ func (fsd *filesystemDirectory) Close() {
}
func newFilesystemSymlink(e filesystemEntry) *filesystemSymlink {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fsd := filesystemSymlinkPool.Take().(*filesystemSymlink)
fsd.filesystemEntry = e
@@ -85,7 +85,7 @@ func (fsl *filesystemSymlink) Close() {
}
func newFilesystemErrorEntry(e filesystemEntry, err error) *filesystemErrorEntry {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fse := filesystemErrorEntryPool.Take().(*filesystemErrorEntry)
fse.filesystemEntry = e
fse.err = err
@@ -98,7 +98,7 @@ func (e *filesystemErrorEntry) Close() {
}
func newShallowFilesystemFile(e filesystemEntry) *shallowFilesystemFile {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fsf := shallowFilesystemFilePool.Take().(*shallowFilesystemFile)
fsf.filesystemEntry = e
@@ -110,7 +110,7 @@ func (fsf *shallowFilesystemFile) Close() {
}
func newShallowFilesystemDirectory(e filesystemEntry) *shallowFilesystemDirectory {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
fsf := shallowFilesystemDirectoryPool.Take().(*shallowFilesystemDirectory)
fsf.filesystemEntry = e
diff --git a/fs/localfs/local_fs_test.go b/fs/localfs/local_fs_test.go
index 5a8381586..91af9a80b 100644
--- a/fs/localfs/local_fs_test.go
+++ b/fs/localfs/local_fs_test.go
@@ -153,7 +153,7 @@ func TestIterateNonExistent(t *testing.T) {
}), os.ErrNotExist)
}
-// nolint:thelper
+//nolint:thelper
func testIterate(t *testing.T, nFiles int) {
tmp := testutil.TempDirectory(t)
diff --git a/fs/loggingfs/loggingfs.go b/fs/loggingfs/loggingfs.go
index 869183a58..4fbd4ece4 100644
--- a/fs/loggingfs/loggingfs.go
+++ b/fs/loggingfs/loggingfs.go
@@ -26,7 +26,7 @@ func (ld *loggingDirectory) Child(ctx context.Context, name string) (fs.Entry, e
ld.options.printf(ld.options.prefix+"Child(%v) took %v and returned %v", ld.relativePath, dt, err)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
diff --git a/fs/virtualfs/virtualfs.go b/fs/virtualfs/virtualfs.go
index 48ebcd3a2..5e470025f 100644
--- a/fs/virtualfs/virtualfs.go
+++ b/fs/virtualfs/virtualfs.go
@@ -73,7 +73,7 @@ type staticDirectory struct {
// Child gets the named child of a directory.
func (sd *staticDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return fs.IterateEntriesAndFindChild(ctx, sd, name)
}
diff --git a/internal/acl/access_level.go b/internal/acl/access_level.go
index df279defc..ac694f39d 100644
--- a/internal/acl/access_level.go
+++ b/internal/acl/access_level.go
@@ -11,7 +11,8 @@
type AccessLevel int
// accessLevelToString maps supported access levels to strings.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var accessLevelToString = map[AccessLevel]string{
AccessLevelNone: "NONE",
AccessLevelRead: "READ",
@@ -20,7 +21,8 @@
}
// stringToAccessLevel maps strings to supported access levels.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var stringToAccessLevel = map[string]AccessLevel{}
func init() {
@@ -45,7 +47,7 @@ func (a AccessLevel) MarshalJSON() ([]byte, error) {
return nil, errors.Errorf("Invalid access level: %v", a)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return json.Marshal(j)
}
diff --git a/internal/acl/acl.go b/internal/acl/acl.go
index 88c0e9445..97290bd02 100644
--- a/internal/acl/acl.go
+++ b/internal/acl/acl.go
@@ -91,7 +91,7 @@ func oneOf(allowed ...string) valueValidatorFunc {
}
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var allowedLabelsForType = map[string]map[string]valueValidatorFunc{
ContentManifestType: {},
policy.ManifestType: {
diff --git a/internal/acl/acl_manager.go b/internal/acl/acl_manager.go
index 97a93c704..9bcd8c863 100644
--- a/internal/acl/acl_manager.go
+++ b/internal/acl/acl_manager.go
@@ -25,7 +25,7 @@ func matchOrWildcard(rule, actual string) bool {
func userMatches(rule, username, hostname string) bool {
ruleParts := strings.Split(rule, "@")
- if len(ruleParts) != 2 { // nolint:gomnd
+ if len(ruleParts) != 2 { //nolint:gomnd
return false
}
diff --git a/internal/apiclient/apiclient.go b/internal/apiclient/apiclient.go
index 3c9905293..c6a9635c7 100644
--- a/internal/apiclient/apiclient.go
+++ b/internal/apiclient/apiclient.go
@@ -21,7 +21,8 @@
var log = logging.Module("client")
// CSRFTokenHeader is the name of CSRF token header that must be sent for most API calls.
-// nolint:gosec
+//
+//nolint:gosec
const CSRFTokenHeader = "X-Kopia-Csrf-Token"
// KopiaAPIClient provides helper methods for communicating with Kopia API server.
@@ -224,7 +225,7 @@ type basicAuthTransport struct {
func (t basicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
req.SetBasicAuth(t.username, t.password)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return t.base.RoundTrip(req)
}
diff --git a/internal/atomicfile/atomicfile.go b/internal/atomicfile/atomicfile.go
index 8796bcfbc..a80b4a61d 100644
--- a/internal/atomicfile/atomicfile.go
+++ b/internal/atomicfile/atomicfile.go
@@ -53,6 +53,6 @@ func MaybePrefixLongFilenameOnWindows(fname string) string {
// Write is a wrapper around atomic.WriteFile that handles long file names on Windows.
func Write(filename string, r io.Reader) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return atomic.WriteFile(MaybePrefixLongFilenameOnWindows(filename), r)
}
diff --git a/internal/auth/authz_acl.go b/internal/auth/authz_acl.go
index e84c01076..f2420feb6 100644
--- a/internal/auth/authz_acl.go
+++ b/internal/auth/authz_acl.go
@@ -18,7 +18,8 @@
const defaultACLRefreshFrequency = 10 * time.Second
// ContentRule is a special target rule that targets contents instead of manifests.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var ContentRule = acl.TargetRule{
manifest.TypeLabelKey: acl.ContentManifestType,
}
@@ -27,7 +28,8 @@
const anyUser = "*@*"
// DefaultACLs specifies default ACLs.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var DefaultACLs = []*acl.Entry{
{
// everybody can write contents
diff --git a/internal/auth/authz_test.go b/internal/auth/authz_test.go
index 54011ffb1..1360a1c6c 100644
--- a/internal/auth/authz_test.go
+++ b/internal/auth/authz_test.go
@@ -115,7 +115,7 @@ func TestDefaultAuthorizer_DefaultACLs(t *testing.T) {
verifyLegacyAuthorizer(ctx, t, env.Repository, auth.DefaultAuthorizer())
}
-// nolint:thelper
+//nolint:thelper
func verifyLegacyAuthorizer(ctx context.Context, t *testing.T, rep repo.Repository, authorizer auth.Authorizer) {
cases := []struct {
usernameAtHost string
diff --git a/internal/blobtesting/concurrent.go b/internal/blobtesting/concurrent.go
index 84c41b5d9..a7ed2e3fc 100644
--- a/internal/blobtesting/concurrent.go
+++ b/internal/blobtesting/concurrent.go
@@ -32,7 +32,8 @@ type ConcurrentAccessOptions struct {
}
// VerifyConcurrentAccess tests data races on a repository to ensure only clean errors are returned.
-// nolint:gocognit,gocyclo,funlen,cyclop
+//
+//nolint:gocognit,gocyclo,funlen,cyclop
func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAccessOptions) {
t.Helper()
diff --git a/internal/blobtesting/verify.go b/internal/blobtesting/verify.go
index 6affd88c3..fba682de5 100644
--- a/internal/blobtesting/verify.go
+++ b/internal/blobtesting/verify.go
@@ -17,7 +17,8 @@
)
// VerifyStorage verifies the behavior of the specified storage.
-// nolint:gocyclo,thelper
+//
+//nolint:gocyclo,thelper
func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob.PutOptions) {
blocks := []struct {
blk blob.ID
@@ -198,7 +199,8 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob.
// AssertConnectionInfoRoundTrips verifies that the ConnectionInfo returned by a given storage can be used to create
// equivalent storage.
-// nolint:thelper
+//
+//nolint:thelper
func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s blob.Storage) {
ci := s.ConnectionInfo()
@@ -214,7 +216,8 @@ func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s blob.St
}
// TestValidationOptions is the set of options used when running providing validation from tests.
-// nolint:gomnd
+//
+//nolint:gomnd
var TestValidationOptions = providervalidation.Options{
MaxClockDrift: 3 * time.Minute,
ConcurrencyTestDuration: 15 * time.Second,
diff --git a/internal/cache/cache_metrics.go b/internal/cache/cache_metrics.go
index fa9d0e9fc..fc0a68ae2 100644
--- a/internal/cache/cache_metrics.go
+++ b/internal/cache/cache_metrics.go
@@ -5,7 +5,7 @@
"github.com/prometheus/client_golang/prometheus/promauto"
)
-// nolint:gochecknoglobals,promlinter
+//nolint:gochecknoglobals,promlinter
var (
metricHitCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "kopia_content_cache_hit_count",
diff --git a/internal/cache/cache_storage.go b/internal/cache/cache_storage.go
index f1b959519..b20c8a5cc 100644
--- a/internal/cache/cache_storage.go
+++ b/internal/cache/cache_storage.go
@@ -15,7 +15,7 @@
"github.com/kopia/kopia/repo/blob/sharded"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var mkdirAll = os.MkdirAll // for testability
// DirMode is the directory mode for all caches.
@@ -52,5 +52,5 @@ func NewStorageOrNil(ctx context.Context, cacheDir string, maxBytes int64, subdi
},
}, false)
- return fs.(Storage), errors.Wrap(err, "error initializing filesystem cache") // nolint:forcetypeassert
+ return fs.(Storage), errors.Wrap(err, "error initializing filesystem cache") //nolint:forcetypeassert
}
diff --git a/internal/cache/content_cache_concurrency_test.go b/internal/cache/content_cache_concurrency_test.go
index 48fc7aaf5..a2b749121 100644
--- a/internal/cache/content_cache_concurrency_test.go
+++ b/internal/cache/content_cache_concurrency_test.go
@@ -80,7 +80,7 @@ func TestGetContentRaceFetchesOnce_MetadataCache(t *testing.T) {
testGetContentRaceFetchesOnce(t, newContentMetadataCache)
}
-// nolint:thelper
+//nolint:thelper
func testContentCachePrefetchBlocksGetContent(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)
@@ -148,7 +148,7 @@ func testContentCachePrefetchBlocksGetContent(t *testing.T, newCache newContentC
require.Less(t, getBlobFinishedCnt, getContentFinishedCnt)
}
-// nolint:thelper
+//nolint:thelper
func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCache newContentCacheFunc, minGetBlobParallelism int) {
ctx := testlogging.Context(t)
@@ -196,7 +196,7 @@ func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCac
require.GreaterOrEqual(t, ct.maxConcurrencyLevel, minGetBlobParallelism)
}
-// nolint:thelper
+//nolint:thelper
func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)
@@ -246,7 +246,7 @@ func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache ne
require.GreaterOrEqual(t, ct.maxConcurrencyLevel, 2)
}
-// nolint:thelper
+//nolint:thelper
func testGetContentRaceFetchesOnce(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)
diff --git a/internal/cache/content_cache_passthrough.go b/internal/cache/content_cache_passthrough.go
index 05753414b..ddfe8f140 100644
--- a/internal/cache/content_cache_passthrough.go
+++ b/internal/cache/content_cache_passthrough.go
@@ -15,7 +15,7 @@ type passthroughContentCache struct {
func (c passthroughContentCache) Close(ctx context.Context) {}
func (c passthroughContentCache) GetContent(ctx context.Context, contentID string, blobID blob.ID, offset, length int64, output *gather.WriteBuffer) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.st.GetBlob(ctx, blobID, offset, length, output)
}
diff --git a/internal/cache/persistent_lru_cache.go b/internal/cache/persistent_lru_cache.go
index 5ff1ebe7c..474c31e1a 100644
--- a/internal/cache/persistent_lru_cache.go
+++ b/internal/cache/persistent_lru_cache.go
@@ -66,14 +66,14 @@ func (c *PersistentCache) GetFetchingMutex(key string) *sync.RWMutex {
}
if v, ok := c.mutexCache.Get(key); ok {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return v.(*sync.RWMutex)
}
newVal := &sync.RWMutex{}
if prevVal, ok, _ := c.mutexCache.PeekOrAdd(key, newVal); ok {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return prevVal.(*sync.RWMutex)
}
@@ -236,7 +236,7 @@ func (h contentMetadataHeap) Swap(i, j int) {
}
func (h *contentMetadataHeap) Push(x interface{}) {
- *h = append(*h, x.(blob.Metadata)) // nolint:forcetypeassert
+ *h = append(*h, x.(blob.Metadata)) //nolint:forcetypeassert
}
func (h *contentMetadataHeap) Pop() interface{} {
diff --git a/internal/cache/storage_protection.go b/internal/cache/storage_protection.go
index 8fc1a6956..947a45c71 100644
--- a/internal/cache/storage_protection.go
+++ b/internal/cache/storage_protection.go
@@ -24,12 +24,12 @@ type nullStorageProtection struct{
func (nullStorageProtection) Protect(id string, input gather.Bytes, output *gather.WriteBuffer) {
output.Reset()
- input.WriteTo(output) // nolint:errcheck
+ input.WriteTo(output) //nolint:errcheck
}
func (nullStorageProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
output.Reset()
- input.WriteTo(output) // nolint:errcheck
+ input.WriteTo(output) //nolint:errcheck
return nil
}
@@ -50,7 +50,7 @@ func (p checksumProtection) Protect(id string, input gather.Bytes, output *gathe
func (p checksumProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
output.Reset()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return hmac.VerifyAndStrip(input, p.Secret, output)
}
diff --git a/internal/cache/storage_protection_test.go b/internal/cache/storage_protection_test.go
index b8603e0f5..4f6655a44 100644
--- a/internal/cache/storage_protection_test.go
+++ b/internal/cache/storage_protection_test.go
@@ -27,7 +27,7 @@ func TestEncryptionStorageProtection(t *testing.T) {
testStorageProtection(t, e, true)
}
-// nolint:thelper
+//nolint:thelper
func testStorageProtection(t *testing.T, sp cache.StorageProtection, protectsFromBitFlips bool) {
payload := []byte{0, 1, 2, 3, 4}
diff --git a/internal/clock/now_prod.go b/internal/clock/now_prod.go
index 8a6085f30..01548c11b 100644
--- a/internal/clock/now_prod.go
+++ b/internal/clock/now_prod.go
@@ -7,5 +7,5 @@
// Now returns current wall clock time.
func Now() time.Time {
- return discardMonotonicTime(time.Now()) // nolint:forbidigo
+ return discardMonotonicTime(time.Now()) //nolint:forbidigo
}
diff --git a/internal/clock/now_testing.go b/internal/clock/now_testing.go
index 35f47f3ad..2862451d0 100644
--- a/internal/clock/now_testing.go
+++ b/internal/clock/now_testing.go
@@ -16,7 +16,7 @@
// Now is overridable function that returns current wall clock time.
var Now = func() time.Time {
- return discardMonotonicTime(time.Now()) // nolint:forbidigo
+ return discardMonotonicTime(time.Now()) //nolint:forbidigo
}
func init() {
@@ -43,7 +43,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
}
var (
- nextRefreshRealTime time.Time // nolint:forbidigo
+ nextRefreshRealTime time.Time //nolint:forbidigo
localTimeOffset time.Duration // offset to be added to time.Now() to produce server time
)
@@ -51,7 +51,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
mu.Lock()
defer mu.Unlock()
- localTime := time.Now() // nolint:forbidigo
+ localTime := time.Now() //nolint:forbidigo
if localTime.After(nextRefreshRealTime) {
resp, err := http.Get(endpoint) //nolint:gosec,noctx
if err != nil {
@@ -67,7 +67,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
log.Fatalf("invalid time received from fake time server: %v", err)
}
- nextRefreshRealTime = localTime.Add(timeInfo.ValidFor) // nolint:forbidigo
+ nextRefreshRealTime = localTime.Add(timeInfo.ValidFor) //nolint:forbidigo
// compute offset such that localTime + localTimeOffset == serverTime
localTimeOffset = timeInfo.Time.Sub(localTime)
diff --git a/internal/completeset/complete_set.go b/internal/completeset/complete_set.go
index 0d0ede9e7..77d8ac6f4 100644
--- a/internal/completeset/complete_set.go
+++ b/internal/completeset/complete_set.go
@@ -9,11 +9,14 @@
)
// FindFirst looks for a first complete set of blobs IDs following a naming convention:
-// '<prefix>-s<set>-c<count>'
+//
+// '<prefix>-s<set>-c<count>'
+//
// where:
-// 'prefix' is arbitrary string not containing a dash ('-')
-// 'set' is a random string shared by all indexes in the same set
-// 'count' is a number that specifies how many items must be in the set to make it complete.
+//
+// 'prefix' is arbitrary string not containing a dash ('-')
+// 'set' is a random string shared by all indexes in the same set
+// 'count' is a number that specifies how many items must be in the set to make it complete.
//
// The algorithm returns IDs of blobs that form the first complete set.
func FindFirst(bms []blob.Metadata) []blob.Metadata {
diff --git a/internal/connection/reconnector.go b/internal/connection/reconnector.go
index f606e2712..ccc0ca151 100644
--- a/internal/connection/reconnector.go
+++ b/internal/connection/reconnector.go
@@ -57,7 +57,7 @@ func (r *Reconnector) GetOrOpenConnection(ctx context.Context) (Connection, erro
// UsingConnection invokes the provided callback for a Connection.
func (r *Reconnector) UsingConnection(ctx context.Context, desc string, cb func(cli Connection) (interface{}, error)) (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return retry.WithExponentialBackoff(ctx, desc, func() (interface{}, error) {
conn, err := r.GetOrOpenConnection(ctx)
if err != nil {
diff --git a/internal/ctxutil/detach.go b/internal/ctxutil/detach.go
index 81a8d40f2..8393566e8 100644
--- a/internal/ctxutil/detach.go
+++ b/internal/ctxutil/detach.go
@@ -7,8 +7,8 @@
type detachedContext struct {
// inherit most methods from context.Background()
- context.Context // nolint:containedctx
- wrapped context.Context // nolint:containedctx
+ context.Context //nolint:containedctx
+ wrapped context.Context //nolint:containedctx
}
// Detach returns a context that inheris provided context's values but not deadline or cancellation.
diff --git a/internal/diff/diff.go b/internal/diff/diff.go
index 148293271..6c7c33887 100644
--- a/internal/diff/diff.go
+++ b/internal/diff/diff.go
@@ -37,7 +37,7 @@ func (c *Comparer) Compare(ctx context.Context, e1, e2 fs.Entry) error {
// Close removes all temporary files used by the comparer.
func (c *Comparer) Close() error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.RemoveAll(c.tmpDir)
}
@@ -73,7 +73,7 @@ func (c *Comparer) compareDirectories(ctx context.Context, dir1, dir2 fs.Directo
return c.compareDirectoryEntries(ctx, entries1, entries2, parent)
}
-// nolint:gocyclo
+//nolint:gocyclo
func (c *Comparer) compareEntry(ctx context.Context, e1, e2 fs.Entry, path string) error {
// see if we have the same object IDs, which implies identical objects, thanks to content-addressable-storage
if h1, ok := e1.(object.HasObjectID); ok {
@@ -267,7 +267,7 @@ func (c *Comparer) compareFiles(ctx context.Context, f1, f2 fs.File, fname strin
args = append(args, c.DiffArguments...)
args = append(args, oldName, newName)
- cmd := exec.CommandContext(ctx, c.DiffCommand, args...) // nolint:gosec
+ cmd := exec.CommandContext(ctx, c.DiffCommand, args...) //nolint:gosec
cmd.Dir = c.tmpDir
cmd.Stdout = c.out
cmd.Stderr = c.out
diff --git a/internal/editor/editor.go b/internal/editor/editor.go
index 2bc3e2591..cd0f52e05 100644
--- a/internal/editor/editor.go
+++ b/internal/editor/editor.go
@@ -30,7 +30,7 @@ func EditLoop(ctx context.Context, fname, initial string, parse func(updated str
tmpFile := filepath.Join(tmpDir, fname)
defer os.RemoveAll(tmpDir) //nolint:errcheck
- // nolint:gomnd
+ //nolint:gomnd
if err := os.WriteFile(tmpFile, []byte(initial), 0o600); err != nil {
return errors.Wrap(err, "unable to write file to edit")
}
diff --git a/internal/epoch/epoch_manager.go b/internal/epoch/epoch_manager.go
index b4709216c..b44cfe42f 100644
--- a/internal/epoch/epoch_manager.go
+++ b/internal/epoch/epoch_manager.go
@@ -106,7 +106,8 @@ func (p *Parameters) GetEpochDeleteParallelism() int {
}
// Validate validates epoch parameters.
-// nolint:gomnd
+//
+//nolint:gomnd
func (p *Parameters) Validate() error {
if !p.Enabled {
return nil
@@ -140,7 +141,8 @@ func (p *Parameters) Validate() error {
}
// DefaultParameters contains default epoch manager parameters.
-// nolint:gomnd
+//
+//nolint:gomnd
func DefaultParameters() Parameters {
return Parameters{
Enabled: true,
@@ -780,7 +782,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By
}
// make sure we have at least 75% of remaining time
- // nolint:gomnd
+ //nolint:gomnd
cs, err := e.committedState(ctx, 3*p.EpochRefreshFrequency/4)
if err != nil {
return nil, errors.Wrap(err, "error getting committed state")
diff --git a/internal/epoch/epoch_manager_test.go b/internal/epoch/epoch_manager_test.go
index 30926b9fb..808c24fd5 100644
--- a/internal/epoch/epoch_manager_test.go
+++ b/internal/epoch/epoch_manager_test.go
@@ -71,7 +71,7 @@ func (te *epochManagerTestEnv) compact(ctx context.Context, blobs []blob.ID, pre
"PutBlob error")
}
-// write two dummy compaction blobs instead of 3, simulating a compaction that crashed before fully complete.
+// write two dummy compaction blobs instead of 3, simulating a compaction that crashed before fully complete.
func (te *epochManagerTestEnv) interruptedCompaction(ctx context.Context, _ []blob.ID, prefix blob.ID) error {
sess := rand.Int63()
@@ -525,7 +525,7 @@ func TestInvalid_Cleanup(t *testing.T) {
require.ErrorIs(t, err, ctx.Err())
}
-// nolint:thelper
+//nolint:thelper
func verifySequentialWrites(t *testing.T, te *epochManagerTestEnv) {
ctx := testlogging.Context(t)
expected := &fakeIndex{}
diff --git a/internal/epoch/epoch_utils.go b/internal/epoch/epoch_utils.go
index 3f1c6cb1e..c4940e6b6 100644
--- a/internal/epoch/epoch_utils.go
+++ b/internal/epoch/epoch_utils.go
@@ -35,7 +35,7 @@ func epochNumberFromBlobID(blobID blob.ID) (int, bool) {
func epochRangeFromBlobID(blobID blob.ID) (min, max int, ok bool) {
parts := strings.Split(string(blobID), "_")
- // nolint:gomnd
+ //nolint:gomnd
if len(parts) < 3 {
return 0, 0, false
}
@@ -84,7 +84,7 @@ func groupByEpochRanges(bms []blob.Metadata) map[int]map[int][]blob.Metadata {
func deletionWatermarkFromBlobID(blobID blob.ID) (time.Time, bool) {
str := strings.TrimPrefix(string(blobID), string(DeletionWatermarkBlobPrefix))
- unixSeconds, err := strconv.ParseInt(str, 10, 64) // nolint:gomnd
+ unixSeconds, err := strconv.ParseInt(str, 10, 64) //nolint:gomnd
if err != nil {
return time.Time{}, false
}
diff --git a/internal/fshasher/fshasher.go b/internal/fshasher/fshasher.go
index 14dd84a4a..07e2b2882 100644
--- a/internal/fshasher/fshasher.go
+++ b/internal/fshasher/fshasher.go
@@ -40,7 +40,7 @@ func Hash(ctx context.Context, e fs.Entry) ([]byte, error) {
return h.Sum(nil), nil
}
-// nolint:interfacer
+//nolint:interfacer
func write(ctx context.Context, tw *tar.Writer, fullpath string, e fs.Entry) error {
h, err := header(ctx, fullpath, e)
if err != nil {
diff --git a/internal/fshasher/fshasher_test.go b/internal/fshasher/fshasher_test.go
index 77474d932..cc80d4221 100644
--- a/internal/fshasher/fshasher_test.go
+++ b/internal/fshasher/fshasher_test.go
@@ -9,7 +9,7 @@
"github.com/kopia/kopia/internal/testlogging"
)
-// nolint:gocritic
+//nolint:gocritic
func TestHash(t *testing.T) {
const expectDifferentHashes = "Expected different hashes, got the same"
diff --git a/internal/gather/gather_bytes.go b/internal/gather/gather_bytes.go
index aa1f3b585..5caf11a1a 100644
--- a/internal/gather/gather_bytes.go
+++ b/internal/gather/gather_bytes.go
@@ -10,7 +10,7 @@
"github.com/pkg/errors"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var invalidSliceBuf = []byte(uuid.NewString())
// Bytes represents a sequence of bytes split into slices.
@@ -198,7 +198,7 @@ func (b Bytes) WriteTo(w io.Writer) (int64, error) {
totalN += int64(n)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return totalN, err
}
}
diff --git a/internal/gather/gather_bytes_test.go b/internal/gather/gather_bytes_test.go
index 12d62d789..79a9987c3 100644
--- a/internal/gather/gather_bytes_test.go
+++ b/internal/gather/gather_bytes_test.go
@@ -159,7 +159,7 @@ func TestGatherBytesReadSeeker(t *testing.T) {
require.Equal(t, len(buf), tmp.Length())
reader := tmp.inner.Reader()
- defer reader.Close() // nolint:errcheck
+ defer reader.Close() //nolint:errcheck
require.NoError(t, iotest.TestReader(reader, buf))
diff --git a/internal/gather/gather_write_buffer_chunk.go b/internal/gather/gather_write_buffer_chunk.go
index 6fe1b15ef..b7c8fe4e8 100644
--- a/internal/gather/gather_write_buffer_chunk.go
+++ b/internal/gather/gather_write_buffer_chunk.go
@@ -16,27 +16,27 @@
maxCallersToTrackAllocations = 3
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
trackChunkAllocations = os.Getenv("KOPIA_TRACK_CHUNK_ALLOC") != ""
defaultAllocator = &chunkAllocator{
name: "default",
- chunkSize: 1 << 16, // nolint:gomnd
- maxFreeListSize: 2048, // nolint:gomnd
+ chunkSize: 1 << 16, //nolint:gomnd
+ maxFreeListSize: 2048, //nolint:gomnd
}
// typicalContiguousAllocator is used for short-term buffers for encryption.
typicalContiguousAllocator = &chunkAllocator{
name: "mid-size contiguous",
- chunkSize: 8<<20 + 128, // nolint:gomnd
+ chunkSize: 8<<20 + 128, //nolint:gomnd
maxFreeListSize: runtime.NumCPU(),
}
// maxContiguousAllocator is used for short-term buffers for encryption.
maxContiguousAllocator = &chunkAllocator{
name: "contiguous",
- chunkSize: 16<<20 + 128, // nolint:gomnd
+ chunkSize: 16<<20 + 128, //nolint:gomnd
maxFreeListSize: runtime.NumCPU(),
}
)
diff --git a/internal/hmac/hmac.go b/internal/hmac/hmac.go
index c021a78b5..6bfd022db 100644
--- a/internal/hmac/hmac.go
+++ b/internal/hmac/hmac.go
@@ -15,12 +15,12 @@
func Append(input gather.Bytes, secret []byte, output *gather.WriteBuffer) {
h := hmac.New(sha256.New, secret)
- input.WriteTo(output) // nolint:errcheck
- input.WriteTo(h) // nolint:errcheck
+ input.WriteTo(output) //nolint:errcheck
+ input.WriteTo(h) //nolint:errcheck
var hash [sha256.Size]byte
- output.Write(h.Sum(hash[:0])) // nolint:errcheck
+ output.Write(h.Sum(hash[:0])) //nolint:errcheck
}
// VerifyAndStrip verifies that given block of bytes has correct HMAC-SHA256 checksum and strips it.
diff --git a/internal/indextest/indextest.go b/internal/indextest/indextest.go
index 0cd79f274..2ce529d5f 100644
--- a/internal/indextest/indextest.go
+++ b/internal/indextest/indextest.go
@@ -10,7 +10,8 @@
)
// InfoDiff returns a list of differences between two index.Info, empty if they are equal.
-// nolint:gocyclo
+//
+//nolint:gocyclo
func InfoDiff(i1, i2 index.Info, ignore ...string) []string {
var diffs []string
@@ -60,7 +61,7 @@ func InfoDiff(i1, i2 index.Info, ignore ...string) []string {
// dear future reader, if this fails because the number of methods has changed,
// you need to add additional verification above.
- // nolint:gomnd
+ //nolint:gomnd
if cnt := reflect.TypeOf((*index.Info)(nil)).Elem().NumMethod(); cnt != 11 {
diffs = append(diffs, fmt.Sprintf("unexpected number of methods on content.Info: %v, must update the test", cnt))
}
diff --git a/internal/iocopy/copy.go b/internal/iocopy/copy.go
index e2304ab7f..30f34f8df 100644
--- a/internal/iocopy/copy.go
+++ b/internal/iocopy/copy.go
@@ -46,20 +46,20 @@ func Copy(dst io.Writer, src io.Reader) (int64, error) {
// If the reader has a WriteTo method, use it to do the copy.
// Avoids an allocation and a copy.
if wt, ok := src.(io.WriterTo); ok {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return wt.WriteTo(dst)
}
// Similarly, if the writer has a ReadFrom method, use it to do the copy.
if rt, ok := dst.(io.ReaderFrom); ok {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rt.ReadFrom(src)
}
buf := GetBuffer()
defer ReleaseBuffer(buf)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return io.CopyBuffer(dst, src, buf)
}
diff --git a/internal/listcache/listcache.go b/internal/listcache/listcache.go
index 270047fba..3f9b01435 100644
--- a/internal/listcache/listcache.go
+++ b/internal/listcache/listcache.go
@@ -84,7 +84,7 @@ func (s *listCacheStorage) readBlobsFromCache(ctx context.Context, prefix blob.I
// ListBlobs implements blob.Storage and caches previous list results for a given prefix.
func (s *listCacheStorage) ListBlobs(ctx context.Context, prefix blob.ID, cb func(blob.Metadata) error) error {
if !s.isCachedPrefix(prefix) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Storage.ListBlobs(ctx, prefix, cb)
}
@@ -92,7 +92,7 @@ func (s *listCacheStorage) ListBlobs(ctx context.Context, prefix blob.ID, cb fun
if cached == nil {
all, err := blob.ListAllBlobs(ctx, s.Storage, prefix)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -118,7 +118,7 @@ func (s *listCacheStorage) PutBlob(ctx context.Context, blobID blob.ID, data blo
err := s.Storage.PutBlob(ctx, blobID, data, opts)
s.invalidateAfterUpdate(ctx, blobID)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -135,7 +135,7 @@ func (s *listCacheStorage) DeleteBlob(ctx context.Context, blobID blob.ID) error
err := s.Storage.DeleteBlob(ctx, blobID)
s.invalidateAfterUpdate(ctx, blobID)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
diff --git a/internal/logfile/logfile.go b/internal/logfile/logfile.go
index 9efb654bf..15eb21fbf 100644
--- a/internal/logfile/logfile.go
+++ b/internal/logfile/logfile.go
@@ -28,7 +28,7 @@
const logsDirMode = 0o700
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var logLevels = []string{"debug", "info", "warning", "error"}
type loggingFlags struct {
@@ -277,7 +277,7 @@ func (c *loggingFlags) setupLogFileCore(now time.Time, suffix string) zapcore.Co
)
}
-// nolint:gocritic
+//nolint:gocritic
func (c *loggingFlags) jsonOrConsoleEncoder(ec zaplogutil.StdConsoleEncoderConfig, jc zapcore.EncoderConfig, isJSON bool) zapcore.Encoder {
if isJSON {
return zapcore.NewJSONEncoder(jc)
@@ -412,7 +412,7 @@ func (w *onDemandFile) Sync() error {
return nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return w.f.Sync()
}
@@ -480,6 +480,6 @@ func (w *onDemandFile) Write(b []byte) (int, error) {
n, err := w.f.Write(b)
w.currentSegmentSize += n
- // nolint:wrapcheck
+ //nolint:wrapcheck
return n, err
}
diff --git a/internal/mockfs/mockfs.go b/internal/mockfs/mockfs.go
index 96facbb34..a181d920a 100644
--- a/internal/mockfs/mockfs.go
+++ b/internal/mockfs/mockfs.go
@@ -14,7 +14,8 @@
)
// DefaultModTime is the default modification time for mock filesystem entries.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var DefaultModTime = time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC)
// ReaderSeekerCloser implements io.Reader, io.Seeker and io.Closer.
@@ -238,7 +239,7 @@ func (imd *Directory) Subdir(name ...string) *Directory {
panic(fmt.Sprintf("'%s' is not a directory in '%s'", n, i.Name()))
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
i = i2.(*Directory)
}
@@ -355,7 +356,7 @@ func NewDirectory() *Directory {
return &Directory{
entry: entry{
name: "",
- mode: 0o777 | os.ModeDir, // nolint:gomnd
+ mode: 0o777 | os.ModeDir, //nolint:gomnd
modTime: DefaultModTime,
},
}
diff --git a/internal/mount/mount_fuse.go b/internal/mount/mount_fuse.go
index 248dc28d4..44816aaa3 100644
--- a/internal/mount/mount_fuse.go
+++ b/internal/mount/mount_fuse.go
@@ -17,7 +17,8 @@
)
// we're serving read-only filesystem, cache some attributes for 30 seconds.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var cacheTimeout = 30 * time.Second
func (mo *Options) toFuseMountOptions() *gofusefs.Options {
diff --git a/internal/mount/mount_net_use.go b/internal/mount/mount_net_use.go
index 712d3d8d8..2448cadc4 100644
--- a/internal/mount/mount_net_use.go
+++ b/internal/mount/mount_net_use.go
@@ -107,7 +107,7 @@ func (c netuseController) Unmount(ctx context.Context) error {
return errors.Wrap(err, "unable to delete drive with 'net use'")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.inner.Unmount(ctx)
}
diff --git a/internal/mount/mount_webdav.go b/internal/mount/mount_webdav.go
index a6f592128..cf85d8aa9 100644
--- a/internal/mount/mount_webdav.go
+++ b/internal/mount/mount_webdav.go
@@ -52,7 +52,7 @@ func DirectoryWebDAV(ctx context.Context, entry fs.Directory) (Controller, error
}
srv := &http.Server{
- ReadHeaderTimeout: 15 * time.Second, // nolint:gomnd
+ ReadHeaderTimeout: 15 * time.Second, //nolint:gomnd
Handler: mux,
}
diff --git a/internal/ospath/ospath.go b/internal/ospath/ospath.go
index d8a2394b2..ae0903ae6 100644
--- a/internal/ospath/ospath.go
+++ b/internal/ospath/ospath.go
@@ -8,7 +8,7 @@
"strings"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
userSettingsDir string
userLogsDir string
@@ -26,7 +26,7 @@ func LogsDir() string {
// IsAbs determines if a given path is absolute, in particular treating treating \\hostname\share as absolute on Windows.
func IsAbs(s string) bool {
- // nolint:forbidigo
+ //nolint:forbidigo
if filepath.IsAbs(s) {
return true
}
diff --git a/internal/ownwrites/ownwrites.go b/internal/ownwrites/ownwrites.go
index 0be18d353..a2a0e2f97 100644
--- a/internal/ownwrites/ownwrites.go
+++ b/internal/ownwrites/ownwrites.go
@@ -26,7 +26,7 @@
prefixDelete = "del"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var markerData = gather.FromSlice([]byte("marker"))
// CacheStorage implements a wrapper around blob.Storage that ensures recent local mutations
@@ -95,7 +95,7 @@ func (s *CacheStorage) ListBlobs(ctx context.Context, prefix blob.ID, cb func(bl
return cb(bm)
}); err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -112,7 +112,7 @@ func (s *CacheStorage) ListBlobs(ctx context.Context, prefix blob.ID, cb func(bl
}
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -130,11 +130,11 @@ func (s *CacheStorage) PutBlob(ctx context.Context, blobID blob.ID, data blob.By
if err == nil && s.isCachedPrefix(blobID) {
opts.GetModTime = nil
- // nolint:errcheck
+ //nolint:errcheck
s.cacheStorage.PutBlob(ctx, prefixAdd+blobID, markerData, opts)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -142,11 +142,11 @@ func (s *CacheStorage) PutBlob(ctx context.Context, blobID blob.ID, data blob.By
func (s *CacheStorage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
err := s.Storage.DeleteBlob(ctx, blobID)
if err == nil && s.isCachedPrefix(blobID) {
- // nolint:errcheck
+ //nolint:errcheck
s.cacheStorage.PutBlob(ctx, prefixDelete+blobID, markerData, blob.PutOptions{})
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
diff --git a/internal/parallelwork/parallel_work_queue.go b/internal/parallelwork/parallel_work_queue.go
index 2557bae40..855e539b4 100644
--- a/internal/parallelwork/parallel_work_queue.go
+++ b/internal/parallelwork/parallel_work_queue.go
@@ -69,7 +69,7 @@ func (v *Queue) Process(ctx context.Context, workers int) error {
select {
case <-ctx.Done():
// context canceled - some other worker returned an error.
- // nolint:wrapcheck
+ //nolint:wrapcheck
return ctx.Err()
default:
@@ -89,7 +89,7 @@ func (v *Queue) Process(ctx context.Context, workers int) error {
})
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return eg.Wait()
}
@@ -113,7 +113,7 @@ func (v *Queue) dequeue(ctx context.Context) CallbackFunc {
front := v.queueItems.Front()
v.queueItems.Remove(front)
- return front.Value.(CallbackFunc) // nolint:forcetypeassert
+ return front.Value.(CallbackFunc) //nolint:forcetypeassert
}
func (v *Queue) completed(ctx context.Context) {
diff --git a/internal/passwordpersist/passwordpersist_file.go b/internal/passwordpersist/passwordpersist_file.go
index ee0a4c9cc..4463f8954 100644
--- a/internal/passwordpersist/passwordpersist_file.go
+++ b/internal/passwordpersist/passwordpersist_file.go
@@ -41,7 +41,7 @@ func (filePasswordStorage) PersistPassword(ctx context.Context, configFile, pass
fn := passwordFileName(configFile)
log(ctx).Debugf("Saving password to file %v.", fn)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.WriteFile(fn, []byte(base64.StdEncoding.EncodeToString([]byte(password))), passwordFileMode)
}
diff --git a/internal/providervalidation/providervalidation.go b/internal/providervalidation/providervalidation.go
index 7b2a964c9..4ad3cfe90 100644
--- a/internal/providervalidation/providervalidation.go
+++ b/internal/providervalidation/providervalidation.go
@@ -34,7 +34,8 @@ type Options struct {
}
// DefaultOptions is the default set of options.
-// nolint:gomnd,gochecknoglobals
+//
+//nolint:gomnd,gochecknoglobals
var DefaultOptions = Options{
MaxClockDrift: 3 * time.Minute,
ConcurrencyTestDuration: 30 * time.Second,
@@ -51,7 +52,8 @@ type Options struct {
// ValidateProvider runs a series of tests against provided storage to validate that
// it can be used with Kopia.
-// nolint:gomnd,funlen,gocyclo,cyclop
+//
+//nolint:gomnd,funlen,gocyclo,cyclop
func ValidateProvider(ctx context.Context, st blob.Storage, opt Options) error {
if os.Getenv("KOPIA_SKIP_PROVIDER_VALIDATION") != "" {
return nil
@@ -292,7 +294,7 @@ func (c *concurrencyTest) pickBlob() (blob.ID, []byte, bool) {
return "", nil, false
}
- id := c.blobIDs[rand.Intn(len(c.blobIDs))] // nolint:gosec
+ id := c.blobIDs[rand.Intn(len(c.blobIDs))] //nolint:gosec
return id, c.blobData[id], c.blobWritten[id]
}
diff --git a/internal/releasable/releaseable_tracker.go b/internal/releasable/releaseable_tracker.go
index 639d4d13e..9ba3587ef 100644
--- a/internal/releasable/releaseable_tracker.go
+++ b/internal/releasable/releaseable_tracker.go
@@ -100,10 +100,10 @@ func (s *perKindTracker) active() map[interface{}]string {
}
var (
- perKindMutex sync.Mutex // nolint:gochecknoglobals
+ perKindMutex sync.Mutex //nolint:gochecknoglobals
// +checklocks:perKindMutex
- perKindTrackers = map[ItemKind]*perKindTracker{} // nolint:gochecknoglobals
+ perKindTrackers = map[ItemKind]*perKindTracker{} //nolint:gochecknoglobals
)
// EnableTracking enables tracking of the given item type.
diff --git a/internal/repotesting/reconnectable_storage.go b/internal/repotesting/reconnectable_storage.go
index 7a73309f8..c2087c226 100644
--- a/internal/repotesting/reconnectable_storage.go
+++ b/internal/repotesting/reconnectable_storage.go
@@ -46,7 +46,7 @@ func NewReconnectableStorage(tb testing.TB, st blob.Storage) blob.Storage {
return st2
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var reconnectableStorageByUUID sync.Map
func (s reconnectableStorage) ConnectionInfo() blob.ConnectionInfo {
diff --git a/internal/retry/retry.go b/internal/retry/retry.go
index f7eb5d84e..91964554a 100644
--- a/internal/retry/retry.go
+++ b/internal/retry/retry.go
@@ -12,7 +12,7 @@
var log = logging.Module("retry")
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
maxAttempts = 10
retryInitialSleepAmount = 100 * time.Millisecond
@@ -68,7 +68,7 @@ func internalRetry(ctx context.Context, desc string, attempt AttemptFunc, isRetr
for ; i < count || count < 0; i++ {
if cerr := ctx.Err(); cerr != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, cerr
}
diff --git a/internal/server/api_content.go b/internal/server/api_content.go
index cdcdeeaf7..832bf75d7 100644
--- a/internal/server/api_content.go
+++ b/internal/server/api_content.go
@@ -76,7 +76,7 @@ func handleContentPut(ctx context.Context, rc requestContext) (interface{}, *api
var comp compression.HeaderID
if c := rc.queryParam("compression"); c != "" {
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(c, 16, 32)
if err != nil {
return nil, requestError(serverapi.ErrorMalformedRequest, "malformed compression ID")
diff --git a/internal/server/api_estimate.go b/internal/server/api_estimate.go
index 972a29f7e..143acbfb8 100644
--- a/internal/server/api_estimate.go
+++ b/internal/server/api_estimate.go
@@ -128,7 +128,7 @@ func handleEstimate(ctx context.Context, rc requestContext) (interface{}, *apiEr
// launch a goroutine that will continue the estimate and can be observed in the Tasks UI.
- // nolint:errcheck
+ //nolint:errcheck
go rc.srv.taskManager().Run(ctx, "Estimate", resolvedRoot, func(ctx context.Context, ctrl uitask.Controller) error {
taskIDChan <- ctrl.CurrentTaskID()
@@ -137,7 +137,7 @@ func handleEstimate(ctx context.Context, rc requestContext) (interface{}, *apiEr
ctrl.OnCancel(cancel)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshotfs.Estimate(estimatectx, dir, policyTree, estimateTaskProgress{ctrl}, req.MaxExamplesPerBucket)
})
diff --git a/internal/server/api_repo.go b/internal/server/api_repo.go
index bf46f2d34..3aa676659 100644
--- a/internal/server/api_repo.go
+++ b/internal/server/api_repo.go
@@ -185,7 +185,7 @@ func handleRepoExists(ctx context.Context, rc requestContext) (interface{}, *api
return nil, internalServerError(err)
}
- defer st.Close(ctx) // nolint:errcheck
+ defer st.Close(ctx) //nolint:errcheck
var tmp gather.WriteBuffer
defer tmp.Close()
@@ -361,7 +361,7 @@ func connectAPIServerAndOpen(ctx context.Context, si *repo.APIServerInfo, passwo
return nil, errors.Wrap(err, "error connecting to API server")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.Open(ctx, opts.ConfigFile, password, nil)
}
@@ -378,7 +378,7 @@ func connectAndOpen(ctx context.Context, conn blob.ConnectionInfo, password stri
return nil, errors.Wrap(err, "error connecting")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.Open(ctx, opts.ConfigFile, password, nil)
}
@@ -397,12 +397,12 @@ func (s *Server) disconnect(ctx context.Context) error {
}
if err := repo.Disconnect(ctx, s.options.ConfigFile); err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
if err := s.options.PasswordPersist.DeletePassword(ctx, s.options.ConfigFile); err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
diff --git a/internal/server/api_restore.go b/internal/server/api_restore.go
index 1ccdeb688..57eb735a7 100644
--- a/internal/server/api_restore.go
+++ b/internal/server/api_restore.go
@@ -89,7 +89,7 @@ func handleRestore(ctx context.Context, rc requestContext) (interface{}, *apiErr
// launch a goroutine that will continue the restore and can be observed in the Tasks UI.
- // nolint:errcheck
+ //nolint:errcheck
go rc.srv.taskManager().Run(ctx, "Restore", description, func(ctx context.Context, ctrl uitask.Controller) error {
taskIDChan <- ctrl.CurrentTaskID()
diff --git a/internal/server/api_sources.go b/internal/server/api_sources.go
index 5113b16db..6c496e398 100644
--- a/internal/server/api_sources.go
+++ b/internal/server/api_sources.go
@@ -75,7 +75,7 @@ func handleSourcesCreate(ctx context.Context, rc requestContext) (interface{}, *
if err = repo.WriteSession(ctx, rc.rep, repo.WriteSessionOptions{
Purpose: "handleSourcesCreate",
}, func(ctx context.Context, w repo.RepositoryWriter) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return policy.SetPolicy(ctx, w, sourceInfo, req.Policy)
}); err != nil {
return nil, internalServerError(errors.Wrap(err, "unable to set initial policy"))
diff --git a/internal/server/grpc_session.go b/internal/server/grpc_session.go
index 3ea1f1864..68d59165b 100644
--- a/internal/server/grpc_session.go
+++ b/internal/server/grpc_session.go
@@ -104,7 +104,7 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error {
return err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.DirectWriteSession(ctx, dr, opt, func(ctx context.Context, dw repo.DirectRepositoryWriter) error {
// channel to which workers will be sending errors, only holds 1 slot and sends are non-blocking.
lastErr := make(chan error, 1)
diff --git a/internal/server/server.go b/internal/server/server.go
index 366f8001b..014b1c954 100644
--- a/internal/server/server.go
+++ b/internal/server/server.go
@@ -244,7 +244,7 @@ func (s *Server) isAuthCookieValid(username, cookieValue string) bool {
}
func (s *Server) generateShortTermAuthCookie(username string, now time.Time) (string, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return jwt.NewWithClaims(jwt.SigningMethodHS256, &jwt.RegisteredClaims{
Subject: username,
NotBefore: jwt.NewNumericDate(now.Add(-time.Minute)),
@@ -657,11 +657,11 @@ func periodicMaintenanceOnce(ctx context.Context, rep repo.Repository) error {
return errors.Errorf("not a direct repository")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.DirectWriteSession(ctx, dr, repo.WriteSessionOptions{
Purpose: "periodicMaintenanceOnce",
}, func(ctx context.Context, w repo.DirectRepositoryWriter) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return snapshotmaintenance.Run(ctx, w, maintenance.ModeAuto, false, maintenance.SafetyFull)
})
}
@@ -891,7 +891,7 @@ func (s *Server) InitRepositoryAsync(ctx context.Context, mode string, initializ
wg.Add(1)
- // nolint:errcheck
+ //nolint:errcheck
go s.taskmgr.Run(ctx, "Repository", mode, func(ctx context.Context, ctrl uitask.Controller) error {
// we're still holding a lock, until wg.Done(), so no lock around this is needed.
taskID = ctrl.CurrentTaskID()
@@ -950,7 +950,7 @@ func RetryInitRepository(initialize InitRepositoryFunc) InitRepositoryFunc {
for {
if cerr := ctx.Err(); cerr != nil {
// context canceled, bail
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, cerr
}
@@ -962,7 +962,7 @@ func RetryInitRepository(initialize InitRepositoryFunc) InitRepositoryFunc {
log(ctx).Warnf("unable to open repository: %v, will keep trying until canceled. Sleeping for %v", rerr, nextSleepTime)
if !clock.SleepInterruptibly(ctx, nextSleepTime) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, ctx.Err()
}
diff --git a/internal/server/server_authz_checks_test.go b/internal/server/server_authz_checks_test.go
index cde09b493..c6a752c01 100644
--- a/internal/server/server_authz_checks_test.go
+++ b/internal/server/server_authz_checks_test.go
@@ -81,7 +81,7 @@ func TestValidateCSRFToken(t *testing.T) {
tc := tc
t.Run(fmt.Sprintf("case-%v", i), func(t *testing.T) {
- req, err := http.NewRequestWithContext(ctx, "GET", "/somepath", http.NoBody)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/somepath", http.NoBody)
require.NoError(t, err)
if tc.session != "" {
diff --git a/internal/server/server_test.go b/internal/server/server_test.go
index e892b43a8..ace8e075a 100644
--- a/internal/server/server_test.go
+++ b/internal/server/server_test.go
@@ -42,7 +42,7 @@
maxCacheSizeBytes = 1e6
)
-// nolint:thelper
+//nolint:thelper
func startServer(t *testing.T, env *repotesting.Environment, tls bool) *repo.APIServerInfo {
ctx := testlogging.Context(t)
@@ -103,7 +103,7 @@ func TestServer_GRPC(t *testing.T) {
testServer(t, false)
}
-// nolint:thelper
+//nolint:thelper
func testServer(t *testing.T, disableGRPC bool) {
ctx, env := repotesting.NewEnvironment(t, repotesting.FormatNotImportant)
apiServerInfo := startServer(t, env, true)
@@ -145,7 +145,7 @@ func TestGPRServer_AuthenticationError(t *testing.T) {
}
}
-// nolint:gocyclo
+//nolint:gocyclo
func TestServerUIAccessDeniedToRemoteUser(t *testing.T) {
ctx, env := repotesting.NewEnvironment(t, repotesting.FormatNotImportant)
si := startServer(t, env, true)
@@ -238,7 +238,7 @@ func TestServerUIAccessDeniedToRemoteUser(t *testing.T) {
}
}
-// nolint:thelper
+//nolint:thelper
func remoteRepositoryTest(ctx context.Context, t *testing.T, rep repo.Repository) {
mustListSnapshotCount(ctx, t, rep, 0)
mustGetObjectNotFound(ctx, t, rep, mustParseObjectID(t, "abcd"))
@@ -436,7 +436,7 @@ func mustManifestNotFound(t *testing.T, err error) {
}
}
-// nolint:unparam
+//nolint:unparam
func mustParseObjectID(t *testing.T, s string) object.ID {
t.Helper()
diff --git a/internal/server/source_manager.go b/internal/server/source_manager.go
index 2aa38a172..98dbb4703 100644
--- a/internal/server/source_manager.go
+++ b/internal/server/source_manager.go
@@ -331,7 +331,7 @@ func (s *sourceManager) snapshot(ctx context.Context) error {
defer s.server.endUpload(ctx, s.src)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.server.taskmgr.Run(ctx,
"Snapshot",
fmt.Sprintf("%v at %v", s.src, clock.Now().Format(time.RFC3339)),
@@ -365,7 +365,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
s.lastAttemptedSnapshotTime = clock.Now()
s.sourceMutex.Unlock()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return repo.WriteSession(ctx, s.rep, repo.WriteSessionOptions{
Purpose: "Source Manager Uploader",
OnUpload: func(numBytes int64) {
diff --git a/internal/serverapi/client_wrappers.go b/internal/serverapi/client_wrappers.go
index 576fb4f2b..8d8839c2e 100644
--- a/internal/serverapi/client_wrappers.go
+++ b/internal/serverapi/client_wrappers.go
@@ -77,25 +77,25 @@ func CancelUpload(ctx context.Context, c *apiclient.KopiaAPIClient, match *snaps
// CreateRepository invokes the 'repo/create' API.
func CreateRepository(ctx context.Context, c *apiclient.KopiaAPIClient, req *CreateRepositoryRequest) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.Post(ctx, "repo/create", req, &StatusResponse{})
}
// ConnectToRepository invokes the 'repo/connect' API.
func ConnectToRepository(ctx context.Context, c *apiclient.KopiaAPIClient, req *ConnectRepositoryRequest) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.Post(ctx, "repo/connect", req, &StatusResponse{})
}
// DisconnectFromRepository invokes the 'repo/disconnect' API.
func DisconnectFromRepository(ctx context.Context, c *apiclient.KopiaAPIClient) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.Post(ctx, "repo/disconnect", &Empty{}, &Empty{})
}
// Shutdown invokes the 'control/shutdown' API.
func Shutdown(ctx context.Context, c *apiclient.KopiaAPIClient) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c.Post(ctx, "control/shutdown", &Empty{}, &Empty{})
}
diff --git a/internal/sparsefile/sparsefile.go b/internal/sparsefile/sparsefile.go
index 24839af66..9e538e7c2 100644
--- a/internal/sparsefile/sparsefile.go
+++ b/internal/sparsefile/sparsefile.go
@@ -24,10 +24,10 @@ func Copy(dst io.WriteSeeker, src io.Reader, bufSize uint64) (int64, error) {
func copyBuffer(dst io.WriteSeeker, src io.Reader, buf []byte) (written int64, err error) {
for {
nr, er := src.Read(buf)
- if nr > 0 { // nolint:nestif
+ if nr > 0 { //nolint:nestif
// If non-zero data is read, write it. Otherwise, skip forwards.
if isAllZero(buf) {
- dst.Seek(int64(nr), os.SEEK_CUR) // nolint:errcheck
+ dst.Seek(int64(nr), os.SEEK_CUR) //nolint:errcheck
written += int64(nr)
continue
diff --git a/internal/stat/stat_bsd.go b/internal/stat/stat_bsd.go
index 57fdf7202..92b4a5e7d 100644
--- a/internal/stat/stat_bsd.go
+++ b/internal/stat/stat_bsd.go
@@ -24,7 +24,7 @@ func GetFileAllocSize(fname string) (uint64, error) {
err := syscall.Stat(fname, &st)
if err != nil {
- return 0, err // nolint:wrapcheck
+ return 0, err //nolint:wrapcheck
}
return uint64(st.Blocks) * diskBlockSize, nil
@@ -36,7 +36,7 @@ func GetBlockSize(path string) (uint64, error) {
err := syscall.Statfs(path, &st)
if err != nil {
- return 0, err // nolint:wrapcheck
+ return 0, err //nolint:wrapcheck
}
if st.F_bsize <= 0 {
diff --git a/internal/stat/stat_unix.go b/internal/stat/stat_unix.go
index a5f5386ea..49a91096b 100644
--- a/internal/stat/stat_unix.go
+++ b/internal/stat/stat_unix.go
@@ -24,7 +24,7 @@ func GetFileAllocSize(fname string) (uint64, error) {
err := syscall.Stat(fname, &st)
if err != nil {
- return 0, err // nolint:wrapcheck
+ return 0, err //nolint:wrapcheck
}
return uint64(st.Blocks) * diskBlockSize, nil
@@ -36,12 +36,12 @@ func GetBlockSize(path string) (uint64, error) {
err := syscall.Statfs(path, &st)
if err != nil {
- return 0, err // nolint:wrapcheck
+ return 0, err //nolint:wrapcheck
}
if st.Bsize <= 0 {
return 0, errors.Wrapf(errInvalidBlockSize, "%d", st.Bsize)
}
- return uint64(st.Bsize), nil // nolint:unconvert,nolintlint
+ return uint64(st.Bsize), nil //nolint:unconvert,nolintlint
}
diff --git a/internal/stats/countsum.go b/internal/stats/countsum.go
index 44a17bc80..8797c4a77 100644
--- a/internal/stats/countsum.go
+++ b/internal/stats/countsum.go
@@ -2,7 +2,6 @@
// +build amd64
// Package stats provides helpers for simple stats
-//
package stats
import "sync/atomic"
diff --git a/internal/testlogging/ctx.go b/internal/testlogging/ctx.go
index b3f0ef667..06e0b75bc 100644
--- a/internal/testlogging/ctx.go
+++ b/internal/testlogging/ctx.go
@@ -29,7 +29,8 @@ type testingT interface {
)
// NewTestLogger returns logger bound to the provided testing.T.
-// nolint:thelper
+//
+//nolint:thelper
func NewTestLogger(t *testing.T) logging.Logger {
return Printf(t.Logf, "")
}
diff --git a/internal/testutil/testutil.go b/internal/testutil/testutil.go
index 060d1f0bc..6fe3d5b99 100644
--- a/internal/testutil/testutil.go
+++ b/internal/testutil/testutil.go
@@ -137,7 +137,8 @@ func MustParseJSONLines(t *testing.T, lines []string, v interface{}) {
}
// RunAllTestsWithParam uses reflection to run all test methods starting with 'Test' on the provided object.
-// nolint:thelper
+//
+//nolint:thelper
func RunAllTestsWithParam(t *testing.T, v interface{}) {
m := reflect.ValueOf(v)
typ := m.Type()
diff --git a/internal/testutil/tmpdir.go b/internal/testutil/tmpdir.go
index f8f7b24e6..5f44a57a2 100644
--- a/internal/testutil/tmpdir.go
+++ b/internal/testutil/tmpdir.go
@@ -19,7 +19,7 @@
logsDirPermissions = 0o750
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var interestingLengths = []int{10, 50, 100, 240, 250, 260, 270}
// GetInterestingTempDirectoryName returns interesting directory name used for testing.
@@ -29,13 +29,13 @@ func GetInterestingTempDirectoryName() (string, error) {
return "", errors.Wrap(err, "unable to create temp directory")
}
- // nolint:gosec
+ //nolint:gosec
targetLen := interestingLengths[rand.Intn(len(interestingLengths))]
// make sure the base directory is quite long to trigger very long filenames on Windows.
if n := len(td); n < targetLen {
td = filepath.Join(td, strings.Repeat("f", targetLen-n))
- // nolint:gomnd
+ //nolint:gomnd
if err := os.MkdirAll(td, 0o700); err != nil {
return "", errors.Wrap(err, "unable to create temp directory")
}
@@ -56,7 +56,7 @@ func TempDirectory(tb testing.TB) string {
tb.Cleanup(func() {
if !tb.Failed() {
- os.RemoveAll(d) // nolint:errcheck
+ os.RemoveAll(d) //nolint:errcheck
} else {
tb.Logf("temporary files left in %v", d)
}
@@ -93,7 +93,7 @@ func TempLogDirectory(t *testing.T) string {
dumpLogs(t, logsDir)
}
- os.RemoveAll(logsDir) // nolint:errcheck
+ os.RemoveAll(logsDir) //nolint:errcheck
})
return logsDir
@@ -122,7 +122,7 @@ func dumpLogs(t *testing.T, dirname string) {
func dumpLogFile(t *testing.T, fname string) {
t.Helper()
- data, err := os.ReadFile(fname) // nolint:gosec
+ data, err := os.ReadFile(fname) //nolint:gosec
if err != nil {
t.Error(err)
return
@@ -137,9 +137,9 @@ func trimOutput(s string) string {
return s
}
- lines2 := append([]string(nil), lines[0:(maxOutputLinesToLog/2)]...) // nolint:gomnd
+ lines2 := append([]string(nil), lines[0:(maxOutputLinesToLog/2)]...) //nolint:gomnd
lines2 = append(lines2, fmt.Sprintf("/* %v lines removed */", len(lines)-maxOutputLinesToLog))
- lines2 = append(lines2, lines[len(lines)-(maxOutputLinesToLog/2):]...) // nolint:gomnd
+ lines2 = append(lines2, lines[len(lines)-(maxOutputLinesToLog/2):]...) //nolint:gomnd
return strings.Join(lines2, "\n")
}
diff --git a/internal/timestampmeta/timestampmeta.go b/internal/timestampmeta/timestampmeta.go
index ee73a4e2f..533a97552 100644
--- a/internal/timestampmeta/timestampmeta.go
+++ b/internal/timestampmeta/timestampmeta.go
@@ -15,13 +15,13 @@ func ToMap(t time.Time, mapKey string) map[string]string {
}
return map[string]string{
- mapKey: strconv.FormatInt(t.UnixNano(), 10), // nolint:gomnd
+ mapKey: strconv.FormatInt(t.UnixNano(), 10), //nolint:gomnd
}
}
// FromValue attempts to convert the provided value stored in metadata into time.Time.
func FromValue(v string) (t time.Time, ok bool) {
- nanos, err := strconv.ParseInt(v, 10, 64) // nolint:gomnd
+ nanos, err := strconv.ParseInt(v, 10, 64) //nolint:gomnd
if err != nil {
return time.Time{}, false
}
diff --git a/internal/timetrack/timer.go b/internal/timetrack/timer.go
index b367de833..4c37e721e 100644
--- a/internal/timetrack/timer.go
+++ b/internal/timetrack/timer.go
@@ -12,10 +12,10 @@ type Timer struct {
// Elapsed returns time elapsed since the timer was started.
func (t Timer) Elapsed() time.Duration {
- return time.Since(t.startTime) // nolint:forbidigo
+ return time.Since(t.startTime) //nolint:forbidigo
}
// StartTimer starts the timer.
func StartTimer() Timer {
- return Timer{time.Now()} // nolint:forbidigo
+ return Timer{time.Now()} //nolint:forbidigo
}
diff --git a/internal/tlsutil/tlsutil.go b/internal/tlsutil/tlsutil.go
index 8496c25d3..8dfaa0555 100644
--- a/internal/tlsutil/tlsutil.go
+++ b/internal/tlsutil/tlsutil.go
@@ -43,7 +43,7 @@ func GenerateServerCertificate(ctx context.Context, keySize int, certValid time.
notBefore := clock.Now()
notAfter := notBefore.Add(certValid)
- // nolint:gomnd
+ //nolint:gomnd
serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return nil, nil, errors.Wrap(err, "unable to generate serial number")
diff --git a/internal/uitask/uitask.go b/internal/uitask/uitask.go
index 4564fa613..e095d1249 100644
--- a/internal/uitask/uitask.go
+++ b/internal/uitask/uitask.go
@@ -178,7 +178,7 @@ func uiLevelEncoder(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
}
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var containsFormatLogModule = []byte(`"` + uiLogModuleKey + `":"` + content.FormatLogModule + `"`)
func (t *runningTaskInfo) addLogEntry(le json.RawMessage) {
diff --git a/internal/uitask/uitask_test.go b/internal/uitask/uitask_test.go
index 93458a5e8..cc06ed60d 100644
--- a/internal/uitask/uitask_test.go
+++ b/internal/uitask/uitask_test.go
@@ -315,7 +315,7 @@ func TestUITaskCancel_BeforeOnCancel(t *testing.T) {
func getTaskID(t *testing.T, m *uitask.Manager, desc string) string {
t.Helper()
- // nolint:gocritic
+ //nolint:gocritic
for _, tsk := range m.ListTasks() {
if tsk.Description == desc {
return tsk.TaskID
@@ -354,7 +354,7 @@ func logText(items []json.RawMessage) string {
func mustFindTask(t *testing.T, tasks []uitask.Info, tid string) uitask.Info {
t.Helper()
- // nolint:gocritic
+ //nolint:gocritic
for _, tsk := range tasks {
if tsk.TaskID == tid {
return tsk
diff --git a/internal/units/units.go b/internal/units/units.go
index a1d0f4ef0..10c41d33c 100644
--- a/internal/units/units.go
+++ b/internal/units/units.go
@@ -6,7 +6,7 @@
"strings"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
base10UnitPrefixes = []string{"", "K", "M", "G", "T"}
base2UnitPrefixes = []string{"", "Ki", "Mi", "Gi", "Ti"}
@@ -30,24 +30,24 @@ func toDecimalUnitString(f, thousand float64, prefixes []string, suffix string)
// BytesStringBase10 formats the given value as bytes with the appropriate base-10 suffix (KB, MB, GB, ...)
func BytesStringBase10(b int64) string {
- // nolint:gomnd
+ //nolint:gomnd
return toDecimalUnitString(float64(b), 1000, base10UnitPrefixes, "B")
}
// BytesStringBase2 formats the given value as bytes with the appropriate base-2 suffix (KiB, MiB, GiB, ...)
func BytesStringBase2(b int64) string {
- // nolint:gomnd
+ //nolint:gomnd
return toDecimalUnitString(float64(b), 1024.0, base2UnitPrefixes, "B")
}
// BytesPerSecondsString formats the given value bytes per second with the appropriate base-10 suffix (KB/s, MB/s, GB/s, ...)
func BytesPerSecondsString(bps float64) string {
- // nolint:gomnd
+ //nolint:gomnd
return toDecimalUnitString(bps, 1000, base10UnitPrefixes, "B/s")
}
// Count returns the given number with the appropriate base-10 suffix (K, M, G, ...)
func Count(v int64) string {
- // nolint:gomnd
+ //nolint:gomnd
return toDecimalUnitString(float64(v), 1000, base10UnitPrefixes, "")
}
diff --git a/internal/user/user_profile_hash_v1.go b/internal/user/user_profile_hash_v1.go
index b120db070..a0cc949fd 100644
--- a/internal/user/user_profile_hash_v1.go
+++ b/internal/user/user_profile_hash_v1.go
@@ -9,7 +9,7 @@
"golang.org/x/crypto/scrypt"
)
-// parameters for v1 hashing.
+// parameters for v1 hashing.
const (
hashVersion1 = 1
@@ -20,7 +20,7 @@
v1KeyLength = 32
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var dummyV1HashThatNeverMatchesAnyPassword = make([]byte, v1KeyLength+v1SaltLength)
func (p *Profile) setPasswordV1(password string) error {
diff --git a/internal/wcmatch/runeScanner.go b/internal/wcmatch/runeScanner.go
index 87665525e..11aef934a 100644
--- a/internal/wcmatch/runeScanner.go
+++ b/internal/wcmatch/runeScanner.go
@@ -12,9 +12,9 @@ func newRuneScanner(text string, ignoreCase bool) *runeScanner {
return &runeScanner{0, []rune(text), ignoreCase}
}
-// peek returns the character at the current position plus the specified index.
-// If ignoreCase was specified when creating the reader, any uppercase character is
-// converted to lowercase. If outside the bounds of the text, 0 is returned.
+// peek returns the character at the current position plus the specified index.
+// If ignoreCase was specified when creating the reader, any uppercase character is
+// converted to lowercase. If outside the bounds of the text, 0 is returned.
func (s *runeScanner) peek(index int) rune {
if s.pos+index < len(s.text) && s.pos+index >= 0 {
ch := s.text[s.pos+index]
diff --git a/internal/wcmatch/wcmatch.go b/internal/wcmatch/wcmatch.go
index a3500e5fa..9b5dfe01f 100644
--- a/internal/wcmatch/wcmatch.go
+++ b/internal/wcmatch/wcmatch.go
@@ -67,7 +67,8 @@ func (matcher *WildcardMatcher) Options() Options {
// NewWildcardMatcher creates a new WildcardMatcher with the specified pattern and options.
// The default option is for the matcher to be case-sensitive without a base dir.
-// nolint:funlen,gocognit,gocyclo,cyclop,maintidx
+//
+//nolint:funlen,gocognit,gocyclo,cyclop,maintidx
func NewWildcardMatcher(pattern string, options ...Option) (matcher *WildcardMatcher, err error) {
var result []token
diff --git a/internal/webdavmount/webdavmount.go b/internal/webdavmount/webdavmount.go
index 1d5591e26..1af482f9e 100644
--- a/internal/webdavmount/webdavmount.go
+++ b/internal/webdavmount/webdavmount.go
@@ -26,7 +26,7 @@
type webdavFile struct {
// webdavFile implements webdav.File but needs context
// +checklocks:mu
- ctx context.Context // nolint:containedctx
+ ctx context.Context //nolint:containedctx
entry fs.File
@@ -66,7 +66,7 @@ func (f *webdavFile) Read(b []byte) (int, error) {
return 0, err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.Read(b)
}
@@ -76,7 +76,7 @@ func (f *webdavFile) Seek(offset int64, whence int) (int64, error) {
return 0, err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.Seek(offset, whence)
}
@@ -91,7 +91,7 @@ func (f *webdavFile) Close() error {
f.mu.Unlock()
if r != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.Close()
}
@@ -100,13 +100,13 @@ func (f *webdavFile) Close() error {
type webdavDir struct {
// webdavDir implements webdav.File but needs context
- ctx context.Context // nolint:containedctx
+ ctx context.Context //nolint:containedctx
w *webdavFS
entry fs.Directory
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var symlinksAreUnsupportedLogged = new(int32)
// TODO: (bug) This incorrectly truncates the entries in the directory and does not allow pagination.
diff --git a/internal/workshare/workshare_test.go b/internal/workshare/workshare_test.go
index c5131a557..f600f7f1f 100644
--- a/internal/workshare/workshare_test.go
+++ b/internal/workshare/workshare_test.go
@@ -140,7 +140,7 @@ func TestDisallowed_UseAfterPoolClose(t *testing.T) {
})
}
-// nolint:thelper
+//nolint:thelper
func testComputeTreeSum(t *testing.T, numWorkers int) {
w := workshare.NewPool(numWorkers)
defer w.Close()
diff --git a/internal/workshare/workshare_waitgroup.go b/internal/workshare/workshare_waitgroup.go
index 212a76158..70409c46e 100644
--- a/internal/workshare/workshare_waitgroup.go
+++ b/internal/workshare/workshare_waitgroup.go
@@ -2,40 +2,40 @@
//
// It is commonly used to traverse tree-like structures:
//
-// type processWorkRequest struct {
-// node *someNode
-// err error
-// }
+// type processWorkRequest struct {
+// node *someNode
+// err error
+// }
//
-// func processWork(p *workshare.Pool, req interface{}) {
-// req := req.(*processWorkRequest)
-// req.err = visitNode(p, req.node)
-// }
+// func processWork(p *workshare.Pool, req interface{}) {
+// req := req.(*processWorkRequest)
+// req.err = visitNode(p, req.node)
+// }
//
-// func visitNode(p *workshare.Pool, n *someNode) error {
-// var wg workshare.AsyncGroup
-// defer wg.Close()
+// func visitNode(p *workshare.Pool, n *someNode) error {
+// var wg workshare.AsyncGroup
+// defer wg.Close()
//
-// for _, child := range n.children {
-// if wg.CanShareWork(p) {
-// // run asynchronously, collect result using wg.Wait() below.
-// RunAsync(p, processWork, processWorkRequest{child})
-// } else {
-// if err := visitNode(p, n); err != nil {
-// return err
-// }
-// }
-// }
+// for _, child := range n.children {
+// if wg.CanShareWork(p) {
+// // run asynchronously, collect result using wg.Wait() below.
+// RunAsync(p, processWork, processWorkRequest{child})
+// } else {
+// if err := visitNode(p, n); err != nil {
+// return err
+// }
+// }
+// }
//
-// // wait for results from all shared work and handle them.
-// for _, req := range wg.Wait() {
-// if err := req.(*processWorkRequest).err; err != nil {
-// return err
-// }
-// }
+// // wait for results from all shared work and handle them.
+// for _, req := range wg.Wait() {
+// if err := req.(*processWorkRequest).err; err != nil {
+// return err
+// }
+// }
//
-// return nil
-// }
+// return nil
+// }
//
// wp = workshare.NewPool(10)
// defer wp.Close()
diff --git a/internal/zaplogutil/zaplogutil.go b/internal/zaplogutil/zaplogutil.go
index c9c4c78fc..5b22f4166 100644
--- a/internal/zaplogutil/zaplogutil.go
+++ b/internal/zaplogutil/zaplogutil.go
@@ -62,7 +62,7 @@ type StdConsoleEncoderConfig struct {
ColoredLogLevel bool
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var bufPool = buffer.NewPool()
type stdConsoleEncoder struct {
@@ -128,7 +128,7 @@ func (c *stdConsoleEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Fiel
line.AppendString(ent.Message)
if line2, err := c.Encoder.EncodeEntry(ent, fields); err == nil {
- if line2.Len() > 2 { // nolint:gomnd
+ if line2.Len() > 2 { //nolint:gomnd
line.AppendString("\t")
line.AppendString(line2.String())
}
diff --git a/main.go b/main.go
index adc5f51f9..db12fcb51 100644
--- a/main.go
+++ b/main.go
@@ -3,7 +3,7 @@
Usage:
- $ kopia [] [ ...]
+ $ kopia [] [ ...]
Use 'kopia help' to see more details.
*/
diff --git a/repo/api_server_repository.go b/repo/api_server_repository.go
index 8bc36cb6f..a38717987 100644
--- a/repo/api_server_repository.go
+++ b/repo/api_server_repository.go
@@ -61,7 +61,7 @@ func (r *apiServerRepository) ClientOptions() ClientOptions {
}
func (r *apiServerRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.Open(ctx, r, id)
}
@@ -71,12 +71,12 @@ func (r *apiServerRepository) NewObjectWriter(ctx context.Context, opt object.Wr
// ConcatenateObjects creates a concatenated objects from the provided object IDs.
func (r *apiServerRepository) ConcatenateObjects(ctx context.Context, objectIDs []object.ID) (object.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.omgr.Concatenate(ctx, objectIDs)
}
func (r *apiServerRepository) VerifyObject(ctx context.Context, id object.ID) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.VerifyObject(ctx, r, id)
}
@@ -87,7 +87,7 @@ func (r *apiServerRepository) GetManifest(ctx context.Context, id manifest.ID, d
return nil, errors.Wrap(err, "GetManifest")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return mm.Metadata, json.Unmarshal(mm.Payload, data)
}
@@ -188,12 +188,12 @@ func (r *apiServerRepository) GetContent(ctx context.Context, contentID content.
return errors.Wrap(err, "GetContent")
}
- tmp.Write(result) // nolint:errcheck
+ tmp.Write(result) //nolint:errcheck
return nil
}, &tmp)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -252,7 +252,7 @@ func (r *apiServerRepository) Close(ctx context.Context) error {
}
func (r *apiServerRepository) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.PrefetchBackingContents(ctx, r, objectIDs, hint)
}
diff --git a/repo/blob/azure/azure_storage.go b/repo/blob/azure/azure_storage.go
index da0bd42d7..b6e7b2957 100644
--- a/repo/blob/azure/azure_storage.go
+++ b/repo/blob/azure/azure_storage.go
@@ -57,7 +57,7 @@ func (az *azStorage) GetBlob(ctx context.Context, b blob.ID, offset, length int6
}
body := resp.Body(nil)
- defer body.Close() // nolint:errcheck
+ defer body.Close() //nolint:errcheck
if length == 0 {
return nil
@@ -67,7 +67,7 @@ func (az *azStorage) GetBlob(ctx context.Context, b blob.ID, offset, length int6
return translateError(err)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
@@ -100,7 +100,7 @@ func translateError(err error) error {
var re *azblob.StorageError
if errors.As(err, &re) {
- // nolint:exhaustive
+ //nolint:exhaustive
switch re.ErrorCode {
case azblob.StorageErrorCodeBlobNotFound:
return blob.ErrBlobNotFound
@@ -296,6 +296,6 @@ func() interface{} {
return &Options{}
},
func(ctx context.Context, o interface{}, isCreate bool) (blob.Storage, error) {
- return New(ctx, o.(*Options)) // nolint:forcetypeassert
+ return New(ctx, o.(*Options)) //nolint:forcetypeassert
})
}
diff --git a/repo/blob/b2/b2_storage.go b/repo/blob/b2/b2_storage.go
index 2fca7c977..7deced8ce 100644
--- a/repo/blob/b2/b2_storage.go
+++ b/repo/blob/b2/b2_storage.go
@@ -64,7 +64,7 @@ func (s *b2Storage) GetBlob(ctx context.Context, id blob.ID, offset, length int6
return nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return iocopy.JustCopy(output, r)
}
@@ -72,7 +72,7 @@ func (s *b2Storage) GetBlob(ctx context.Context, id blob.ID, offset, length int6
return translateError(err)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
@@ -204,7 +204,7 @@ func (s *b2Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func
for {
resp, err := s.bucket.ListFileNamesWithPrefix(nextFile, maxFileQuery, fullPrefix, "")
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -292,6 +292,6 @@ func() interface{} {
return &Options{}
},
func(ctx context.Context, o interface{}, isCreate bool) (blob.Storage, error) {
- return New(ctx, o.(*Options)) // nolint:forcetypeassert
+ return New(ctx, o.(*Options)) //nolint:forcetypeassert
})
}
diff --git a/repo/blob/beforeop/beforeop.go b/repo/blob/beforeop/beforeop.go
index 4953bdc6f..3c8db08e1 100644
--- a/repo/blob/beforeop/beforeop.go
+++ b/repo/blob/beforeop/beforeop.go
@@ -27,7 +27,7 @@ func (s beforeOp) GetBlob(ctx context.Context, id blob.ID, offset, length int64,
}
}
- return s.Storage.GetBlob(ctx, id, offset, length, output) // nolint:wrapcheck
+ return s.Storage.GetBlob(ctx, id, offset, length, output) //nolint:wrapcheck
}
func (s beforeOp) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
@@ -37,7 +37,7 @@ func (s beforeOp) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, e
}
}
- return s.Storage.GetMetadata(ctx, id) // nolint:wrapcheck
+ return s.Storage.GetMetadata(ctx, id) //nolint:wrapcheck
}
func (s beforeOp) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
@@ -47,7 +47,7 @@ func (s beforeOp) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts
}
}
- return s.Storage.PutBlob(ctx, id, data, opts) // nolint:wrapcheck
+ return s.Storage.PutBlob(ctx, id, data, opts) //nolint:wrapcheck
}
func (s beforeOp) DeleteBlob(ctx context.Context, id blob.ID) error {
@@ -57,7 +57,7 @@ func (s beforeOp) DeleteBlob(ctx context.Context, id blob.ID) error {
}
}
- return s.Storage.DeleteBlob(ctx, id) // nolint:wrapcheck
+ return s.Storage.DeleteBlob(ctx, id) //nolint:wrapcheck
}
// NewWrapper creates a wrapped storage interface for data operations that need
diff --git a/repo/blob/config.go b/repo/blob/config.go
index a5a39dbfb..215fe1282 100644
--- a/repo/blob/config.go
+++ b/repo/blob/config.go
@@ -40,7 +40,7 @@ func (c *ConnectionInfo) UnmarshalJSON(b []byte) error {
// MarshalJSON returns JSON-encoded storage configuration.
func (c ConnectionInfo) MarshalJSON() ([]byte, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return json.Marshal(struct {
Type string `json:"type"`
Data interface{} `json:"config"`
diff --git a/repo/blob/filesystem/filesystem_storage.go b/repo/blob/filesystem/filesystem_storage.go
index 1336761ed..ce65205d3 100644
--- a/repo/blob/filesystem/filesystem_storage.go
+++ b/repo/blob/filesystem/filesystem_storage.go
@@ -83,7 +83,7 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
defer f.Close() //nolint:errcheck
if length < 0 {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return iocopy.JustCopy(output, f)
}
@@ -117,11 +117,11 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
return blob.ErrBlobNotFound
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
@@ -133,7 +133,7 @@ func (fs *fsImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string)
return blob.Metadata{}, blob.ErrBlobNotFound
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.Metadata{}, err
}
@@ -143,7 +143,7 @@ func (fs *fsImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string)
}, nil
}, fs.isRetriable)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.Metadata{}, err
}
@@ -186,7 +186,7 @@ func (fs *fsImpl) PutBlobInPath(ctx context.Context, dirPath, path string, data
log(ctx).Errorf("can't remove temp file: %v", removeErr)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -222,12 +222,12 @@ func (fs *fsImpl) createTempFileAndDir(tempFile string) (osWriteFile, error) {
return nil, errors.Wrap(err, "cannot create directory")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return fs.osi.CreateNewFile(tempFile, fs.fileMode())
}
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -235,14 +235,14 @@ func (fs *fsImpl) createTempFileAndDir(tempFile string) (osWriteFile, error) {
}
func (fs *fsImpl) DeleteBlobInPath(ctx context.Context, dirPath, path string) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return retry.WithExponentialBackoffNoValue(ctx, "DeleteBlobInPath:"+path, func() error {
err := fs.osi.Remove(path)
if err == nil || fs.osi.IsNotExist(err) {
return nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}, fs.isRetriable)
}
@@ -250,11 +250,11 @@ func (fs *fsImpl) DeleteBlobInPath(ctx context.Context, dirPath, path string) er
func (fs *fsImpl) ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) {
v, err := retry.WithExponentialBackoff(ctx, "ReadDir:"+dirname, func() (interface{}, error) {
v, err := fs.osi.ReadDir(dirname)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return v, err
}, fs.isRetriable)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -271,7 +271,7 @@ func (fs *fsImpl) ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, e
}
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -283,7 +283,7 @@ func (fs *fsImpl) ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, e
// TouchBlob updates file modification time to current time if it's sufficiently old.
func (fs *fsStorage) TouchBlob(ctx context.Context, blobID blob.ID, threshold time.Duration) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return retry.WithExponentialBackoffNoValue(ctx, "TouchBlob", func() error {
_, path, err := fs.Storage.GetShardedPathAndFilePath(ctx, blobID)
if err != nil {
@@ -294,7 +294,7 @@ func (fs *fsStorage) TouchBlob(ctx context.Context, blobID blob.ID, threshold ti
st, err := osi.Stat(path)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -307,7 +307,7 @@ func (fs *fsStorage) TouchBlob(ctx context.Context, blobID blob.ID, threshold ti
log(ctx).Debugf("updating timestamp on %v to %v", path, n)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return osi.Chtimes(path, n, n)
}, fs.Impl.(*fsImpl).isRetriable) //nolint:forcetypeassert
}
diff --git a/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go b/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go
index 3bc892ba7..8fc1518b1 100644
--- a/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go
+++ b/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go
@@ -21,10 +21,10 @@ func (fs *fsStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
}
return blob.Capacity{
- SizeB: uint64(stat.F_blocks) * uint64(stat.F_bsize), // nolint:unconvert,nolintlint
- FreeB: uint64(stat.F_bavail) * uint64(stat.F_bsize), // nolint:unconvert,nolintlint
+ SizeB: uint64(stat.F_blocks) * uint64(stat.F_bsize), //nolint:unconvert,nolintlint
+ FreeB: uint64(stat.F_bavail) * uint64(stat.F_bsize), //nolint:unconvert,nolintlint
}, nil
}, fs.Impl.(*fsImpl).isRetriable)
- return c.(blob.Capacity), err // nolint:forcetypeassert,wrapcheck
+ return c.(blob.Capacity), err //nolint:forcetypeassert,wrapcheck
}
diff --git a/repo/blob/filesystem/filesystem_storage_capacity_unix.go b/repo/blob/filesystem/filesystem_storage_capacity_unix.go
index aa7cec459..511941816 100644
--- a/repo/blob/filesystem/filesystem_storage_capacity_unix.go
+++ b/repo/blob/filesystem/filesystem_storage_capacity_unix.go
@@ -21,10 +21,10 @@ func (fs *fsStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
}
return blob.Capacity{
- SizeB: uint64(stat.Blocks) * uint64(stat.Bsize), // nolint:unconvert
- FreeB: uint64(stat.Bavail) * uint64(stat.Bsize), // nolint:unconvert
+ SizeB: uint64(stat.Blocks) * uint64(stat.Bsize), //nolint:unconvert
+ FreeB: uint64(stat.Bavail) * uint64(stat.Bsize), //nolint:unconvert
}, nil
}, fs.Impl.(*fsImpl).isRetriable)
- return c.(blob.Capacity), err // nolint:forcetypeassert,wrapcheck
+ return c.(blob.Capacity), err //nolint:forcetypeassert,wrapcheck
}
diff --git a/repo/blob/filesystem/osinterface_realos.go b/repo/blob/filesystem/osinterface_realos.go
index d19cfe5d7..c28a5094c 100644
--- a/repo/blob/filesystem/osinterface_realos.go
+++ b/repo/blob/filesystem/osinterface_realos.go
@@ -13,7 +13,7 @@ type realOS struct{
func (realOS) Open(fname string) (osReadFile, error) {
f, err := os.Open(fname) //nolint:gosec
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -27,12 +27,12 @@ func (realOS) IsExist(err error) bool { return os.IsExist(err) }
func (realOS) IsPathSeparator(c byte) bool { return os.IsPathSeparator(c) }
func (realOS) Rename(oldname, newname string) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Rename(oldname, newname)
}
func (realOS) ReadDir(dirname string) ([]fs.DirEntry, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.ReadDir(dirname)
}
@@ -49,32 +49,32 @@ func (realOS) IsLinkError(err error) bool {
}
func (realOS) Remove(fname string) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Remove(fname)
}
func (realOS) Stat(fname string) (os.FileInfo, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Stat(fname)
}
func (realOS) CreateNewFile(fname string, perm os.FileMode) (osWriteFile, error) {
- // nolint:wrapcheck,gosec
+ //nolint:wrapcheck,gosec
return os.OpenFile(fname, os.O_CREATE|os.O_EXCL|os.O_WRONLY, perm)
}
func (realOS) Mkdir(fname string, mode os.FileMode) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Mkdir(fname, mode)
}
func (realOS) MkdirAll(fname string, mode os.FileMode) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.MkdirAll(fname, mode)
}
func (realOS) Chtimes(fname string, atime, mtime time.Time) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Chtimes(fname, atime, mtime)
}
@@ -83,7 +83,7 @@ func (realOS) Geteuid() int {
}
func (realOS) Chown(fname string, uid, gid int) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Chown(fname, uid, gid)
}
diff --git a/repo/blob/gcs/gcs_storage.go b/repo/blob/gcs/gcs_storage.go
index 951e7b04a..4d60ac163 100644
--- a/repo/blob/gcs/gcs_storage.go
+++ b/repo/blob/gcs/gcs_storage.go
@@ -53,7 +53,7 @@ func (gcs *gcsStorage) GetBlob(ctx context.Context, b blob.ID, offset, length in
}
defer reader.Close() //nolint:errcheck
- // nolint:wrapcheck
+ //nolint:wrapcheck
return iocopy.JustCopy(output, reader)
}
@@ -61,7 +61,7 @@ func (gcs *gcsStorage) GetBlob(ctx context.Context, b blob.ID, offset, length in
return translateError(err)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
diff --git a/repo/blob/gdrive/gdrive_storage.go b/repo/blob/gdrive/gdrive_storage.go
index ad4edd1b1..15ca3b1e2 100644
--- a/repo/blob/gdrive/gdrive_storage.go
+++ b/repo/blob/gdrive/gdrive_storage.go
@@ -97,7 +97,7 @@ func (gdrive *gdriveStorage) GetBlob(ctx context.Context, b blob.ID, offset, len
}
}
- return blob.EnsureLengthExactly(output.Length(), length) // nolint:wrapcheck
+ return blob.EnsureLengthExactly(output.Length(), length) //nolint:wrapcheck
}
func (gdrive *gdriveStorage) GetMetadata(ctx context.Context, blobID blob.ID) (blob.Metadata, error) {
@@ -120,7 +120,7 @@ func (gdrive *gdriveStorage) GetMetadata(ctx context.Context, blobID blob.ID) (b
return blob.Metadata{}, errors.Wrapf(err, "get file by blob id in GetMetadata(%s)", blobID)
}
- file := f.(*drive.File) // nolint:forcetypeassert
+ file := f.(*drive.File) //nolint:forcetypeassert
if file.Size == 0 { // Need to retrieve the rest of metadata fields.
file, err = gdrive.getFileByFileID(ctx, file.Id, metadataFields)
@@ -340,7 +340,7 @@ func (gdrive *gdriveStorage) getFileID(ctx context.Context, blobID blob.ID) (str
return fileID, err
})
- return fileID.(string), err // nolint:forcetypeassert
+ return fileID.(string), err //nolint:forcetypeassert
}
// Get fileID for a blob with the given cache entry.
@@ -366,10 +366,10 @@ func (gdrive *gdriveStorage) tryGetFileByBlobID(ctx context.Context, blobID blob
return gdrive.getFileByBlobID(ctx, blobID, fields)
}, retryNotFound)
if err != nil {
- return nil, err // nolint:wrapcheck
+ return nil, err //nolint:wrapcheck
}
- return f.(*drive.File), nil // nolint:forcetypeassert
+ return f.(*drive.File), nil //nolint:forcetypeassert
}
func (gdrive *gdriveStorage) getFileByFileID(ctx context.Context, fileID string, fields googleapi.Field) (*drive.File, error) {
@@ -384,7 +384,7 @@ func (gdrive *gdriveStorage) getFileByBlobID(ctx context.Context, blobID blob.ID
IncludeItemsFromAllDrives(true).
Q(fmt.Sprintf("'%s' in parents and name = '%s' and mimeType = '%s' and trashed = false", gdrive.folderID, toFileName(blobID), blobMimeType)).
Fields(fields).
- PageSize(2). // nolint:gomnd
+ PageSize(2). //nolint:gomnd
Context(ctx).
Do()
@@ -452,7 +452,7 @@ func parseBlobMetadata(file *drive.File, blobID blob.ID) (blob.Metadata, error)
}
func parseModifiedTime(file *drive.File) (time.Time, error) {
- return time.Parse(time.RFC3339, file.ModifiedTime) // nolint:wrapcheck
+ return time.Parse(time.RFC3339, file.ModifiedTime) //nolint:wrapcheck
}
func retryNotFound(err error) bool {
diff --git a/repo/blob/logging/logging_storage.go b/repo/blob/logging/logging_storage.go
index d5980d044..d1425e1df 100644
--- a/repo/blob/logging/logging_storage.go
+++ b/repo/blob/logging/logging_storage.go
@@ -61,7 +61,7 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -80,7 +80,7 @@ func (s *loggingStorage) GetCapacity(ctx context.Context) (blob.Capacity, error)
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return c, err
}
@@ -102,7 +102,7 @@ func (s *loggingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Meta
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return result, err
}
@@ -124,7 +124,7 @@ func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Byte
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -144,7 +144,7 @@ func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
"error", s.translateError(err),
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -170,7 +170,7 @@ func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -187,7 +187,7 @@ func (s *loggingStorage) Close(ctx context.Context) error {
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -209,7 +209,7 @@ func (s *loggingStorage) FlushCaches(ctx context.Context) error {
"duration", dt,
)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
diff --git a/repo/blob/rclone/rclone_storage.go b/repo/blob/rclone/rclone_storage.go
index 565056997..218fb785b 100644
--- a/repo/blob/rclone/rclone_storage.go
+++ b/repo/blob/rclone/rclone_storage.go
@@ -84,8 +84,8 @@ func (r *rcloneStorage) waitForTransfersToEnd(ctx context.Context) {
func (r *rcloneStorage) Kill() {
// this will kill rclone process if any
if r.cmd != nil && r.cmd.Process != nil {
- r.cmd.Process.Kill() // nolint:errcheck
- r.cmd.Wait() // nolint:errcheck
+ r.cmd.Process.Kill() //nolint:errcheck
+ r.cmd.Wait() //nolint:errcheck
}
}
@@ -103,8 +103,8 @@ func (r *rcloneStorage) Close(ctx context.Context) error {
// this will kill rclone process if any
if r.cmd != nil && r.cmd.Process != nil {
log(ctx).Debugf("killing rclone")
- r.cmd.Process.Kill() // nolint:errcheck
- r.cmd.Wait() // nolint:errcheck
+ r.cmd.Process.Kill() //nolint:errcheck
+ r.cmd.Wait() //nolint:errcheck
}
if r.temporaryDir != "" {
@@ -194,7 +194,8 @@ func (r *rcloneStorage) runRCloneAndWaitForServerAddress(ctx context.Context, c
}
// New creates new RClone storage with specified options.
-// nolint:funlen
+//
+//nolint:funlen
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
// generate directory for all temp files.
td, err := os.MkdirTemp("", "kopia-rclone")
@@ -227,7 +228,7 @@ func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error)
}()
// write TLS files.
- // nolint:gomnd
+ //nolint:gomnd
cert, key, err := tlsutil.GenerateServerCertificate(ctx, 2048, 365*24*time.Hour, []string{"127.0.0.1"})
if err != nil {
return nil, errors.Wrap(err, "unable to generate server certificate")
@@ -264,7 +265,7 @@ func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error)
if opt.EmbeddedConfig != "" {
tmpConfigFile := filepath.Join(r.temporaryDir, "rclone.conf")
- // nolint:gomnd
+ //nolint:gomnd
if err = os.WriteFile(tmpConfigFile, []byte(opt.EmbeddedConfig), 0o600); err != nil {
return nil, errors.Wrap(err, "unable to write config file")
}
diff --git a/repo/blob/rclone/rclone_storage_test.go b/repo/blob/rclone/rclone_storage_test.go
index 87681f7ac..2d335d02d 100644
--- a/repo/blob/rclone/rclone_storage_test.go
+++ b/repo/blob/rclone/rclone_storage_test.go
@@ -328,7 +328,7 @@ func cleanupOldData(t *testing.T, rcloneExe, remotePath string) {
configFile = filepath.Join(tmpDir, "rclone.conf")
- // nolint:gomnd
+ //nolint:gomnd
if err = os.WriteFile(configFile, b, 0o600); err != nil {
t.Fatalf("unable to write config file: %v", err)
}
diff --git a/repo/blob/readonly/readonly_storage.go b/repo/blob/readonly/readonly_storage.go
index 2fa066add..7b0fe5140 100644
--- a/repo/blob/readonly/readonly_storage.go
+++ b/repo/blob/readonly/readonly_storage.go
@@ -18,17 +18,17 @@ type readonlyStorage struct {
}
func (s readonlyStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.GetCapacity(ctx)
}
func (s readonlyStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64, output blob.OutputBuffer) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.GetBlob(ctx, id, offset, length, output)
}
func (s readonlyStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.GetMetadata(ctx, id)
}
@@ -41,12 +41,12 @@ func (s readonlyStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
}
func (s readonlyStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.ListBlobs(ctx, prefix, callback)
}
func (s readonlyStorage) Close(ctx context.Context) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.Close(ctx)
}
@@ -59,7 +59,7 @@ func (s readonlyStorage) DisplayName() string {
}
func (s readonlyStorage) FlushCaches(ctx context.Context) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.base.FlushCaches(ctx)
}
diff --git a/repo/blob/registry.go b/repo/blob/registry.go
index 73fc9c7b2..c15586815 100644
--- a/repo/blob/registry.go
+++ b/repo/blob/registry.go
@@ -10,7 +10,7 @@
// creating the underlying storage location (e.g. directory), when possible.
type CreateStorageFunc func(ctx context.Context, options interface{}, isCreate bool) (Storage, error)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var factories = map[string]*storageFactory{}
// StorageFactory allows creation of repositories in a generic way.
diff --git a/repo/blob/retrying/retrying_storage.go b/repo/blob/retrying/retrying_storage.go
index fc3b899ce..ce37c57cc 100644
--- a/repo/blob/retrying/retrying_storage.go
+++ b/repo/blob/retrying/retrying_storage.go
@@ -17,43 +17,43 @@ type retryingStorage struct {
}
func (s retryingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length int64, output blob.OutputBuffer) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return retry.WithExponentialBackoffNoValue(ctx, fmt.Sprintf("GetBlob(%v,%v,%v)", id, offset, length), func() error {
output.Reset()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Storage.GetBlob(ctx, id, offset, length, output)
}, isRetriable)
}
func (s retryingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
v, err := retry.WithExponentialBackoff(ctx, "GetMetadata("+string(id)+")", func() (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Storage.GetMetadata(ctx, id)
}, isRetriable)
if err != nil {
- return blob.Metadata{}, err // nolint:wrapcheck
+ return blob.Metadata{}, err //nolint:wrapcheck
}
- return v.(blob.Metadata), nil // nolint:forcetypeassert
+ return v.(blob.Metadata), nil //nolint:forcetypeassert
}
func (s retryingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
_, err := retry.WithExponentialBackoff(ctx, "PutBlob("+string(id)+")", func() (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return true, s.Storage.PutBlob(ctx, id, data, opts)
}, isRetriable)
- return err // nolint:wrapcheck
+ return err //nolint:wrapcheck
}
func (s retryingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
_, err := retry.WithExponentialBackoff(ctx, "DeleteBlob("+string(id)+")", func() (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return true, s.Storage.DeleteBlob(ctx, id)
}, isRetriable)
- return err // nolint:wrapcheck
+ return err //nolint:wrapcheck
}
// NewWrapper returns a Storage wrapper that adds retry loop around all operations of the underlying storage.
diff --git a/repo/blob/s3/s3_storage.go b/repo/blob/s3/s3_storage.go
index 57de97ad3..af56a23b4 100644
--- a/repo/blob/s3/s3_storage.go
+++ b/repo/blob/s3/s3_storage.go
@@ -75,7 +75,7 @@ func (s *s3Storage) getBlobWithVersion(ctx context.Context, b blob.ID, version s
return nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return iocopy.JustCopy(output, o)
}
@@ -83,7 +83,7 @@ func (s *s3Storage) getBlobWithVersion(ctx context.Context, b blob.ID, version s
return translateError(err)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
@@ -194,7 +194,7 @@ func (s *s3Storage) putBlob(ctx context.Context, b blob.ID, data blob.Bytes, opt
var er minio.ErrorResponse
if errors.As(err, &er) && er.Code == "InvalidRequest" && strings.Contains(strings.ToLower(er.Message), "content-md5") {
- return versionMetadata{}, err // nolint:wrapcheck
+ return versionMetadata{}, err //nolint:wrapcheck
}
if errors.Is(err, io.EOF) && uploadInfo.Size == 0 {
@@ -208,7 +208,7 @@ func (s *s3Storage) putBlob(ctx context.Context, b blob.ID, data blob.Bytes, opt
}
if err != nil {
- return versionMetadata{}, err // nolint:wrapcheck
+ return versionMetadata{}, err //nolint:wrapcheck
}
return versionMetadata{
@@ -293,7 +293,7 @@ func (s *s3Storage) FlushCaches(ctx context.Context) error {
}
func getCustomTransport(insecureSkipVerify bool) (transport *http.Transport) {
- // nolint:gosec
+ //nolint:gosec
customTransport := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}}
return customTransport
}
@@ -393,6 +393,6 @@ func() interface{} {
return &Options{}
},
func(ctx context.Context, o interface{}, isCreate bool) (blob.Storage, error) {
- return New(ctx, o.(*Options)) // nolint:forcetypeassert
+ return New(ctx, o.(*Options)) //nolint:forcetypeassert
})
}
diff --git a/repo/blob/s3/s3_storage_test.go b/repo/blob/s3/s3_storage_test.go
index 0c23784d8..edc6ee2c1 100644
--- a/repo/blob/s3/s3_storage_test.go
+++ b/repo/blob/s3/s3_storage_test.go
@@ -132,7 +132,8 @@ func getProviderOptions(tb testing.TB, envName string) *Options {
// verifyInvalidCredentialsForGetBlob verifies that the invalid credentials
// error is returned by GetBlob.
-// nolint:thelper
+//
+//nolint:thelper
func verifyInvalidCredentialsForGetBlob(ctx context.Context, t *testing.T, r blob.Storage) {
blocks := []struct {
blk blob.ID
@@ -149,7 +150,8 @@ func verifyInvalidCredentialsForGetBlob(ctx context.Context, t *testing.T, r blo
// verifyBlobNotFoundForGetBlob verifies that the ErrBlobNotFound
// error is returned by GetBlob.
-// nolint:thelper
+//
+//nolint:thelper
func verifyBlobNotFoundForGetBlob(ctx context.Context, t *testing.T, r blob.Storage) {
blocks := []struct {
blk blob.ID
@@ -420,7 +422,7 @@ func TestInvalidCredsFailsFast(t *testing.T) {
})
require.Error(t, err)
- // nolint:forbidigo
+ //nolint:forbidigo
if dt := timer.Elapsed(); dt > 10*time.Second {
t.Fatalf("opening storage took too long, probably due to retries")
}
@@ -503,7 +505,7 @@ func TestNeedMD5AWS(t *testing.T) {
require.NoError(t, err, "could not put test blob")
}
-// nolint:thelper
+//nolint:thelper
func testStorage(t *testing.T, options *Options, runValidationTest bool, opts blob.PutOptions) {
ctx := testlogging.Context(t)
@@ -537,7 +539,7 @@ func testStorage(t *testing.T, options *Options, runValidationTest bool, opts bl
}
}
-// nolint:thelper,gocritic
+//nolint:thelper,gocritic
func testPutBlobWithInvalidRetention(t *testing.T, options Options, opts blob.PutOptions) {
ctx := testlogging.Context(t)
@@ -571,7 +573,7 @@ func TestCustomTransportNoSSLVerify(t *testing.T) {
func getURL(url string, insecureSkipVerify bool) error {
client := &http.Client{Transport: getCustomTransport(insecureSkipVerify)}
- resp, err := client.Get(url) // nolint:noctx
+ resp, err := client.Get(url) //nolint:noctx
if err != nil {
return err
}
diff --git a/repo/blob/s3/s3_versioned_test.go b/repo/blob/s3/s3_versioned_test.go
index 7e178923b..c60812213 100644
--- a/repo/blob/s3/s3_versioned_test.go
+++ b/repo/blob/s3/s3_versioned_test.go
@@ -716,10 +716,10 @@ func (s *s3Storage) putBlobVersion(ctx context.Context, id blob.ID, data blob.By
v, err := s.putBlob(ctx, id, data, opts)
vm = v
- return true, err // nolint:wrapcheck
+ return true, err //nolint:wrapcheck
}, isRetriable)
- return vm, err // nolint:wrapcheck
+ return vm, err //nolint:wrapcheck
}
func compareMetadata(tb testing.TB, a, b versionMetadata) {
diff --git a/repo/blob/sftp/sftp_storage.go b/repo/blob/sftp/sftp_storage.go
index 4633147c6..ec8f40984 100644
--- a/repo/blob/sftp/sftp_storage.go
+++ b/repo/blob/sftp/sftp_storage.go
@@ -111,14 +111,14 @@ func (s *sftpStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
return blob.Capacity{
SizeB: stat.Blocks * stat.Bsize,
FreeB: stat.Bfree * stat.Bsize,
- }, err // nolint:wrapcheck
+ }, err //nolint:wrapcheck
})
- return i.(blob.Capacity), err // nolint:forcetypeassert,wrapcheck
+ return i.(blob.Capacity), err //nolint:forcetypeassert,wrapcheck
}
func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, fullPath string, offset, length int64, output blob.OutputBuffer) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.rec.UsingConnectionNoResult(ctx, "GetBlobFromPath", func(conn connection.Connection) error {
r, err := sftpClientFromConnection(conn).Open(fullPath)
if isNotExist(err) {
@@ -134,7 +134,7 @@ func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, fullPath string
// read entire blob
output.Reset()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return iocopy.JustCopy(output, r)
}
@@ -157,7 +157,7 @@ func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, fullPath string
return errors.Wrap(err, "read error")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
})
}
@@ -179,7 +179,7 @@ func (s *sftpImpl) GetMetadataFromPath(ctx context.Context, dirPath, fullPath st
}, nil
})
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.Metadata{}, err
}
@@ -194,7 +194,7 @@ func (s *sftpImpl) PutBlobInPath(ctx context.Context, dirPath, fullPath string,
return errors.Wrap(blob.ErrUnsupportedPutBlobOption, "do-not-recreate")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.rec.UsingConnectionNoResult(ctx, "PutBlobInPath", func(conn connection.Connection) error {
randSuffix := make([]byte, tempFileRandomSuffixLen)
if _, err := rand.Read(randSuffix); err != nil {
@@ -261,7 +261,7 @@ func (osInterface) IsPathSeparator(c byte) bool {
}
func (osi osInterface) Mkdir(name string, perm os.FileMode) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return osi.cli.Mkdir(name)
}
@@ -275,7 +275,7 @@ func (s *sftpImpl) createTempFileAndDir(cli *sftp.Client, tempFile string) (*sft
return nil, errors.Wrap(err, "cannot create directory")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cli.OpenFile(tempFile, flags)
}
@@ -295,7 +295,7 @@ func isNotExist(err error) bool {
}
func (s *sftpImpl) DeleteBlobInPath(ctx context.Context, dirPath, fullPath string) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.rec.UsingConnectionNoResult(ctx, "DeleteBlobInPath", func(conn connection.Connection) error {
err := sftpClientFromConnection(conn).Remove(fullPath)
if err == nil || isNotExist(err) {
@@ -308,11 +308,11 @@ func (s *sftpImpl) DeleteBlobInPath(ctx context.Context, dirPath, fullPath strin
func (s *sftpImpl) ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) {
v, err := s.rec.UsingConnection(ctx, "ReadDir", func(conn connection.Connection) (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return sftpClientFromConnection(conn).ReadDir(dirname)
})
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -367,9 +367,9 @@ func getHostKeyCallback(opt *Options) (ssh.HostKeyCallback, error) {
}
// this file is no longer needed after `knownhosts.New` returns, so we can delete it.
- defer os.Remove(tmpFile) // nolint:errcheck
+ defer os.Remove(tmpFile) //nolint:errcheck
- // nolint:wrapcheck
+ //nolint:wrapcheck
return knownhosts.New(tmpFile)
}
@@ -377,7 +377,7 @@ func getHostKeyCallback(opt *Options) (ssh.HostKeyCallback, error) {
return nil, errors.Errorf("known hosts path must be absolute")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return knownhosts.New(opt.knownHostsFile())
}
@@ -483,7 +483,7 @@ func getSFTPClientExternal(ctx context.Context, opt *Options) (*sftpConnection,
closeFunc := func() error {
p := cmd.Process
if p != nil {
- p.Kill() // nolint:errcheck
+ p.Kill() //nolint:errcheck
}
return nil
@@ -492,7 +492,7 @@ func getSFTPClientExternal(ctx context.Context, opt *Options) (*sftpConnection,
// open the SFTP session
c, err := sftp.NewClientPipe(rd, wr)
if err != nil {
- closeFunc() // nolint:errcheck
+ closeFunc() //nolint:errcheck
return nil, errors.Wrap(err, "error creating sftp client pipe")
}
@@ -526,7 +526,7 @@ func getSFTPClient(ctx context.Context, opt *Options) (*sftpConnection, error) {
sftp.UseConcurrentReads(true),
)
if err != nil {
- conn.Close() // nolint:errcheck
+ conn.Close() //nolint:errcheck
return nil, errors.Wrapf(err, "unable to create sftp client")
}
diff --git a/repo/blob/sftp/sftp_storage_test.go b/repo/blob/sftp/sftp_storage_test.go
index 2494a6a8b..d6019f577 100644
--- a/repo/blob/sftp/sftp_storage_test.go
+++ b/repo/blob/sftp/sftp_storage_test.go
@@ -54,7 +54,7 @@ func mustGetLocalTmpDir(t *testing.T) string {
return tmpDir
}
-// nolint:unparam
+//nolint:unparam
func runAndGetOutput(t *testing.T, cmd string, args ...string) ([]byte, error) {
t.Helper()
@@ -75,7 +75,7 @@ func runAndGetOutput(t *testing.T, cmd string, args ...string) ([]byte, error) {
return o, nil
}
-// nolint:unparam
+//nolint:unparam
func mustRunCommand(t *testing.T, cmd string, args ...string) []byte {
t.Helper()
@@ -268,7 +268,7 @@ func TestInvalidServerFailsFast(t *testing.T) {
t.Fatalf("unexpected success with bad credentials")
}
- // nolint:forbidigo
+ //nolint:forbidigo
if dt := timer.Elapsed(); dt > 10*time.Second {
t.Fatalf("opening storage took too long, probably due to retries")
}
@@ -321,7 +321,7 @@ func deleteBlobs(ctx context.Context, t *testing.T, st blob.Storage) {
}
}
-// nolint:gocritic
+//nolint:gocritic
func createSFTPStorage(ctx context.Context, t *testing.T, opt sftp.Options, embed bool) (blob.Storage, error) {
t.Helper()
diff --git a/repo/blob/sharded/sharded.go b/repo/blob/sharded/sharded.go
index a679b59fc..f84c7ff85 100644
--- a/repo/blob/sharded/sharded.go
+++ b/repo/blob/sharded/sharded.go
@@ -51,7 +51,7 @@ func (s *Storage) GetBlob(ctx context.Context, blobID blob.ID, offset, length in
return errors.Wrap(err, "error determining sharded path")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Impl.GetBlobFromPath(ctx, dirPath, filePath, offset, length, output)
}
@@ -72,7 +72,7 @@ func (s *Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(b
pw := parallelwork.NewQueue()
// channel to which pw will write blob.Metadata, some buf
- result := make(chan blob.Metadata, 128) // nolint:gomnd
+ result := make(chan blob.Metadata, 128) //nolint:gomnd
finished := make(chan struct{})
defer close(finished)
@@ -161,7 +161,7 @@ func (s *Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(b
}
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return eg.Wait()
}
@@ -185,7 +185,7 @@ func (s *Storage) PutBlob(ctx context.Context, blobID blob.ID, data blob.Bytes,
return errors.Wrap(err, "error determining sharded path")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Impl.PutBlobInPath(ctx, dirPath, filePath, data, opts)
}
@@ -196,7 +196,7 @@ func (s *Storage) DeleteBlob(ctx context.Context, blobID blob.ID) error {
return errors.Wrap(err, "error determining sharded path")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Impl.DeleteBlobInPath(ctx, dirPath, filePath)
}
@@ -213,7 +213,7 @@ func (s *Storage) getParameters(ctx context.Context) (*Parameters, error) {
dotShardsFile := path.Join(s.RootPath, ParametersFile)
- // nolint:nestif
+ //nolint:nestif
if err := s.Impl.GetBlobFromPath(ctx, s.RootPath, dotShardsFile, 0, -1, &tmp); err != nil {
if !errors.Is(err, blob.ErrBlobNotFound) {
return nil, errors.Wrap(err, "error getting sharding parameters for storage")
diff --git a/repo/blob/storage.go b/repo/blob/storage.go
index 38d8c0c44..651644c8a 100644
--- a/repo/blob/storage.go
+++ b/repo/blob/storage.go
@@ -200,7 +200,7 @@ func ListAllBlobs(ctx context.Context, st Reader, prefix ID) ([]Metadata, error)
// IterateAllPrefixesInParallel invokes the provided callback and returns the first error returned by the callback or nil.
func IterateAllPrefixesInParallel(ctx context.Context, parallelism int, st Storage, prefixes []ID, callback func(Metadata) error) error {
if len(prefixes) == 1 {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return st.ListBlobs(ctx, prefixes[0], callback)
}
@@ -339,7 +339,7 @@ func PutBlobAndGetMetadata(ctx context.Context, st Storage, blobID ID, data Byte
BlobID: blobID,
Length: int64(data.Length()),
Timestamp: *opts.GetModTime,
- }, err // nolint:wrapcheck
+ }, err //nolint:wrapcheck
}
// ReadBlobMap reads the map of all the blobs indexed by ID.
diff --git a/repo/blob/throttling/throttler_test.go b/repo/blob/throttling/throttler_test.go
index b7c0b355e..4900697a2 100644
--- a/repo/blob/throttling/throttler_test.go
+++ b/repo/blob/throttling/throttler_test.go
@@ -94,7 +94,7 @@ func TestThrottlerLargeWindow(t *testing.T) {
require.Greater(t, timer.Elapsed(), 900*time.Millisecond)
}
-// nolint:thelper
+//nolint:thelper
func testRateLimiting(t *testing.T, name string, wantRate float64, worker func(total *int64)) {
t.Run(name, func(t *testing.T) {
t.Parallel()
diff --git a/repo/blob/throttling/throttling_storage.go b/repo/blob/throttling/throttling_storage.go
index 4ee2ff7e1..3662bbaa9 100644
--- a/repo/blob/throttling/throttling_storage.go
+++ b/repo/blob/throttling/throttling_storage.go
@@ -71,21 +71,21 @@ func (s *throttlingStorage) GetBlob(ctx context.Context, id blob.ID, offset, len
}
}
- return err // nolint:wrapcheck
+ return err //nolint:wrapcheck
}
func (s *throttlingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
s.throttler.BeforeOperation(ctx, operationGetMetadata)
defer s.throttler.AfterOperation(ctx, operationGetMetadata)
- return s.Storage.GetMetadata(ctx, id) // nolint:wrapcheck
+ return s.Storage.GetMetadata(ctx, id) //nolint:wrapcheck
}
func (s *throttlingStorage) ListBlobs(ctx context.Context, blobIDPrefix blob.ID, cb func(bm blob.Metadata) error) error {
s.throttler.BeforeOperation(ctx, operationListBlobs)
defer s.throttler.AfterOperation(ctx, operationListBlobs)
- return s.Storage.ListBlobs(ctx, blobIDPrefix, cb) // nolint:wrapcheck
+ return s.Storage.ListBlobs(ctx, blobIDPrefix, cb) //nolint:wrapcheck
}
func (s *throttlingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
@@ -94,14 +94,14 @@ func (s *throttlingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.B
s.throttler.BeforeUpload(ctx, int64(data.Length()))
- return s.Storage.PutBlob(ctx, id, data, opts) // nolint:wrapcheck
+ return s.Storage.PutBlob(ctx, id, data, opts) //nolint:wrapcheck
}
func (s *throttlingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
s.throttler.BeforeOperation(ctx, operationDeleteBlob)
defer s.throttler.AfterOperation(ctx, operationDeleteBlob)
- return s.Storage.DeleteBlob(ctx, id) // nolint:wrapcheck
+ return s.Storage.DeleteBlob(ctx, id) //nolint:wrapcheck
}
// NewWrapper returns a Storage wrapper that adds retry loop around all operations of the underlying storage.
diff --git a/repo/blob/throttling/token_bucket.go b/repo/blob/throttling/token_bucket.go
index 26bee670a..fd70c3c1c 100644
--- a/repo/blob/throttling/token_bucket.go
+++ b/repo/blob/throttling/token_bucket.go
@@ -113,7 +113,7 @@ func sleepWithContext(ctx context.Context, dur time.Duration) {
func newTokenBucket(name string, initialTokens, maxTokens float64, addTimeUnit time.Duration) *tokenBucket {
return &tokenBucket{
name: name,
- now: time.Now, // nolint:forbidigo
+ now: time.Now, //nolint:forbidigo
sleep: sleepWithContext,
numTokens: initialTokens,
maxTokens: maxTokens,
diff --git a/repo/blob/webdav/webdav_storage.go b/repo/blob/webdav/webdav_storage.go
index 4ad31b48f..ea1d9278a 100644
--- a/repo/blob/webdav/webdav_storage.go
+++ b/repo/blob/webdav/webdav_storage.go
@@ -73,7 +73,7 @@ func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path stri
return d.translateError(err)
}
- defer s.Close() // nolint:errcheck
+ defer s.Close() //nolint:errcheck
if length == 0 {
return nil
@@ -83,7 +83,7 @@ func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path stri
return errors.Wrap(err, "error populating output")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.EnsureLengthExactly(output.Length(), length)
}
@@ -159,23 +159,23 @@ func (d *davStorageImpl) PutBlobInPath(ctx context.Context, dirPath, filePath st
var buf bytes.Buffer
- data.WriteTo(&buf) // nolint:errcheck
+ data.WriteTo(&buf) //nolint:errcheck
b := buf.Bytes()
- // nolint:wrapcheck
+ //nolint:wrapcheck
if err := retry.WithExponentialBackoffNoValue(ctx, "WriteTemporaryFileAndCreateParentDirs", func() error {
mkdirAttempted := false
for {
- // nolint:wrapcheck
+ //nolint:wrapcheck
err := d.translateError(d.cli.Write(writePath, b, defaultFilePerm))
if err == nil {
if d.Options.AtomicWrites {
return nil
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return d.cli.Rename(writePath, filePath, true)
}
@@ -213,7 +213,7 @@ func (d *davStorageImpl) PutBlobInPath(ctx context.Context, dirPath, filePath st
func (d *davStorageImpl) DeleteBlobInPath(ctx context.Context, dirPath, filePath string) error {
return d.translateError(retry.WithExponentialBackoffNoValue(ctx, "DeleteBlobInPath", func() error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return d.cli.Remove(filePath)
}, isRetriable))
}
diff --git a/repo/blob/webdav/webdav_storage_test.go b/repo/blob/webdav/webdav_storage_test.go
index 120c2ab42..e352d7ea5 100644
--- a/repo/blob/webdav/webdav_storage_test.go
+++ b/repo/blob/webdav/webdav_storage_test.go
@@ -32,7 +32,7 @@ func basicAuth(h http.Handler) http.HandlerFunc {
http.Error(w, "not authorized", http.StatusForbidden)
} else {
w.Header().Set("WWW-Authenticate", `Basic realm="testing"`)
- w.WriteHeader(401)
+ w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte("Unauthorized.\n"))
}
}
@@ -145,7 +145,7 @@ func transformMissingPUTs(next http.Handler) http.HandlerFunc {
}
}
-// nolint:thelper
+//nolint:thelper
func verifyWebDAVStorage(t *testing.T, url, username, password string, shardSpec []int) {
ctx := testlogging.Context(t)
diff --git a/repo/compression/compressor.go b/repo/compression/compressor.go
index debdf2c1a..858d1d68e 100644
--- a/repo/compression/compressor.go
+++ b/repo/compression/compressor.go
@@ -23,7 +23,8 @@ type Compressor interface {
}
// maps of registered compressors by header ID and name.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var (
ByHeaderID = map[HeaderID]Compressor{}
ByName = map[Name]Compressor{}
diff --git a/repo/compression/compressor_deflate.go b/repo/compression/compressor_deflate.go
index d54bc2c7a..1375e253b 100644
--- a/repo/compression/compressor_deflate.go
+++ b/repo/compression/compressor_deflate.go
@@ -44,7 +44,7 @@ func (c *deflateCompressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*flate.Writer)
defer c.pool.Put(w)
diff --git a/repo/compression/compressor_gzip.go b/repo/compression/compressor_gzip.go
index 56589bea8..3f1431b1e 100644
--- a/repo/compression/compressor_gzip.go
+++ b/repo/compression/compressor_gzip.go
@@ -41,7 +41,7 @@ func (c *gzipCompressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*gzip.Writer)
defer c.pool.Put(w)
diff --git a/repo/compression/compressor_lz4.go b/repo/compression/compressor_lz4.go
index e5b8c1ec5..71e387ba7 100644
--- a/repo/compression/compressor_lz4.go
+++ b/repo/compression/compressor_lz4.go
@@ -37,7 +37,7 @@ func (c *lz4Compressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*lz4.Writer)
defer c.pool.Put(w)
diff --git a/repo/compression/compressor_pgzip.go b/repo/compression/compressor_pgzip.go
index b9ede99b7..6f5e03b88 100644
--- a/repo/compression/compressor_pgzip.go
+++ b/repo/compression/compressor_pgzip.go
@@ -42,7 +42,7 @@ func (c *pgzipCompressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*pgzip.Writer)
defer c.pool.Put(w)
diff --git a/repo/compression/compressor_s2.go b/repo/compression/compressor_s2.go
index 9a87b8c36..04be69de1 100644
--- a/repo/compression/compressor_s2.go
+++ b/repo/compression/compressor_s2.go
@@ -45,7 +45,7 @@ func (c *s2Compressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*s2.Writer)
defer c.pool.Put(w)
diff --git a/repo/compression/compressor_zstd.go b/repo/compression/compressor_zstd.go
index c57216729..9b10fad7b 100644
--- a/repo/compression/compressor_zstd.go
+++ b/repo/compression/compressor_zstd.go
@@ -42,7 +42,7 @@ func (c *zstdCompressor) Compress(output io.Writer, input io.Reader) error {
return errors.Wrap(err, "unable to write header")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
w := c.pool.Get().(*zstd.Encoder)
defer c.pool.Put(w)
diff --git a/repo/connect.go b/repo/connect.go
index 6ec83af72..103d4623f 100644
--- a/repo/connect.go
+++ b/repo/connect.go
@@ -43,7 +43,7 @@ func Connect(ctx context.Context, configFile string, st blob.Storage, password s
f, err := format.ParseKopiaRepositoryJSON(formatBytes.ToByteSlice())
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -102,7 +102,7 @@ func Disconnect(ctx context.Context, configFile string) error {
log(ctx).Errorf("unable to remove maintenance lock file", maintenanceLock)
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.Remove(configFile)
}
diff --git a/repo/content/blob_crypto.go b/repo/content/blob_crypto.go
index 21fbce163..df0dbf6dd 100644
--- a/repo/content/blob_crypto.go
+++ b/repo/content/blob_crypto.go
@@ -22,7 +22,7 @@ type crypter interface {
// getIndexBlobIV gets the initialization vector from the provided blob ID by taking
// 32 characters immediately preceding the first dash ('-') and decoding them using base16.
func getIndexBlobIV(s blob.ID) ([]byte, error) {
- if p := strings.Index(string(s), "-"); p >= 0 { // nolint:gocritic
+ if p := strings.Index(string(s), "-"); p >= 0 { //nolint:gocritic
s = s[0:p]
}
diff --git a/repo/content/committed_content_index.go b/repo/content/committed_content_index.go
index dd5b90c36..14047452b 100644
--- a/repo/content/committed_content_index.go
+++ b/repo/content/committed_content_index.go
@@ -129,7 +129,7 @@ func (c *committedContentIndex) listContents(r IDRange, cb func(i Info) error) e
deletionWatermark := c.deletionWatermark
c.mu.Unlock()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return m.Iterate(r, func(i Info) error {
if shouldIgnore(i, deletionWatermark) {
return nil
@@ -169,7 +169,7 @@ func (c *committedContentIndex) merge(ctx context.Context, indexFiles []blob.ID)
ndx, err = c.cache.openIndex(ctx, e)
if err != nil {
- newlyOpened.Close() // nolint:errcheck
+ newlyOpened.Close() //nolint:errcheck
return nil, nil, errors.Wrapf(err, "unable to open pack index %q", e)
}
@@ -183,7 +183,7 @@ func (c *committedContentIndex) merge(ctx context.Context, indexFiles []blob.ID)
mergedAndCombined, err := c.combineSmallIndexes(newMerged)
if err != nil {
- newlyOpened.Close() // nolint:errcheck
+ newlyOpened.Close() //nolint:errcheck
return nil, nil, errors.Wrap(err, "unable to combine small indexes")
}
diff --git a/repo/content/committed_content_index_cache_test.go b/repo/content/committed_content_index_cache_test.go
index a4be003d8..8d877bb75 100644
--- a/repo/content/committed_content_index_cache_test.go
+++ b/repo/content/committed_content_index_cache_test.go
@@ -32,7 +32,7 @@ func TestCommittedContentIndexCache_Memory(t *testing.T) {
}, nil)
}
-// nolint:thelper
+//nolint:thelper
func testCache(t *testing.T, cache committedContentIndexCache, fakeTime *faketime.ClockTimeWithOffset) {
ctx := testlogging.Context(t)
diff --git a/repo/content/committed_content_index_disk_cache.go b/repo/content/committed_content_index_disk_cache.go
index 5e9906385..9653ba433 100644
--- a/repo/content/committed_content_index_disk_cache.go
+++ b/repo/content/committed_content_index_disk_cache.go
@@ -43,7 +43,7 @@ func (c *diskCommittedContentIndexCache) openIndex(ctx context.Context, indexBlo
ndx, err := index.Open(f, closeMmap, c.v1PerContentOverhead)
if err != nil {
- closeMmap() // nolint:errcheck
+ closeMmap() //nolint:errcheck
return nil, errors.Wrapf(err, "error openind index from %v", indexBlobID)
}
@@ -59,7 +59,7 @@ func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MM
)
// retry milliseconds: 10, 20, 40, 80, 160, 320, 640, 1280, total ~2.5s
- f, err := os.Open(path) // nolint:gosec
+ f, err := os.Open(path) //nolint:gosec
nextDelay := startingDelay
retryCount := 0
@@ -69,7 +69,7 @@ func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MM
time.Sleep(nextDelay)
nextDelay *= 2
- f, err = os.Open(path) // nolint:gosec
+ f, err = os.Open(path) //nolint:gosec
}
if err != nil {
@@ -78,7 +78,7 @@ func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MM
mm, err := mmap.Map(f, mmap.RDONLY, 0)
if err != nil {
- f.Close() // nolint:errcheck
+ f.Close() //nolint:errcheck
return nil, nil, errors.Wrap(err, "mmap error")
}
diff --git a/repo/content/committed_read_manager.go b/repo/content/committed_read_manager.go
index bde047fd0..02b248705 100644
--- a/repo/content/committed_read_manager.go
+++ b/repo/content/committed_read_manager.go
@@ -41,7 +41,7 @@
DefaultIndexCacheSweepAge = 1 * time.Hour
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var cachedIndexBlobPrefixes = []blob.ID{
LegacyIndexBlobPrefix,
compactionLogBlobPrefix,
@@ -53,7 +53,7 @@
epoch.RangeCheckpointIndexBlobPrefix,
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var allIndexBlobPrefixes = []blob.ID{
LegacyIndexBlobPrefix,
epoch.UncompactedIndexBlobPrefix,
@@ -187,7 +187,7 @@ func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error {
}
if err := ctx.Err(); err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
@@ -330,7 +330,7 @@ func (sm *SharedManager) IndexBlobs(ctx context.Context, includeInactive bool) (
blobs, _, err := ibm.listActiveIndexBlobs(ctx)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blobs, err
}
@@ -365,7 +365,7 @@ func newCacheBackingStorage(ctx context.Context, caching *CachingOptions, subdir
}
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return filesystem.New(ctx, &filesystem.Options{
Path: blobListCacheDir,
Options: sharded.Options{
@@ -545,7 +545,7 @@ func (sm *SharedManager) release(ctx context.Context) error {
sm.indexBlobCache.Close(ctx)
if sm.internalLogger != nil {
- sm.internalLogger.Sync() // nolint:errcheck
+ sm.internalLogger.Sync() //nolint:errcheck
}
sm.internalLogManager.Close(ctx)
diff --git a/repo/content/content_formatter_test.go b/repo/content/content_formatter_test.go
index 043d378b5..eeb88e7f3 100644
--- a/repo/content/content_formatter_test.go
+++ b/repo/content/content_formatter_test.go
@@ -72,7 +72,7 @@ func TestFormatters(t *testing.T) {
}
}
-// nolint:thelper
+//nolint:thelper
func verifyEndToEndFormatter(ctx context.Context, t *testing.T, hashAlgo, encryptionAlgo string) {
data := blobtesting.DataMap{}
keyTime := map[blob.ID]time.Time{}
diff --git a/repo/content/content_index_recovery.go b/repo/content/content_index_recovery.go
index fb09bd277..0ce5b1b20 100644
--- a/repo/content/content_index_recovery.go
+++ b/repo/content/content_index_recovery.go
@@ -74,7 +74,7 @@ func (p *packContentPostamble) toBytes() ([]byte, error) {
binary.BigEndian.PutUint32(buf[n:], checksum)
n += 4
- if n > 255 { // nolint:gomnd
+ if n > 255 { //nolint:gomnd
return nil, errors.Errorf("postamble too long: %v", n)
}
@@ -94,7 +94,7 @@ func findPostamble(b []byte) *packContentPostamble {
// length of postamble is the last byte
postambleLength := int(b[len(b)-1])
- if postambleLength < 5 { // nolint:gomnd
+ if postambleLength < 5 { //nolint:gomnd
// too short, must be at least 5 bytes (checksum + own length)
return nil
}
diff --git a/repo/content/content_manager.go b/repo/content/content_manager.go
index 3611b30d8..634a7ff66 100644
--- a/repo/content/content_manager.go
+++ b/repo/content/content_manager.go
@@ -41,7 +41,8 @@
var tracer = otel.Tracer("kopia/content")
// PackBlobIDPrefixes contains all possible prefixes for pack blobs.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var PackBlobIDPrefixes = []blob.ID{
PackBlobIDPrefixRegular,
PackBlobIDPrefixSpecial,
@@ -428,7 +429,7 @@ func (bm *WriteManager) writeIndexBlobs(ctx context.Context, dataShards []gather
return nil, err
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return ibm.writeIndexBlobs(ctx, dataShards, sessionID)
}
@@ -697,7 +698,9 @@ func (bm *WriteManager) UndeleteContent(ctx context.Context, contentID ID) error
// When onlyRewriteDelete is true, the content is only rewritten if the existing
// content is marked as deleted. The new content is NOT marked deleted.
-// When onlyRewriteDelete is false, the content is unconditionally rewritten
+//
+// When onlyRewriteDeleted is false, the content is unconditionally rewritten
+//
// and the content's deleted status is preserved.
func (bm *WriteManager) rewriteContent(ctx context.Context, contentID ID, onlyRewriteDeleted bool, mp format.MutableParameters) error {
var data gather.WriteBuffer
@@ -751,7 +754,7 @@ func (bm *WriteManager) getOrCreatePendingPackInfoLocked(ctx context.Context, pr
b.Append(bm.format.RepositoryFormatBytes())
- // nolint:gosec
+ //nolint:gosec
if err := writeRandomBytesToBuffer(b, rand.Intn(bm.maxPreambleLength-bm.minPreambleLength+1)+bm.minPreambleLength); err != nil {
return nil, errors.Wrap(err, "unable to prepare content preamble")
}
diff --git a/repo/content/content_manager_lock_free.go b/repo/content/content_manager_lock_free.go
index ecd691ce9..ba8cfa711 100644
--- a/repo/content/content_manager_lock_free.go
+++ b/repo/content/content_manager_lock_free.go
@@ -35,7 +35,7 @@ func (sm *SharedManager) maybeCompressAndEncryptDataForPacking(data gather.Bytes
comp = compression.HeaderZstdFastest
}
- // nolint:nestif
+ //nolint:nestif
if comp != NoCompression {
if mp.IndexVersion < index.Version2 {
return NoCompression, errors.Errorf("compression is not enabled for this repository")
diff --git a/repo/content/content_manager_metrics.go b/repo/content/content_manager_metrics.go
index 1f514410d..c979532bb 100644
--- a/repo/content/content_manager_metrics.go
+++ b/repo/content/content_manager_metrics.go
@@ -6,7 +6,8 @@
)
// content cache metrics.
-// nolint:gochecknoglobals,promlinter
+//
+//nolint:gochecknoglobals,promlinter
var (
metricContentGetCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "kopia_content_get_count",
diff --git a/repo/content/content_manager_test.go b/repo/content/content_manager_test.go
index 52f9f9ab4..c2dfb15ad 100644
--- a/repo/content/content_manager_test.go
+++ b/repo/content/content_manager_test.go
@@ -330,7 +330,7 @@ func (s *contentManagerSuite) TestContentManagerWriteMultiple(t *testing.T) {
}
bm = s.newTestContentManagerWithCustomTime(t, st, timeFunc)
- defer bm.Close(ctx) // nolint:gocritic
+ defer bm.Close(ctx) //nolint:gocritic
}
pos := rand.Intn(len(contentIDs))
@@ -651,7 +651,7 @@ func (s *contentManagerSuite) TestDeletionAfterCreationWithFrozenTime(t *testing
require.Equal(t, fakeTime.Add(2*time.Second), ci.Timestamp().UTC())
}
-// nolint:gocyclo
+//nolint:gocyclo
func (s *contentManagerSuite) TestUndeleteContentSimple(t *testing.T) {
ctx := testlogging.Context(t)
data := blobtesting.DataMap{}
@@ -805,7 +805,7 @@ func (s *contentManagerSuite) TestUndeleteContentSimple(t *testing.T) {
}
}
-// nolint:gocyclo
+//nolint:gocyclo
func (s *contentManagerSuite) TestUndeleteContent(t *testing.T) {
ctx := testlogging.Context(t)
data := blobtesting.DataMap{}
@@ -1819,7 +1819,7 @@ func (s *contentManagerSuite) TestAutoCompressionOfMetadata(t *testing.T) {
st := blobtesting.NewMapStorage(data, nil, nil)
bm := s.newTestContentManager(t, st)
- // nolint:lll
+ //nolint:lll
contentID, err := bm.WriteContent(ctx,
gather.FromSlice([]byte(`{"stream":"kopia:directory","entries":[{"name":".chglog","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.159239913-07:00","uid":501,"gid":20,"obj":"k18c2fa7d9108a2bf0d9d5b8e7993c48d","summ":{"size":1897,"files":2,"symlinks":0,"dirs":1,"maxTime":"2022-03-22T22:45:22.159499411-07:00","numFailed":0}},{"name":".git","type":"d","mode":"0755","mtime":"2022-04-03T17:47:38.340226306-07:00","uid":501,"gid":20,"obj":"k0ad4214eb961aa78cf06611ec4563086","summ":{"size":88602907,"files":7336,"symlinks":0,"dirs":450,"maxTime":"2022-04-03T17:28:54.030135198-07:00","numFailed":0}},{"name":".github","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.160470238-07:00","uid":501,"gid":20,"obj":"k76bee329054d5574d89a4e87c3f24088","summ":{"size":20043,"files":13,"symlinks":0,"dirs":2,"maxTime":"2022-03-22T22:45:22.162580934-07:00","numFailed":0}},{"name":".logs","type":"d","mode":"0750","mtime":"2021-11-06T13:43:35.082115457-07:00","uid":501,"gid":20,"obj":"k1e7d5bda28d6b684bb180cac16775c1c","summ":{"size":382943352,"files":1823,"symlinks":0,"dirs":122,"maxTime":"2021-11-06T13:43:45.111270118-07:00","numFailed":0}},{"name":".release","type":"d","mode":"0755","mtime":"2021-04-16T06:26:47-07:00","uid":501,"gid":20,"obj":"k0eb539316600015bf2861e593f68e18d","summ":{"size":159711446,"files":19,"symlinks":0,"dirs":1,"maxTime":"2021-04-16T06:26:47-07:00","numFailed":0}},{"name":".screenshots","type":"d","mode":"0755","mtime":"2022-01-29T00:12:29.023594487-08:00","uid":501,"gid":20,"obj":"k97f6dbc82e84c97c955364d12ddc44bd","summ":{"size":6770746,"files":53,"symlinks":0,"dirs":7,"maxTime":"2022-03-19T18:59:51.559099257-07:00","numFailed":0}},{"name":"app","type":"d","mode":"0755","mtime":"2022-03-26T22:28:51.863826565-07:00","uid":501,"gid":20,"obj":"k656b41b8679c2537392b3997648cf43e","summ":{"size":565633611,"files":44812,"symlinks":0,"dirs":7576,"maxTime":"2022-03-26T22:28:51.863946606-07:00","numFailed":0}},{"name":"cli","type":"d","mode":"0755","mtime":"2022-0
4-03T12:24:52.84319224-07:00","uid":501,"gid":20,"obj":"k04ab4f2a1da96c47f62a51f119dba14d","summ":{"size":468233,"files":164,"symlinks":0,"dirs":1,"maxTime":"2022-04-03T12:24:52.843267824-07:00","numFailed":0}},{"name":"dist","type":"d","mode":"0755","mtime":"2022-03-19T22:46:00.12834831-07:00","uid":501,"gid":20,"obj":"k19fc65da8a47b7702bf6b501b7f3e1b5","summ":{"size":3420732994,"files":315,"symlinks":0,"dirs":321,"maxTime":"2022-03-27T12:10:08.019195221-07:00","numFailed":0}},{"name":"fs","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.194955195-07:00","uid":501,"gid":20,"obj":"k1f0be83e34826450e651f16ba63c5b9c","summ":{"size":80421,"files":21,"symlinks":0,"dirs":6,"maxTime":"2022-03-22T22:45:22.195085778-07:00","numFailed":0}},{"name":"icons","type":"d","mode":"0755","mtime":"2022-01-23T12:06:14.739575928-08:00","uid":501,"gid":20,"obj":"k9e76c283312bdc6e562f66c7d6526396","summ":{"size":361744,"files":13,"symlinks":0,"dirs":1,"maxTime":"2021-03-12T19:28:45-08:00","numFailed":0}},{"name":"internal","type":"d","mode":"0755","mtime":"2022-04-02T18:14:02.459772332-07:00","uid":501,"gid":20,"obj":"k181db968f69045159753f8d6f3f3454f","summ":{"size":778467,"files":198,"symlinks":0,"dirs":56,"maxTime":"2022-04-03T12:24:52.844331708-07:00","numFailed":0}},{"name":"node_modules","type":"d","mode":"0755","mtime":"2021-05-16T15:45:19-07:00","uid":501,"gid":20,"obj":"kf2b636c57a7cc412739d2c10ca7ab0a3","summ":{"size":5061213,"files":361,"symlinks":0,"dirs":69,"maxTime":"2021-05-16T15:45:19-07:00","numFailed":0}},{"name":"repo","type":"d","mode":"0755","mtime":"2022-04-03T12:24:52.844407167-07:00","uid":501,"gid":20,"obj":"kb839dcd04d94a1b568f7f5e8fc809fab","summ":{"size":992877,"files":193,"symlinks":0,"dirs":27,"maxTime":"2022-04-03T17:47:31.211316848-07:00","numFailed":0}},{"name":"site","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.250939688-07:00","uid":501,"gid":20,"obj":"k5d8ce70ca4337c17219502963f0fe6d3","summ":{"size":58225583,"files":11387,"symlinks":0,"d
irs":557,"maxTime":"2022-03-22T22:45:22.258280685-07:00","numFailed":0}},{"name":"snapshot","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.265723348-07:00","uid":501,"gid":20,"obj":"k6201166bd99c8fe85d53d742e92c81a6","summ":{"size":316009,"files":66,"symlinks":0,"dirs":6,"maxTime":"2022-03-26T23:04:24.313115653-07:00","numFailed":0}},{"name":"tests","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.2749515-07:00","uid":501,"gid":20,"obj":"k1e20890089f6cbad3c6fe79cbae71e09","summ":{"size":657360,"files":183,"symlinks":0,"dirs":30,"maxTime":"2022-04-02T18:41:02.232496031-07:00","numFailed":0}},{"name":"tools","type":"d","mode":"0755","mtime":"2022-03-22T22:45:22.279094142-07:00","uid":501,"gid":20,"obj":"k6464e940fea5ef916ab86eafdb68b1cd","summ":{"size":889231805,"files":12412,"symlinks":0,"dirs":3405,"maxTime":"2022-03-22T22:45:22.279144141-07:00","numFailed":0}},{"name":".DS_Store","type":"f","mode":"0644","size":14340,"mtime":"2022-02-12T20:06:35.60110891-08:00","uid":501,"gid":20,"obj":"d9295958410ae3b73f68033274cd7a8f"},{"name":".codecov.yml","type":"f","mode":"0644","size":620,"mtime":"2022-03-22T22:45:22.159772743-07:00","uid":501,"gid":20,"obj":"6f81038ca8d7b81804f42031142731ed"},{"name":".gitattributes","type":"f","mode":"0644","size":340,"mtime":"2022-03-22T22:45:22.159870909-07:00","uid":501,"gid":20,"obj":"5608c2d289164627e8bdb468bbee2643"},{"name":".gitignore","type":"f","mode":"0644","size":321,"mtime":"2022-03-22T22:45:22.162843932-07:00","uid":501,"gid":20,"obj":"c43ce513c6371e0838fc553b77f5cdb2"},{"name":".golangci.yml","type":"f","mode":"0644","size":3071,"mtime":"2022-03-22T22:45:22.163100014-07:00","uid":501,"gid":20,"obj":"4289f49e43fba6800fa75462bd2ad43e"},{"name":".gometalinter.json","type":"f","mode":"0644","size":163,"mtime":"2019-05-09T22:33:06-07:00","uid":501,"gid":20,"obj":"fe4fc9d77cfb5f1b062414fdfd121713"},{"name":".goreleaser.yml","type":"f","mode":"0644","size":1736,"mtime":"2022-03-22T22:45:22.163354888-07:00","uid":501,"gi
d":20,"obj":"91093a462f4f72c619fb9f144702c1bf"},{"name":".linterr.txt","type":"f","mode":"0644","size":425,"mtime":"2021-11-08T22:14:29.315279172-08:00","uid":501,"gid":20,"obj":"f6c165387b84c7fb0ebc26fdc812775d"},{"name":".tmp.integration-tests.json","type":"f","mode":"0644","size":5306553,"mtime":"2022-03-27T12:10:55.035217892-07:00","uid":501,"gid":20,"obj":"Ixbc27b9a704275d05a6505e794ce63e66"},{"name":".tmp.provider-tests.json","type":"f","mode":"0644","size":617740,"mtime":"2022-02-15T21:30:28.579546866-08:00","uid":501,"gid":20,"obj":"e7f69fc0222763628d5b294faf37a6d7"},{"name":".tmp.unit-tests.json","type":"f","mode":"0644","size":200525943,"mtime":"2022-04-03T10:08:51.453180251-07:00","uid":501,"gid":20,"obj":"Ixf5da1bbcdbc267fa123d93aaf90cbd75"},{"name":".wwhrd.yml","type":"f","mode":"0644","size":244,"mtime":"2022-03-22T22:45:22.163564803-07:00","uid":501,"gid":20,"obj":"cea0cac6d19d59dcf2818b08521f46b8"},{"name":"BUILD.md","type":"f","mode":"0644","size":4873,"mtime":"2022-03-22T22:45:22.163818593-07:00","uid":501,"gid":20,"obj":"bcd47eca7b520b3ea88e4799cc0c9fea"},{"name":"CODE_OF_CONDUCT.md","type":"f","mode":"0644","size":5226,"mtime":"2021-03-12T19:28:45-08:00","uid":501,"gid":20,"obj":"270e55b022ec0c7588b2dbb501791b3e"},{"name":"GOVERNANCE.md","type":"f","mode":"0644","size":12477,"mtime":"2020-03-15T23:40:35-07:00","uid":501,"gid":20,"obj":"96674fad8fcf2bdfb96b0583917bb617"},{"name":"LICENSE","type":"f","mode":"0644","size":10763,"mtime":"2019-05-27T15:50:18-07:00","uid":501,"gid":20,"obj":"e751b8a146e1dd5494564e9a8c26dd6a"},{"name":"Makefile","type":"f","mode":"0644","size":17602,"mtime":"2022-03-22T22:45:22.1639718-07:00","uid":501,"gid":20,"obj":"aa9cc80d567e94087ea9be8fef718c1a"},{"name":"README.md","type":"f","mode":"0644","size":3874,"mtime":"2022-03-22T22:45:22.164109925-07:00","uid":501,"gid":20,"obj":"d227c763b9cf476426da5d99e9fff694"},{"name":"a.log","type":"f","mode":"0644","size":3776,"mtime":"2022-03-08T19:19:40.196874627-08:00","uid":501
,"gid":20,"obj":"6337190196e804297f92a17805600be7"},{"name":"build_architecture.svg","type":"f","mode":"0644","size":143884,"mtime":"2021-03-12T19:28:45-08:00","uid":501,"gid":20,"obj":"72c0aef8c43498b056236b2d46d7e44a"},{"name":"coverage.txt","type":"f","mode":"0644","size":194996,"mtime":"2022-03-26T07:09:37.533649628-07:00","uid":501,"gid":20,"obj":"fdf1a20cea21d4daf053b99711735d0e"},{"name":"go.mod","type":"f","mode":"0644","size":5447,"mtime":"2022-03-27T09:40:59.78753556-07:00","uid":501,"gid":20,"obj":"71eefc767aeea467b1d1f7ff0ee5c21b"},{"name":"go.sum","type":"f","mode":"0644","size":114899,"mtime":"2022-03-27T09:40:59.788485485-07:00","uid":501,"gid":20,"obj":"2e801e525d9e58208dff3c25bd30f296"},{"name":"main.go","type":"f","mode":"0644","size":2057,"mtime":"2022-03-22T22:45:22.22380977-07:00","uid":501,"gid":20,"obj":"73411f7e340e5cddc43faaa1d1fe5743"}],"summary":{"size":5787582078,"files":79395,"symlinks":0,"dirs":12639,"maxTime":"2022-04-03T17:47:38.340226306-07:00","numFailed":0}}`)),
"k",
diff --git a/repo/content/content_prefetch.go b/repo/content/content_prefetch.go
index 250b39dc2..114f415ff 100644
--- a/repo/content/content_prefetch.go
+++ b/repo/content/content_prefetch.go
@@ -15,10 +15,10 @@ type prefetchOptions struct {
fullBlobPrefetchBytesThreshold int64
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var defaultPrefetchOptions = &prefetchOptions{2, 5e6}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var prefetchHintToOptions = map[string]*prefetchOptions{
"": defaultPrefetchOptions,
"default": defaultPrefetchOptions,
@@ -48,7 +48,8 @@ func (o *prefetchOptions) shouldPrefetchEntireBlob(infos []Info) bool {
// PrefetchContents fetches the provided content IDs into the cache.
// Note that due to cache configuration, it's not guaranteed that all contents will
// actually be added to the cache.
-// nolint:gocyclo
+//
+//nolint:gocyclo
func (bm *WriteManager) PrefetchContents(ctx context.Context, contentIDs []ID, hint string) []ID {
o := prefetchHintToOptions[hint]
if o == nil {
diff --git a/repo/content/encrypted_blob_mgr.go b/repo/content/encrypted_blob_mgr.go
index 5e7a5d3a7..3960af6f8 100644
--- a/repo/content/encrypted_blob_mgr.go
+++ b/repo/content/encrypted_blob_mgr.go
@@ -23,7 +23,7 @@ func (m *encryptedBlobMgr) getEncryptedBlob(ctx context.Context, blobID blob.ID,
defer payload.Close()
if err := m.indexBlobCache.GetOrLoad(ctx, string(blobID), func(output *gather.WriteBuffer) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return m.st.GetBlob(ctx, blobID, 0, -1, output)
}, &payload); err != nil {
return errors.Wrap(err, "getContent")
diff --git a/repo/content/index/id.go b/repo/content/index/id.go
index 8a80eb365..430f49a0f 100644
--- a/repo/content/index/id.go
+++ b/repo/content/index/id.go
@@ -42,7 +42,7 @@ type ID struct {
func (i ID) MarshalJSON() ([]byte, error) {
s := i.String()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return json.Marshal(s)
}
@@ -70,10 +70,11 @@ func (i ID) Hash() []byte {
}
// EmptyID represents empty content ID.
-var EmptyID = ID{} // nolint:gochecknoglobals
+var EmptyID = ID{} //nolint:gochecknoglobals
// prefixStrings contains precomputed single-character strings for all valid prefixes 'g'..'z'
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var prefixStrings [256]IDPrefix
func init() {
diff --git a/repo/content/index/id_range.go b/repo/content/index/id_range.go
index 3cb9e2e0a..0b0b02fc1 100644
--- a/repo/content/index/id_range.go
+++ b/repo/content/index/id_range.go
@@ -19,13 +19,16 @@ func PrefixRange(prefix IDPrefix) IDRange {
}
// AllIDs is an IDRange that contains all valid IDs.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var AllIDs = IDRange{"", maxIDCharacterPlus1}
// AllPrefixedIDs is an IDRange that contains all valid IDs prefixed IDs ('g' .. 'z').
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var AllPrefixedIDs = IDRange{"g", maxIDCharacterPlus1}
// AllNonPrefixedIDs is an IDRange that contains all valid IDs non-prefixed IDs ('0' .. 'f').
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var AllNonPrefixedIDs = IDRange{"0", "g"}
diff --git a/repo/content/index/index_builder.go b/repo/content/index/index_builder.go
index e4e5ced91..afba2a9da 100644
--- a/repo/content/index/index_builder.go
+++ b/repo/content/index/index_builder.go
@@ -44,7 +44,8 @@ func (b Builder) Add(i Info) {
// base36Value stores a base-36 reverse lookup such that ASCII character corresponds to its
// base-36 value ('0'=0..'9'=9, 'a'=10, 'b'=11, .., 'z'=35).
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var base36Value [256]byte
func init() {
@@ -68,7 +69,7 @@ func (b Builder) sortedContents() []Info {
// by first [0-9a-z] and second character [0-9a-f].
for cid, v := range b {
first := int(base36Value[cid.prefix])
- second := int(cid.data[0] >> 4) // nolint:gomnd
+ second := int(cid.data[0] >> 4) //nolint:gomnd
// first: 0..35, second: 0..15
buck := first<<4 + second //nolint:gomnd
@@ -163,7 +164,7 @@ func (b Builder) shard(maxShardSize int) []Builder {
for k, v := range b {
h := fnv.New32a()
- io.WriteString(h, k.String()) // nolint:errcheck
+ io.WriteString(h, k.String()) //nolint:errcheck
shard := h.Sum32() % uint32(numShards)
diff --git a/repo/content/index/index_encode_util.go b/repo/content/index/index_encode_util.go
index 5570c4767..1c6073f08 100644
--- a/repo/content/index/index_encode_util.go
+++ b/repo/content/index/index_encode_util.go
@@ -22,7 +22,7 @@ func decodeBigEndianUint16(d []byte) uint16 {
func encodeBigEndianUint24(b []byte, v uint32) {
_ = b[2] // early bounds check
- b[0] = byte(v >> 16) // nolint:gomnd
- b[1] = byte(v >> 8) // nolint:gomnd
+ b[0] = byte(v >> 16) //nolint:gomnd
+ b[1] = byte(v >> 8) //nolint:gomnd
b[2] = byte(v)
}
diff --git a/repo/content/index/index_v1.go b/repo/content/index/index_v1.go
index cf2a5581d..5237d2851 100644
--- a/repo/content/index/index_v1.go
+++ b/repo/content/index/index_v1.go
@@ -383,7 +383,7 @@ func (b *indexBuilderV1) formatEntry(entry []byte, it Info) error {
entryPackFileOffset := entry[8:12]
entryPackedOffset := entry[12:16]
entryPackedLength := entry[16:20]
- timestampAndFlags := uint64(it.GetTimestampSeconds()) << 16 // nolint:gomnd
+ timestampAndFlags := uint64(it.GetTimestampSeconds()) << 16 //nolint:gomnd
packBlobID := it.GetPackBlobID()
if len(packBlobID) == 0 {
@@ -399,7 +399,7 @@ func (b *indexBuilderV1) formatEntry(entry []byte, it Info) error {
}
binary.BigEndian.PutUint32(entryPackedLength, it.GetPackedLength())
- timestampAndFlags |= uint64(it.GetFormatVersion()) << 8 // nolint:gomnd
+ timestampAndFlags |= uint64(it.GetFormatVersion()) << 8 //nolint:gomnd
timestampAndFlags |= uint64(len(packBlobID))
binary.BigEndian.PutUint64(entryTimestampAndFlags, timestampAndFlags)
diff --git a/repo/content/index/index_v2.go b/repo/content/index/index_v2.go
index 270dde76f..96255cde9 100644
--- a/repo/content/index/index_v2.go
+++ b/repo/content/index/index_v2.go
@@ -52,9 +52,10 @@
// 17: pack ID - bits 16..23 - present if more than 2^16 packs are in a single index
-// 18: high-order bits - present if any content length is greater than 2^24 == 16MiB
-// original length bits 24..27 (4 hi bits)
-// packed length bits 24..27 (4 lo bits)
+// 18: high-order bits - present if any content length is greater than 2^24 == 16MiB
+//
+// original length bits 24..27 (4 hi bits)
+// packed length bits 24..27 (4 lo bits)
const (
v2EntryOffsetTimestampSeconds = 0
v2EntryOffsetPackOffsetAndFlags = 4
@@ -84,8 +85,8 @@
)
// layout of v2 format entry
-// 0-3: compressionID - 32 bit (corresponding to compression.HeaderID)
//
+// 0-3: compressionID - 32 bit (corresponding to compression.HeaderID)
const (
v2FormatInfoSize = 6
diff --git a/repo/content/index/merged.go b/repo/content/index/merged.go
index bc29b9f90..20e0a61fb 100644
--- a/repo/content/index/merged.go
+++ b/repo/content/index/merged.go
@@ -95,7 +95,7 @@ func (h nextInfoHeap) Less(i, j int) bool {
func (h nextInfoHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *nextInfoHeap) Push(x interface{}) {
- *h = append(*h, x.(*nextInfo)) // nolint:forcetypeassert
+ *h = append(*h, x.(*nextInfo)) //nolint:forcetypeassert
}
func (h *nextInfoHeap) Pop() interface{} {
@@ -155,7 +155,7 @@ func (m Merged) Iterate(r IDRange, cb func(i Info) error) error {
var pendingItem Info
for len(minHeap) > 0 {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
min := heap.Pop(&minHeap).(*nextInfo)
if pendingItem == nil || pendingItem.GetContentID() != min.it.GetContentID() {
if pendingItem != nil {
diff --git a/repo/content/index/packindex_test.go b/repo/content/index/packindex_test.go
index 6d913a65d..b5b33976f 100644
--- a/repo/content/index/packindex_test.go
+++ b/repo/content/index/packindex_test.go
@@ -107,7 +107,7 @@ func TestPackIndex_V2(t *testing.T) {
testPackIndex(t, Version2)
}
-// nolint:thelper,gocyclo,cyclop
+//nolint:thelper,gocyclo,cyclop
func testPackIndex(t *testing.T, version int) {
var infos []Info
// deleted contents with all information
diff --git a/repo/content/index_blob_manager_v0.go b/repo/content/index_blob_manager_v0.go
index 469d92777..02aeb8eea 100644
--- a/repo/content/index_blob_manager_v0.go
+++ b/repo/content/index_blob_manager_v0.go
@@ -537,7 +537,7 @@ func (m *indexBlobManagerV0) dropContentsFromBuilder(bld index.Builder, opt Comp
// that have not been upgraded from being able to open the repository after its format
// has been upgraded.
func WriteLegacyIndexPoisonBlob(ctx context.Context, st blob.Storage) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return st.PutBlob(
ctx,
legacyIndexPoisonBlobID,
diff --git a/repo/content/index_blob_manager_v0_test.go b/repo/content/index_blob_manager_v0_test.go
index d83196fe5..f2af6da49 100644
--- a/repo/content/index_blob_manager_v0_test.go
+++ b/repo/content/index_blob_manager_v0_test.go
@@ -157,7 +157,8 @@ func pickRandomActionTestIndexBlobManagerStress() action {
// TestIndexBlobManagerStress launches N actors, each randomly writing new index blobs,
// verifying that all blobs previously written by it are correct and randomly compacting blobs.
-// nolint:gocyclo
+//
+//nolint:gocyclo
func TestIndexBlobManagerStress(t *testing.T) {
t.Parallel()
testutil.SkipNonDeterministicTestUnderCodeCoverage(t)
diff --git a/repo/content/index_blob_manager_v1.go b/repo/content/index_blob_manager_v1.go
index 3b4884bea..a143aa36a 100644
--- a/repo/content/index_blob_manager_v1.go
+++ b/repo/content/index_blob_manager_v1.go
@@ -126,7 +126,7 @@ func (m *indexBlobManagerV1) writeIndexBlobs(ctx context.Context, dataShards []g
shards[unprefixedBlobID] = data2.Bytes()
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return m.epochMgr.WriteIndex(ctx, shards)
}
diff --git a/repo/content/info.go b/repo/content/info.go
index 52675b51c..79b837785 100644
--- a/repo/content/info.go
+++ b/repo/content/info.go
@@ -24,7 +24,8 @@
)
// EmptyID is an empty content ID.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var EmptyID = index.EmptyID
// ToInfoStruct converts the provided Info to *InfoStruct.
@@ -34,13 +35,13 @@ func ToInfoStruct(i Info) *InfoStruct {
// IDFromHash creates and validates content ID from a prefix and hash.
func IDFromHash(prefix IDPrefix, hash []byte) (ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return index.IDFromHash(prefix, hash)
}
// ParseID parses the provided string as content ID.
func ParseID(s string) (ID, error) {
- return index.ParseID(s) // nolint:wrapcheck
+ return index.ParseID(s) //nolint:wrapcheck
}
// IDsFromStrings converts strings to IDs.
diff --git a/repo/content/internal_logger.go b/repo/content/internal_logger.go
index 7a09cdaaf..024c68dfc 100644
--- a/repo/content/internal_logger.go
+++ b/repo/content/internal_logger.go
@@ -30,7 +30,7 @@ type internalLogManager struct {
// internalLogManager implements io.Writer and we must be able to write to the
// repository asynchronously when the context is not provided.
- ctx context.Context // nolint:containedctx
+ ctx context.Context //nolint:containedctx
st blob.Storage
bc crypter
@@ -76,7 +76,7 @@ func (m *internalLogManager) encryptAndWriteLogBlob(prefix blob.ID, data gather.
func (m *internalLogManager) NewLogger() *zap.SugaredLogger {
var rnd [2]byte
- rand.Read(rnd[:]) // nolint:errcheck
+ rand.Read(rnd[:]) //nolint:errcheck
w := &internalLogger{
m: m,
diff --git a/repo/content/sessions.go b/repo/content/sessions.go
index b7720dbfb..fbc635078 100644
--- a/repo/content/sessions.go
+++ b/repo/content/sessions.go
@@ -32,7 +32,7 @@ type SessionInfo struct {
Host string `json:"hostname"`
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
sessionIDEpochStartTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
sessionIDEpochGranularity = 30 * 24 * time.Hour
diff --git a/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go b/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go
index d1427ade5..59f2a8c32 100644
--- a/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go
+++ b/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go
@@ -23,7 +23,7 @@ type aes256GCMHmacSha256 struct {
// aeadForContent returns cipher.AEAD using key derived from a given contentID.
func (e aes256GCMHmacSha256) aeadForContent(contentID []byte) (cipher.AEAD, error) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
h := e.hmacPool.Get().(hash.Hash)
defer e.hmacPool.Put(h)
h.Reset()
@@ -40,7 +40,7 @@ func (e aes256GCMHmacSha256) aeadForContent(contentID []byte) (cipher.AEAD, erro
return nil, errors.Wrap(err, "unable to create AES-256 cipher")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cipher.NewGCM(c)
}
diff --git a/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go b/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go
index f820e8307..8cb938163 100644
--- a/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go
+++ b/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go
@@ -23,7 +23,7 @@ type chacha20poly1305hmacSha256Encryptor struct {
// aeadForContent returns cipher.AEAD using key derived from a given contentID.
func (e chacha20poly1305hmacSha256Encryptor) aeadForContent(contentID []byte) (cipher.AEAD, error) {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
h := e.hmacPool.Get().(hash.Hash)
defer e.hmacPool.Put(h)
@@ -36,7 +36,7 @@ func (e chacha20poly1305hmacSha256Encryptor) aeadForContent(contentID []byte) (c
var hashBuf [32]byte
key := h.Sum(hashBuf[:0])
- // nolint:wrapcheck
+ //nolint:wrapcheck
return chacha20poly1305.New(key)
}
diff --git a/repo/encryption/encryption.go b/repo/encryption/encryption.go
index 639442edf..01974687f 100644
--- a/repo/encryption/encryption.go
+++ b/repo/encryption/encryption.go
@@ -88,7 +88,7 @@ type encryptorInfo struct {
newEncryptor EncryptorFactory
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var encryptors = map[string]*encryptorInfo{}
// deriveKey uses HKDF to derive a key of a given length and a given purpose from parameters.
diff --git a/repo/encryption/encryption_test.go b/repo/encryption/encryption_test.go
index 808c48939..80c9c701e 100644
--- a/repo/encryption/encryption_test.go
+++ b/repo/encryption/encryption_test.go
@@ -21,7 +21,7 @@ type parameters struct {
func (p parameters) GetEncryptionAlgorithm() string { return p.encryptionAlgo }
func (p parameters) GetMasterKey() []byte { return p.masterKey }
-// nolint:gocyclo
+//nolint:gocyclo
func TestRoundTrip(t *testing.T) {
data := make([]byte, 100)
rand.Read(data)
diff --git a/repo/format/crypto_key_derivation_nontest.go b/repo/format/crypto_key_derivation_nontest.go
index 14d892b60..4ddf6bdc2 100644
--- a/repo/format/crypto_key_derivation_nontest.go
+++ b/repo/format/crypto_key_derivation_nontest.go
@@ -17,7 +17,7 @@ func (f *KopiaRepositoryJSON) DeriveFormatEncryptionKeyFromPassword(password str
switch f.KeyDerivationAlgorithm {
case "scrypt-65536-8-1":
- // nolint:wrapcheck,gomnd
+ //nolint:wrapcheck,gomnd
return scrypt.Key([]byte(password), f.UniqueID, 65536, 8, 1, masterKeySize)
default:
diff --git a/repo/format/format_blob.go b/repo/format/format_blob.go
index 139108240..b38b4e7fb 100644
--- a/repo/format/format_blob.go
+++ b/repo/format/format_blob.go
@@ -35,7 +35,7 @@
// ErrInvalidPassword is returned when repository password is invalid.
var ErrInvalidPassword = errors.Errorf("invalid repository password")
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
purposeAESKey = []byte("AES")
purposeAuthData = []byte("CHECKSUM")
@@ -191,8 +191,8 @@ func (f *KopiaRepositoryJSON) WriteKopiaRepositoryBlobWithID(ctx context.Context
}
func initCrypto(masterKey, repositoryID []byte) (cipher.AEAD, []byte, error) {
- aesKey := DeriveKeyFromMasterKey(masterKey, repositoryID, purposeAESKey, 32) // nolint:gomnd
- authData := DeriveKeyFromMasterKey(masterKey, repositoryID, purposeAuthData, 32) // nolint:gomnd
+ aesKey := DeriveKeyFromMasterKey(masterKey, repositoryID, purposeAESKey, 32) //nolint:gomnd
+ authData := DeriveKeyFromMasterKey(masterKey, repositoryID, purposeAuthData, 32) //nolint:gomnd
blk, err := aes.NewCipher(aesKey)
if err != nil {
diff --git a/repo/format/format_blob_cache.go b/repo/format/format_blob_cache.go
index d812df761..b458f9f45 100644
--- a/repo/format/format_blob_cache.go
+++ b/repo/format/format_blob_cache.go
@@ -47,7 +47,7 @@ func readRepositoryBlobBytesFromCache(ctx context.Context, cachedFile string, va
return nil, time.Time{}, errors.Errorf("cached file too old")
}
- data, err = os.ReadFile(cachedFile) // nolint:gosec
+ data, err = os.ReadFile(cachedFile) //nolint:gosec
if err != nil {
return nil, time.Time{}, errors.Wrapf(err, "failed to read the cache file %q", cachedFile)
}
diff --git a/repo/grpc_repository_client.go b/repo/grpc_repository_client.go
index 789b20d5f..5434213c1 100644
--- a/repo/grpc_repository_client.go
+++ b/repo/grpc_repository_client.go
@@ -201,7 +201,7 @@ func (r *grpcRepositoryClient) ClientOptions() ClientOptions {
}
func (r *grpcRepositoryClient) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.Open(ctx, r, id)
}
@@ -210,7 +210,7 @@ func (r *grpcRepositoryClient) NewObjectWriter(ctx context.Context, opt object.W
}
func (r *grpcRepositoryClient) VerifyObject(ctx context.Context, id object.ID) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.VerifyObject(ctx, r, id)
}
@@ -293,7 +293,7 @@ func (r *grpcRepositoryClient) PutManifest(ctx context.Context, labels map[strin
return "", err
}
- return v.(manifest.ID), nil // nolint:forcetypeassert
+ return v.(manifest.ID), nil //nolint:forcetypeassert
}
func (r *grpcInnerSession) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) {
@@ -330,7 +330,7 @@ func (r *grpcRepositoryClient) FindManifests(ctx context.Context, labels map[str
return nil, err
}
- return v.([]*manifest.EntryMetadata), nil // nolint:forcetypeassert
+ return v.([]*manifest.EntryMetadata), nil //nolint:forcetypeassert
}
func (r *grpcInnerSession) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {
@@ -382,7 +382,7 @@ func (r *grpcInnerSession) DeleteManifest(ctx context.Context, id manifest.ID) e
}
func (r *grpcRepositoryClient) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.PrefetchBackingContents(ctx, r, objectIDs, hint)
}
@@ -394,7 +394,7 @@ func (r *grpcRepositoryClient) PrefetchContents(ctx context.Context, contentIDs
return nil
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return ids.([]content.ID)
}
@@ -476,7 +476,7 @@ func (r *grpcRepositoryClient) NewWriter(ctx context.Context, opt WriteSessionOp
// ConcatenateObjects creates a concatenated objects from the provided object IDs.
func (r *grpcRepositoryClient) ConcatenateObjects(ctx context.Context, objectIDs []object.ID) (object.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.omgr.Concatenate(ctx, objectIDs)
}
@@ -496,7 +496,7 @@ func (r *grpcRepositoryClient) maybeRetry(ctx context.Context, attempt sessionAt
// If the grpcRepositoryClient set to automatically retry and the provided callback returns io.EOF,
// the inner session will be killed and re-established as necessary.
func (r *grpcRepositoryClient) retry(ctx context.Context, attempt sessionAttemptFunc) (interface{}, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return retry.WithExponentialBackoff(ctx, "invoking GRPC API", func() (interface{}, error) {
v, err := r.inSessionWithoutRetry(ctx, attempt)
if errors.Is(err, io.EOF) {
@@ -528,7 +528,7 @@ func (r *grpcRepositoryClient) ContentInfo(ctx context.Context, contentID conten
return nil, err
}
- return v.(content.Info), nil // nolint:forcetypeassert
+ return v.(content.Info), nil //nolint:forcetypeassert
}
func (r *grpcInnerSession) contentInfo(ctx context.Context, contentID content.ID) (content.Info, error) {
@@ -602,7 +602,7 @@ func (r *grpcRepositoryClient) GetContent(ctx context.Context, contentID content
_, err = output.Write(v.([]byte))
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}, &b)
@@ -658,7 +658,7 @@ func (r *grpcRepositoryClient) doWrite(ctx context.Context, contentID content.ID
r.contentCache.Put(ctx, contentID.String(), gather.FromSlice(data))
}
- if got, want := v.(content.ID), contentID; got != want { // nolint:forcetypeassert
+ if got, want := v.(content.ID), contentID; got != want { //nolint:forcetypeassert
return errors.Errorf("server returned different content ID: %v vs %v", got, want)
}
@@ -720,7 +720,7 @@ func (r *grpcInnerSession) WriteContent(ctx context.Context, data []byte, prefix
}) {
switch rr := resp.Response.(type) {
case *apipb.SessionResponse_WriteContent:
- // nolint:wrapcheck
+ //nolint:wrapcheck
return content.ParseID(rr.WriteContent.GetContentId())
default:
@@ -864,7 +864,7 @@ func (r *grpcRepositoryClient) getOrEstablishInnerSession(ctx context.Context) (
return nil, errors.Wrap(err, "error establishing session")
}
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
r.innerSession = v.(*grpcInnerSession)
}
@@ -926,5 +926,5 @@ func newGRPCAPIRepositoryForConnection(ctx context.Context, conn *grpc.ClientCon
return nil, err
}
- return v.(*grpcRepositoryClient), nil // nolint:forcetypeassert
+ return v.(*grpcRepositoryClient), nil //nolint:forcetypeassert
}
diff --git a/repo/hashing/blake3_hashes.go b/repo/hashing/blake3_hashes.go
index 1f13e2ec2..8720b0719 100644
--- a/repo/hashing/blake3_hashes.go
+++ b/repo/hashing/blake3_hashes.go
@@ -17,11 +17,11 @@ func newBlake3(key []byte) (hash.Hash, error) {
key = xKey[:blake3KeySize]
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blake3.NewKeyed(key[:blake3KeySize])
}
func init() {
- Register("BLAKE3-256", truncatedKeyedHashFuncFactory(newBlake3, 32)) // nolint:gomnd
- Register("BLAKE3-256-128", truncatedKeyedHashFuncFactory(newBlake3, 16)) // nolint:gomnd
+ Register("BLAKE3-256", truncatedKeyedHashFuncFactory(newBlake3, 32)) //nolint:gomnd
+ Register("BLAKE3-256-128", truncatedKeyedHashFuncFactory(newBlake3, 16)) //nolint:gomnd
}
diff --git a/repo/hashing/blake_hashes.go b/repo/hashing/blake_hashes.go
index ee9c2eb49..96063c401 100644
--- a/repo/hashing/blake_hashes.go
+++ b/repo/hashing/blake_hashes.go
@@ -6,8 +6,8 @@
)
func init() {
- Register("BLAKE2S-128", truncatedKeyedHashFuncFactory(blake2s.New128, 16)) // nolint:gomnd
- Register("BLAKE2S-256", truncatedKeyedHashFuncFactory(blake2s.New256, 32)) // nolint:gomnd
- Register("BLAKE2B-256-128", truncatedKeyedHashFuncFactory(blake2b.New256, 16)) // nolint:gomnd
- Register("BLAKE2B-256", truncatedKeyedHashFuncFactory(blake2b.New256, 32)) // nolint:gomnd
+ Register("BLAKE2S-128", truncatedKeyedHashFuncFactory(blake2s.New128, 16)) //nolint:gomnd
+ Register("BLAKE2S-256", truncatedKeyedHashFuncFactory(blake2s.New256, 32)) //nolint:gomnd
+ Register("BLAKE2B-256-128", truncatedKeyedHashFuncFactory(blake2b.New256, 16)) //nolint:gomnd
+ Register("BLAKE2B-256", truncatedKeyedHashFuncFactory(blake2b.New256, 32)) //nolint:gomnd
}
diff --git a/repo/hashing/hashing.go b/repo/hashing/hashing.go
index fd41b5f61..8994d721f 100644
--- a/repo/hashing/hashing.go
+++ b/repo/hashing/hashing.go
@@ -27,7 +27,7 @@ type Parameters interface {
// HashFuncFactory returns a hash function for given formatting options.
type HashFuncFactory func(p Parameters) (HashFunc, error)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var hashFunctions = map[string]HashFuncFactory{}
// Register registers a hash function with a given name.
@@ -61,7 +61,7 @@ func truncatedHMACHashFuncFactory(hf func() hash.Hash, truncate int) HashFuncFac
}
return func(output []byte, data gather.Bytes) []byte {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
h := pool.Get().(hash.Hash)
defer pool.Put(h)
@@ -90,7 +90,7 @@ func truncatedKeyedHashFuncFactory(hf func(key []byte) (hash.Hash, error), trunc
}
return func(output []byte, data gather.Bytes) []byte {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
h := pool.Get().(hash.Hash)
defer pool.Put(h)
diff --git a/repo/hashing/sha_hashes.go b/repo/hashing/sha_hashes.go
index 0279bd28f..f5d23600f 100644
--- a/repo/hashing/sha_hashes.go
+++ b/repo/hashing/sha_hashes.go
@@ -7,9 +7,9 @@
)
func init() {
- Register("HMAC-SHA256", truncatedHMACHashFuncFactory(sha256.New, 32)) // nolint:gomnd
- Register("HMAC-SHA256-128", truncatedHMACHashFuncFactory(sha256.New, 16)) // nolint:gomnd
- Register("HMAC-SHA224", truncatedHMACHashFuncFactory(sha256.New224, 28)) // nolint:gomnd
- Register("HMAC-SHA3-224", truncatedHMACHashFuncFactory(sha3.New224, 28)) // nolint:gomnd
- Register("HMAC-SHA3-256", truncatedHMACHashFuncFactory(sha3.New256, 32)) // nolint:gomnd
+ Register("HMAC-SHA256", truncatedHMACHashFuncFactory(sha256.New, 32)) //nolint:gomnd
+ Register("HMAC-SHA256-128", truncatedHMACHashFuncFactory(sha256.New, 16)) //nolint:gomnd
+ Register("HMAC-SHA224", truncatedHMACHashFuncFactory(sha256.New224, 28)) //nolint:gomnd
+ Register("HMAC-SHA3-224", truncatedHMACHashFuncFactory(sha3.New224, 28)) //nolint:gomnd
+ Register("HMAC-SHA3-256", truncatedHMACHashFuncFactory(sha3.New256, 32)) //nolint:gomnd
}
diff --git a/repo/initialize.go b/repo/initialize.go
index b579b1285..ac26ea4f3 100644
--- a/repo/initialize.go
+++ b/repo/initialize.go
@@ -19,7 +19,8 @@
)
// BuildInfo is the build information of Kopia.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var (
BuildInfo = "unknown"
BuildVersion = "v0-unofficial"
diff --git a/repo/logging/logging_buf.go b/repo/logging/logging_buf.go
index 4be72c507..37fd82724 100644
--- a/repo/logging/logging_buf.go
+++ b/repo/logging/logging_buf.go
@@ -15,7 +15,7 @@ type Buffer struct {
validLen int // valid length
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var bufPool = &sync.Pool{
New: func() interface{} {
return &Buffer{}
@@ -24,7 +24,7 @@ type Buffer struct {
// GetBuffer gets a logging buffer.
func GetBuffer() *Buffer {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return bufPool.Get().(*Buffer)
}
diff --git a/repo/logging/null_logger.go b/repo/logging/null_logger.go
index 4c356c53d..431f3c3a2 100644
--- a/repo/logging/null_logger.go
+++ b/repo/logging/null_logger.go
@@ -3,7 +3,8 @@
import "go.uber.org/zap"
// NullLogger represents a singleton logger that discards all output.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var NullLogger = zap.NewNop().Sugar()
func getNullLogger(module string) Logger {
diff --git a/repo/maintenance/blob_gc.go b/repo/maintenance/blob_gc.go
index 915708616..324d13979 100644
--- a/repo/maintenance/blob_gc.go
+++ b/repo/maintenance/blob_gc.go
@@ -23,7 +23,8 @@ type DeleteUnreferencedBlobsOptions struct {
}
// DeleteUnreferencedBlobs deletes blobs that are no longer referenced by index entries (unless the blob was created after maintenance started).
-// nolint:gocyclo,funlen
+//
+//nolint:gocyclo,funlen
func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWriter, opt DeleteUnreferencedBlobsOptions, safety SafetyParameters) (int, error) {
if opt.Parallel == 0 {
opt.Parallel = 16
diff --git a/repo/maintenance/cleanup_logs.go b/repo/maintenance/cleanup_logs.go
index 9a2507f0a..956f5dbdd 100644
--- a/repo/maintenance/cleanup_logs.go
+++ b/repo/maintenance/cleanup_logs.go
@@ -33,7 +33,7 @@ func (o LogRetentionOptions) OrDefault() LogRetentionOptions {
// defaultLogRetention returns CleanupLogsOptions applied by default during maintenance.
func defaultLogRetention() LogRetentionOptions {
- // nolint:gomnd
+ //nolint:gomnd
return LogRetentionOptions{
MaxTotalSize: 1 << 30, // keep no more than 1 GiB logs
MaxAge: 30 * 24 * time.Hour, // no more than 30 days of data
diff --git a/repo/maintenance/content_rewrite.go b/repo/maintenance/content_rewrite.go
index 12986613d..50c7a57d2 100644
--- a/repo/maintenance/content_rewrite.go
+++ b/repo/maintenance/content_rewrite.go
@@ -118,7 +118,7 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *
log(ctx).Debugf("Total bytes rewritten %v", units.BytesStringBase10(totalBytes))
if failedCount == 0 {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.ContentManager().Flush(ctx)
}
@@ -212,7 +212,7 @@ func(pi content.PackInfo) error {
return nil
}
- // nolint:gomnd
+ //nolint:gomnd
if packNumberByPrefix[prefix] == 2 {
// when we encounter the 2nd pack, emit contents from the first one too.
for _, ci := range firstPackByPrefix[prefix].ContentInfos {
diff --git a/repo/maintenance/drop_deleted_contents.go b/repo/maintenance/drop_deleted_contents.go
index d56051b6e..90a4195b5 100644
--- a/repo/maintenance/drop_deleted_contents.go
+++ b/repo/maintenance/drop_deleted_contents.go
@@ -12,7 +12,7 @@
func DropDeletedContents(ctx context.Context, rep repo.DirectRepositoryWriter, dropDeletedBefore time.Time, safety SafetyParameters) error {
log(ctx).Infof("Dropping contents deleted before %v", dropDeletedBefore)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.ContentManager().CompactIndexes(ctx, content.CompactOptions{
AllIndexes: true,
DropDeletedBefore: dropDeletedBefore,
diff --git a/repo/maintenance/index_compaction.go b/repo/maintenance/index_compaction.go
index cae3ebcb3..6e105f2ab 100644
--- a/repo/maintenance/index_compaction.go
+++ b/repo/maintenance/index_compaction.go
@@ -13,7 +13,7 @@ func runTaskIndexCompactionQuick(ctx context.Context, runParams RunParameters, s
const maxSmallBlobsForIndexCompaction = 8
- // nolint:wrapcheck
+ //nolint:wrapcheck
return runParams.rep.ContentManager().CompactIndexes(ctx, content.CompactOptions{
MaxSmallBlobs: maxSmallBlobsForIndexCompaction,
DisableEventualConsistencySafety: safety.DisableEventualConsistencySafety,
diff --git a/repo/maintenance/maintenance_params.go b/repo/maintenance/maintenance_params.go
index 6302032a5..1442375a4 100644
--- a/repo/maintenance/maintenance_params.go
+++ b/repo/maintenance/maintenance_params.go
@@ -10,7 +10,7 @@
"github.com/kopia/kopia/repo/manifest"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var manifestLabels = map[string]string{
"type": "maintenance",
}
diff --git a/repo/maintenance/maintenance_run.go b/repo/maintenance/maintenance_run.go
index 930dd7c3b..633b9bd97 100644
--- a/repo/maintenance/maintenance_run.go
+++ b/repo/maintenance/maintenance_run.go
@@ -522,17 +522,17 @@ func maxEndTime(taskRuns ...[]RunInfo) time.Time {
//
// Step #1 - race between GC and snapshot creation:
//
-// - 'snapshot gc' runs and marks unreachable contents as deleted
-// - 'snapshot create' runs at approximately the same time and creates manifest
-// which makes some contents live again.
+// - 'snapshot gc' runs and marks unreachable contents as deleted
+// - 'snapshot create' runs at approximately the same time and creates manifest
+// which makes some contents live again.
//
// As a result of this race, GC has marked some entries as incorrectly deleted, but we
// can still return them since they are not dropped from the index.
//
// Step #2 - fix incorrectly deleted contents
//
-// - subsequent 'snapshot gc' runs and undeletes contents incorrectly
-// marked as deleted in Step 1.
+// - subsequent 'snapshot gc' runs and undeletes contents incorrectly
+// marked as deleted in Step 1.
//
// After Step 2 completes, we know for sure that all contents deleted before Step #1 has started
// are safe to drop from the index because Step #2 has fixed them, as long as all snapshots that
diff --git a/repo/maintenance/maintenance_safety.go b/repo/maintenance/maintenance_safety.go
index 6b1e7371d..38fd682aa 100644
--- a/repo/maintenance/maintenance_safety.go
+++ b/repo/maintenance/maintenance_safety.go
@@ -36,7 +36,8 @@ type SafetyParameters struct {
}
// Supported safety levels.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var (
// SafetyNone has safety parameters which allow full garbage collection without unnecessary
// delays, but it is safe only if no other kopia clients are running and storage backend is
diff --git a/repo/maintenance/maintenance_schedule.go b/repo/maintenance/maintenance_schedule.go
index 2e472e34a..ccd66cc3e 100644
--- a/repo/maintenance/maintenance_schedule.go
+++ b/repo/maintenance/maintenance_schedule.go
@@ -20,7 +20,7 @@
maintenanceScheduleBlobID = "kopia.maintenance"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
maintenanceScheduleKeyPurpose = []byte("maintenance schedule")
maintenanceScheduleAEADExtraData = []byte("maintenance")
@@ -67,7 +67,7 @@ func getAES256GCM(rep repo.DirectRepository) (cipher.AEAD, error) {
return nil, errors.Wrap(err, "unable to create AES-256 cipher")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return cipher.NewGCM(c)
}
@@ -170,7 +170,7 @@ func SetSchedule(ctx context.Context, rep repo.DirectRepositoryWriter, s *Schedu
result := append([]byte(nil), nonce...)
ciphertext := c.Seal(result, nonce, v, maintenanceScheduleAEADExtraData)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return rep.BlobStorage().PutBlob(ctx, maintenanceScheduleBlobID, gather.FromSlice(ciphertext), blob.PutOptions{})
}
diff --git a/repo/manifest/committed_manifest_manager.go b/repo/manifest/committed_manifest_manager.go
index c294342d8..048d2862f 100644
--- a/repo/manifest/committed_manifest_manager.go
+++ b/repo/manifest/committed_manifest_manager.go
@@ -356,7 +356,7 @@ func loadManifestContent(ctx context.Context, b contentManager, contentID conten
func newCommittedManager(b contentManager) *committedManifestManager {
debugID := ""
if os.Getenv("KOPIA_DEBUG_MANIFEST_MANAGER") != "" {
- debugID = fmt.Sprintf("%x", rand.Int63()) // nolint:gosec
+ debugID = fmt.Sprintf("%x", rand.Int63()) //nolint:gosec
}
return &committedManifestManager{
diff --git a/repo/object/object_manager.go b/repo/object/object_manager.go
index e0e0df711..10078ada0 100644
--- a/repo/object/object_manager.go
+++ b/repo/object/object_manager.go
@@ -120,7 +120,7 @@ func (om *Manager) Concatenate(ctx context.Context, objectIDs []ID) (ID, error)
Prefix: indirectContentPrefix,
Description: "CONCATENATED INDEX",
})
- defer w.Close() // nolint:errcheck
+ defer w.Close() //nolint:errcheck
if werr := writeIndirectObject(w, concatenatedEntries); werr != nil {
return EmptyID, werr
diff --git a/repo/object/object_manager_test.go b/repo/object/object_manager_test.go
index 357afdc90..d19458096 100644
--- a/repo/object/object_manager_test.go
+++ b/repo/object/object_manager_test.go
@@ -474,7 +474,7 @@ func TestHMAC(t *testing.T) {
}
}
-// nolint:gocyclo
+//nolint:gocyclo
func TestConcatenate(t *testing.T) {
ctx := testlogging.Context(t)
_, _, om := setupTest(t, nil)
@@ -826,7 +826,7 @@ func verify(ctx context.Context, t *testing.T, cr contentReader, objectID ID, ex
}
}
-// nolint:gocyclo
+//nolint:gocyclo
func TestSeek(t *testing.T) {
ctx := testlogging.Context(t)
_, _, om := setupTest(t, nil)
diff --git a/repo/object/object_reader.go b/repo/object/object_reader.go
index c7e1b3d0e..53080c48d 100644
--- a/repo/object/object_reader.go
+++ b/repo/object/object_reader.go
@@ -37,7 +37,7 @@ func VerifyObject(ctx context.Context, cr contentReader, oid ID) ([]content.ID,
type objectReader struct {
// objectReader implements io.Reader, but needs context to read from repository
- ctx context.Context // nolint:containedctx
+ ctx context.Context //nolint:containedctx
cr contentReader
diff --git a/repo/object/object_writer.go b/repo/object/object_writer.go
index 67c511f58..032932bb1 100644
--- a/repo/object/object_writer.go
+++ b/repo/object/object_writer.go
@@ -160,7 +160,7 @@ func (w *objectWriter) flushBuffer() error {
w.asyncWritesWG.Add(1)
asyncBuf := gather.NewWriteBuffer()
- w.buffer.Bytes().WriteTo(asyncBuf) // nolint:errcheck
+ w.buffer.Bytes().WriteTo(asyncBuf) //nolint:errcheck
go func() {
defer func() {
diff --git a/repo/object/objectid.go b/repo/object/objectid.go
index 75c348150..663ed750b 100644
--- a/repo/object/objectid.go
+++ b/repo/object/objectid.go
@@ -12,9 +12,9 @@
// ID is an identifier of a repository object. Repository objects can be stored.
//
-// 1. In a single content block, this is the most common case for small objects.
-// 2. In a series of content blocks with an indirect block pointing at them (multiple indirections are allowed).
-// This is used for larger files. Object IDs using indirect blocks start with "I"
+// 1. In a single content block, this is the most common case for small objects.
+// 2. In a series of content blocks with an indirect block pointing at them (multiple indirections are allowed).
+// This is used for larger files. Object IDs using indirect blocks start with "I"
type ID struct {
cid content.ID
indirection byte
@@ -25,7 +25,7 @@ type ID struct {
func (i ID) MarshalJSON() ([]byte, error) {
s := i.String()
- // nolint:wrapcheck
+ //nolint:wrapcheck
return json.Marshal(s)
}
@@ -48,7 +48,8 @@ func (i *ID) UnmarshalJSON(v []byte) error {
}
// EmptyID is an empty object ID equivalent to an empty string.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var EmptyID = ID{}
// HasObjectID exposes the identifier of an object.
diff --git a/repo/open.go b/repo/open.go
index 010f15bb2..292d4540c 100644
--- a/repo/open.go
+++ b/repo/open.go
@@ -38,7 +38,8 @@
//
// The strings are arbitrary, but should be short, human-readable and immutable once a version
// that starts requiring them is released.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var supportedFeatures = []feature.Feature{
"index-v1",
"index-v2",
@@ -54,7 +55,7 @@
// localCacheIntegrityHMACSecretLength length of HMAC secret protecting local cache items.
const localCacheIntegrityHMACSecretLength = 16
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var localCacheIntegrityPurpose = []byte("local-cache-integrity")
var log = logging.Module("kopia/repo")
@@ -131,7 +132,7 @@ func getContentCacheOrNil(ctx context.Context, opt *content.CachingOptions, pass
// derive content cache key from the password & HMAC secret using scrypt.
salt := append([]byte("content-cache-protection"), opt.HMACSecret...)
- // nolint:gomnd
+ //nolint:gomnd
cacheEncryptionKey, err := scrypt.Key([]byte(password), salt, 65536, 8, 1, 32)
if err != nil {
return nil, errors.Wrap(err, "unable to derive cache encryption key from password")
@@ -196,7 +197,8 @@ func openDirect(ctx context.Context, configFile string, lc *LocalConfig, passwor
}
// openWithConfig opens the repository with a given configuration, avoiding the need for a config file.
-// nolint:funlen,gocyclo,cyclop
+//
+//nolint:funlen,gocyclo,cyclop
func openWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, password string, options *Options, cacheOpts *content.CachingOptions, configFile string) (DirectRepository, error) {
cacheOpts = cacheOpts.CloneOrDefault()
cmOpts := &content.ManagerOptions{
@@ -211,7 +213,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, passw
ufb, internalErr = format.ReadAndCacheDecodedRepositoryConfig(ctx, st, password, cacheOpts.CacheDirectory,
lc.FormatBlobCacheDuration)
if internalErr != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, internalErr
}
@@ -224,7 +226,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, passw
}, func(internalErr error) bool {
return !options.DoNotWaitForUpgrade && errors.Is(internalErr, ErrRepositoryUnavailableDueToUpgrageInProgress)
}); err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return nil, err
}
@@ -256,7 +258,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, lc *LocalConfig, passw
if fo.MaxPackSize == 0 {
// legacy only, apply default
- fo.MaxPackSize = 20 << 20 // nolint:gomnd
+ fo.MaxPackSize = 20 << 20 //nolint:gomnd
}
// do not embed repository format info in pack blobs when password change is enabled.
@@ -426,7 +428,7 @@ func upgradeLockMonitor(
ufb, err := format.ReadAndCacheDecodedRepositoryConfig(ctx, st, password, cacheOpts.CacheDirectory, lockRefreshInterval)
if err != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return err
}
diff --git a/repo/repository.go b/repo/repository.go
index 2513e10f6..72e3b1272 100644
--- a/repo/repository.go
+++ b/repo/repository.go
@@ -154,7 +154,7 @@ func (r *directRepository) NewObjectWriter(ctx context.Context, opt object.Write
// ConcatenateObjects creates a concatenated objects from the provided object IDs.
func (r *directRepository) ConcatenateObjects(ctx context.Context, objectIDs []object.ID) (object.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.omgr.Concatenate(ctx, objectIDs)
}
@@ -165,37 +165,37 @@ func (r *directRepository) DisableIndexRefresh() {
// OpenObject opens the reader for a given object, returns object.ErrNotFound.
func (r *directRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.Open(ctx, r.cmgr, id)
}
// VerifyObject verifies that the given object is stored properly in a repository and returns backing content IDs.
func (r *directRepository) VerifyObject(ctx context.Context, id object.ID) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.VerifyObject(ctx, r.cmgr, id)
}
// GetManifest returns the given manifest data and metadata.
func (r *directRepository) GetManifest(ctx context.Context, id manifest.ID, data interface{}) (*manifest.EntryMetadata, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.mmgr.Get(ctx, id, data)
}
// PutManifest saves the given manifest payload with a set of labels.
func (r *directRepository) PutManifest(ctx context.Context, labels map[string]string, payload interface{}) (manifest.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.mmgr.Put(ctx, labels, payload)
}
// FindManifests returns metadata for manifests matching given set of labels.
func (r *directRepository) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.mmgr.Find(ctx, labels)
}
// DeleteManifest deletes the manifest with a given ID.
func (r *directRepository) DeleteManifest(ctx context.Context, id manifest.ID) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.mmgr.Delete(ctx, id)
}
@@ -206,19 +206,19 @@ func (r *directRepository) PrefetchContents(ctx context.Context, contentIDs []co
// PrefetchObjects brings the requested objects into the cache.
func (r *directRepository) PrefetchObjects(ctx context.Context, objectIDs []object.ID, hint string) ([]content.ID, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return object.PrefetchBackingContents(ctx, r.cmgr, objectIDs, hint)
}
// ListActiveSessions returns the map of active sessions.
func (r *directRepository) ListActiveSessions(ctx context.Context) (map[content.SessionID]*content.SessionInfo, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.cmgr.ListActiveSessions(ctx)
}
// ContentInfo gets the information about particular content.
func (r *directRepository) ContentInfo(ctx context.Context, contentID content.ID) (content.Info, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.cmgr.ContentInfo(ctx, contentID)
}
@@ -328,7 +328,7 @@ func (r *directRepository) ContentReader() content.Reader {
// IndexBlobs returns the index blobs in use.
func (r *directRepository) IndexBlobs(ctx context.Context, includeInactive bool) ([]content.IndexBlobInfo, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return r.cmgr.IndexBlobs(ctx, includeInactive)
}
diff --git a/repo/splitter/splitter.go b/repo/splitter/splitter.go
index 0fcd0791a..d2e33a9f6 100644
--- a/repo/splitter/splitter.go
+++ b/repo/splitter/splitter.go
@@ -43,7 +43,8 @@ func SupportedAlgorithms() []string {
type Factory func() Splitter
// splitterFactories is a map of registered splitter factories.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var splitterFactories = map[string]Factory{
"FIXED-1M": Fixed(splitterSize1MB),
"FIXED-2M": Fixed(splitterSize2MB),
diff --git a/repo/splitter/splitter_buzhash32.go b/repo/splitter/splitter_buzhash32.go
index 0a94b702f..32b901ed6 100644
--- a/repo/splitter/splitter_buzhash32.go
+++ b/repo/splitter/splitter_buzhash32.go
@@ -85,8 +85,8 @@ func newBuzHash32SplitterFactory(avgSize int) Factory {
// avgSize must be a power of two, so 0b000001000...0000
// it just so happens that mask is avgSize-1 :)
mask := uint32(avgSize - 1)
- maxSize := avgSize * 2 // nolint:gomnd
- minSize := avgSize / 2 // nolint:gomnd
+ maxSize := avgSize * 2 //nolint:gomnd
+ minSize := avgSize / 2 //nolint:gomnd
return func() Splitter {
s := buzhash32.New()
diff --git a/repo/upgrade_lock.go b/repo/upgrade_lock.go
index 75d9937b5..c301db650 100644
--- a/repo/upgrade_lock.go
+++ b/repo/upgrade_lock.go
@@ -124,7 +124,7 @@ func (r *directRepository) CommitUpgrade(ctx context.Context) error {
// hence using this API could render the repository corrupted and unreadable by
// clients.
//
-// nolint:gocyclo
+//nolint:gocyclo
func (r *directRepository) RollbackUpgrade(ctx context.Context) error {
f := r.formatBlob
diff --git a/repo/upgrade_lock_test.go b/repo/upgrade_lock_test.go
index 7fd4f769d..1969161c8 100644
--- a/repo/upgrade_lock_test.go
+++ b/repo/upgrade_lock_test.go
@@ -28,7 +28,7 @@
func TestFormatUpgradeSetLock(t *testing.T) {
ctx, env := repotesting.NewEnvironment(t, format.FormatVersion1, repotesting.Options{OpenOptions: func(opts *repo.Options) {
- // nolint:goconst
+ //nolint:goconst
opts.UpgradeOwnerID = "upgrade-owner"
}})
formatBlockCacheDuration := env.Repository.ClientOptions().FormatBlobCacheDuration
diff --git a/site/cli2md/cli2md.go b/site/cli2md/cli2md.go
index 378748a1d..24c31134c 100644
--- a/site/cli2md/cli2md.go
+++ b/site/cli2md/cli2md.go
@@ -29,7 +29,7 @@
dirMode = 0o750
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var overrideDefault = map[string]string{
"config-file": "repository.config",
"log-dir": "kopia",
@@ -244,7 +244,7 @@ func generateSubcommands(w io.Writer, dir, sectionTitle string, cmds []*kingpin.
}
subcommandSlug := strings.Replace(c.FullCommand, " ", "-", -1)
- helpSummary := strings.SplitN(c.Help, "\n", 2)[0] // nolint:gomnd
+ helpSummary := strings.SplitN(c.Help, "\n", 2)[0] //nolint:gomnd
helpSummary = strings.TrimSuffix(helpSummary, ".")
fmt.Fprintf(w, "* [`%v`](%v) - %v\n", c.FullCommand, subcommandSlug+"/", helpSummary)
generateSubcommandPage(filepath.Join(dir, subcommandSlug+".md"), c)
diff --git a/snapshot/manifest.go b/snapshot/manifest.go
index 8bcdda7aa..57fbc2fad 100644
--- a/snapshot/manifest.go
+++ b/snapshot/manifest.go
@@ -93,10 +93,10 @@ func (p Permissions) MarshalJSON() ([]byte, error) {
return nil, nil
}
- // nolint:gomnd
+ //nolint:gomnd
s := "0" + strconv.FormatInt(int64(p), 8)
- // nolint:wrapcheck
+ //nolint:wrapcheck
return json.Marshal(&s)
}
@@ -108,7 +108,7 @@ func (p *Permissions) UnmarshalJSON(b []byte) error {
return errors.Wrap(err, "unable to unmarshal JSON")
}
- // nolint:gomnd
+ //nolint:gomnd
v, err := strconv.ParseInt(s, 0, 32)
if err != nil {
return errors.Wrap(err, "unable to parse permission string")
diff --git a/snapshot/policy/policy_manager.go b/snapshot/policy/policy_manager.go
index 14d8a9b55..f72becf0a 100644
--- a/snapshot/policy/policy_manager.go
+++ b/snapshot/policy/policy_manager.go
@@ -33,7 +33,8 @@
const typeKey = manifest.TypeLabelKey
// GlobalPolicySourceInfo is a source where global policy is attached.
-// nolint:gochecknoglobals
+//
+//nolint:gochecknoglobals
var GlobalPolicySourceInfo = snapshot.SourceInfo{}
var log = logging.Module("kopia/snapshot/policy")
diff --git a/snapshot/policy/policy_merge_test.go b/snapshot/policy/policy_merge_test.go
index cb4bb9f46..d4c66d0a2 100644
--- a/snapshot/policy/policy_merge_test.go
+++ b/snapshot/policy/policy_merge_test.go
@@ -58,7 +58,7 @@ func TestPolicyMerge(t *testing.T) {
testPolicyMerge(t, reflect.TypeOf(policy.Policy{}), reflect.TypeOf(policy.Definition{}), "")
}
-// nolint:thelper
+//nolint:thelper
func testPolicyMerge(t *testing.T, policyType, definitionType reflect.Type, prefix string) {
for i := 0; i < policyType.NumField(); i++ {
f := policyType.Field(i)
@@ -80,7 +80,7 @@ func testPolicyMerge(t *testing.T, policyType, definitionType reflect.Type, pref
}
}
-// nolint:thelper
+//nolint:thelper
func testPolicyMergeSingleField(t *testing.T, fieldName string, typ reflect.Type) {
var v0, v1, v2 reflect.Value
diff --git a/snapshot/policy/policy_tree.go b/snapshot/policy/policy_tree.go
index 109c6baf3..75952b577 100644
--- a/snapshot/policy/policy_tree.go
+++ b/snapshot/policy/policy_tree.go
@@ -4,7 +4,7 @@
"strings"
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
// defaultActionsPolicy is the default actions policy.
defaultActionsPolicy = ActionsPolicy{}
@@ -56,7 +56,7 @@
MaxParallelFileReads: nil, // defaults to runtime.NumCPUs()
// upload large files in chunks of 2 GiB
- ParallelUploadAboveSize: newOptionalInt64(2 << 30), // nolint:gomnd
+ ParallelUploadAboveSize: newOptionalInt64(2 << 30), //nolint:gomnd
}
// DefaultPolicy is a default policy returned by policy tree in absence of other policies.
diff --git a/snapshot/policy/retention_policy.go b/snapshot/policy/retention_policy.go
index 6d4c24cfb..aab5a27b9 100644
--- a/snapshot/policy/retention_policy.go
+++ b/snapshot/policy/retention_policy.go
@@ -315,11 +315,11 @@ func CompactPins(pins []string) []string {
func SortRetentionTags(tags []string) {
retentionPrefixSortValue := map[string]int{
"latest": 1,
- "hourly": 2, // nolint:gomnd
- "daily": 3, // nolint:gomnd
- "weekly": 4, // nolint:gomnd
- "monthly": 5, // nolint:gomnd
- "annual": 6, // nolint:gomnd
+ "hourly": 2, //nolint:gomnd
+ "daily": 3, //nolint:gomnd
+ "weekly": 4, //nolint:gomnd
+ "monthly": 5, //nolint:gomnd
+ "annual": 6, //nolint:gomnd
}
sort.Slice(tags, func(i, j int) bool {
diff --git a/snapshot/restore/local_fs_output.go b/snapshot/restore/local_fs_output.go
index 48a67fd9c..238c518a6 100644
--- a/snapshot/restore/local_fs_output.go
+++ b/snapshot/restore/local_fs_output.go
@@ -335,7 +335,7 @@ func isWindows() bool {
func (o *FilesystemOutput) createDirectory(ctx context.Context, path string) error {
switch st, err := os.Stat(path); {
case os.IsNotExist(err):
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.MkdirAll(path, outputDirMode)
case err != nil:
return errors.Wrap(err, "failed to stat path "+path)
@@ -404,7 +404,7 @@ func (o *FilesystemOutput) copyFileContent(ctx context.Context, targetPath strin
targetPath = atomicfile.MaybePrefixLongFilenameOnWindows(targetPath)
if o.WriteFilesAtomically {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return atomicfile.Write(targetPath, r)
}
diff --git a/snapshot/restore/local_fs_output_darwin.go b/snapshot/restore/local_fs_output_darwin.go
index 2d48ecb91..4151bc561 100644
--- a/snapshot/restore/local_fs_output_darwin.go
+++ b/snapshot/restore/local_fs_output_darwin.go
@@ -8,17 +8,17 @@
)
func symlinkChown(path string, uid, gid int) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return unix.Lchown(path, uid, gid)
}
func symlinkChmod(path string, mode os.FileMode) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
}
func symlinkChtimes(linkPath string, atime, mtime time.Time) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return unix.Lutimes(linkPath, []unix.Timeval{
unix.NsecToTimeval(atime.UnixNano()),
unix.NsecToTimeval(mtime.UnixNano()),
diff --git a/snapshot/restore/local_fs_output_unix.go b/snapshot/restore/local_fs_output_unix.go
index 7f1122de7..c363915d9 100644
--- a/snapshot/restore/local_fs_output_unix.go
+++ b/snapshot/restore/local_fs_output_unix.go
@@ -11,7 +11,7 @@
)
func symlinkChown(path string, uid, gid int) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return unix.Lchown(path, uid, gid)
}
@@ -21,7 +21,7 @@ func symlinkChmod(path string, mode os.FileMode) error {
}
func symlinkChtimes(linkPath string, atime, mtime time.Time) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return unix.Lutimes(linkPath, []unix.Timeval{
unix.NsecToTimeval(atime.UnixNano()),
unix.NsecToTimeval(mtime.UnixNano()),
diff --git a/snapshot/restore/local_fs_output_windows.go b/snapshot/restore/local_fs_output_windows.go
index c6477f2f3..5c6d2c94d 100644
--- a/snapshot/restore/local_fs_output_windows.go
+++ b/snapshot/restore/local_fs_output_windows.go
@@ -40,6 +40,6 @@ func symlinkChtimes(linkPath string, atime, mtime time.Time) error {
defer windows.CloseHandle(h) //nolint:errcheck
- // nolint:wrapcheck
+ //nolint:wrapcheck
return windows.SetFileTime(h, &ftw, &fta, &ftw)
}
diff --git a/snapshot/restore/shallow_helper.go b/snapshot/restore/shallow_helper.go
index 36b6ee004..24dc9df20 100644
--- a/snapshot/restore/shallow_helper.go
+++ b/snapshot/restore/shallow_helper.go
@@ -22,7 +22,7 @@ func PathIfPlaceholder(path string) string {
// exist without experiencing errors caused by long file names.
func SafeRemoveAll(path string) error {
if SafelySuffixablePath(path) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return os.RemoveAll(atomicfile.MaybePrefixLongFilenameOnWindows(path + localfs.ShallowEntrySuffix))
}
diff --git a/snapshot/restore/tar_output.go b/snapshot/restore/tar_output.go
index 30f91a7eb..206e95360 100644
--- a/snapshot/restore/tar_output.go
+++ b/snapshot/restore/tar_output.go
@@ -60,7 +60,7 @@ func (o *TarOutput) Close(ctx context.Context) error {
return errors.Wrap(err, "error closing tar")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return o.w.Close()
}
diff --git a/snapshot/restore/zip_output.go b/snapshot/restore/zip_output.go
index 02a7330a4..c6c7dccad 100644
--- a/snapshot/restore/zip_output.go
+++ b/snapshot/restore/zip_output.go
@@ -44,7 +44,7 @@ func (o *ZipOutput) Close(ctx context.Context) error {
return errors.Wrap(err, "error closing zip")
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return o.w.Close()
}
diff --git a/snapshot/snapshotfs/all_sources.go b/snapshot/snapshotfs/all_sources.go
index a3e3fa56b..1e1dd2765 100644
--- a/snapshot/snapshotfs/all_sources.go
+++ b/snapshot/snapshotfs/all_sources.go
@@ -30,7 +30,7 @@ func (s *repositoryAllSources) ModTime() time.Time {
}
func (s *repositoryAllSources) Mode() os.FileMode {
- return 0o555 | os.ModeDir // nolint:gomnd
+ return 0o555 | os.ModeDir //nolint:gomnd
}
func (s *repositoryAllSources) Size() int64 {
@@ -61,7 +61,7 @@ func (s *repositoryAllSources) Close() {
}
func (s *repositoryAllSources) Child(ctx context.Context, name string) (fs.Entry, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return fs.IterateEntriesAndFindChild(ctx, s, name)
}
diff --git a/snapshot/snapshotfs/dir_manifest_builder.go b/snapshot/snapshotfs/dir_manifest_builder.go
index 0f3f22619..5ec2d36da 100644
--- a/snapshot/snapshotfs/dir_manifest_builder.go
+++ b/snapshot/snapshotfs/dir_manifest_builder.go
@@ -41,7 +41,7 @@ func (b *DirManifestBuilder) AddEntry(de *snapshot.DirEntry) {
b.summary.MaxModTime = de.ModTime
}
- // nolint:exhaustive
+ //nolint:exhaustive
switch de.Type {
case snapshot.EntryTypeSymlink:
b.summary.TotalSymlinkCount++
diff --git a/snapshot/snapshotfs/dir_rewriter.go b/snapshot/snapshotfs/dir_rewriter.go
index f7dc8be81..a98176fc8 100644
--- a/snapshot/snapshotfs/dir_rewriter.go
+++ b/snapshot/snapshotfs/dir_rewriter.go
@@ -59,7 +59,7 @@ type DirRewriter struct {
}
type dirRewriterRequest struct {
- ctx context.Context // nolint:containedctx
+ ctx context.Context //nolint:containedctx
parentPath string
input *snapshot.DirEntry
result *snapshot.DirEntry
@@ -92,7 +92,7 @@ func (rw *DirRewriter) getCachedReplacement(ctx context.Context, parentPath stri
// see if we already processed this exact directory entry
if v, ok := rw.cache.Load(key); ok {
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return v.(*snapshot.DirEntry).Clone(), nil
}
@@ -116,7 +116,7 @@ func (rw *DirRewriter) getCachedReplacement(ctx context.Context, parentPath stri
actual, _ := rw.cache.LoadOrStore(key, result.Clone())
- // nolint:forcetypeassert
+ //nolint:forcetypeassert
return actual.(*snapshot.DirEntry), nil
}
diff --git a/snapshot/snapshotfs/estimate.go b/snapshot/snapshotfs/estimate.go
index 37ca7f430..d77662f58 100644
--- a/snapshot/snapshotfs/estimate.go
+++ b/snapshot/snapshotfs/estimate.go
@@ -114,7 +114,7 @@ type processEntryError struct {
// see if the context got canceled
select {
case <-ctx.Done():
- // nolint:wrapcheck
+ //nolint:wrapcheck
return ctx.Err()
default:
diff --git a/snapshot/snapshotfs/repofs.go b/snapshot/snapshotfs/repofs.go
index 4af1b8ac7..9dde0be9e 100644
--- a/snapshot/snapshotfs/repofs.go
+++ b/snapshot/snapshotfs/repofs.go
@@ -280,7 +280,7 @@ func DirectoryEntry(rep repo.Repository, objectID object.ID, dirSummary *fs.Dire
DirSummary: dirSummary,
})
- return d.(fs.Directory) // nolint:forcetypeassert
+ return d.(fs.Directory) //nolint:forcetypeassert
}
// SnapshotRoot returns fs.Entry representing the root of a snapshot.
diff --git a/snapshot/snapshotfs/source_directories.go b/snapshot/snapshotfs/source_directories.go
index 104b0ea71..fdb0a6ea4 100644
--- a/snapshot/snapshotfs/source_directories.go
+++ b/snapshot/snapshotfs/source_directories.go
@@ -30,7 +30,7 @@ func (s *sourceDirectories) Name() string {
}
func (s *sourceDirectories) Mode() os.FileMode {
- return 0o555 | os.ModeDir // nolint:gomnd
+ return 0o555 | os.ModeDir //nolint:gomnd
}
func (s *sourceDirectories) ModTime() time.Time {
@@ -65,7 +65,7 @@ func (s *sourceDirectories) Close() {
}
func (s *sourceDirectories) Child(ctx context.Context, name string) (fs.Entry, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return fs.IterateEntriesAndFindChild(ctx, s, name)
}
diff --git a/snapshot/snapshotfs/source_snapshots.go b/snapshot/snapshotfs/source_snapshots.go
index 43cd39d01..478903f86 100644
--- a/snapshot/snapshotfs/source_snapshots.go
+++ b/snapshot/snapshotfs/source_snapshots.go
@@ -28,7 +28,7 @@ func (s *sourceSnapshots) Name() string {
}
func (s *sourceSnapshots) Mode() os.FileMode {
- return 0o555 | os.ModeDir // nolint:gomnd
+ return 0o555 | os.ModeDir //nolint:gomnd
}
func (s *sourceSnapshots) Size() int64 {
@@ -63,7 +63,7 @@ func (s *sourceSnapshots) Close() {
}
func (s *sourceSnapshots) Child(ctx context.Context, name string) (fs.Entry, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return fs.IterateEntriesAndFindChild(ctx, s, name)
}
diff --git a/snapshot/snapshotfs/upload.go b/snapshot/snapshotfs/upload.go
index 5f1331f54..8fdb027a3 100644
--- a/snapshot/snapshotfs/upload.go
+++ b/snapshot/snapshotfs/upload.go
@@ -124,7 +124,6 @@ func (u *Uploader) IsCanceled() bool {
return u.incompleteReason() != ""
}
-//
func (u *Uploader) incompleteReason() string {
if c := atomic.LoadInt32(&u.canceled) != 0; c {
return IncompleteReasonCanceled
@@ -247,7 +246,7 @@ func (u *Uploader) uploadFileData(ctx context.Context, parentCheckpointRegistry
defer writer.Close() //nolint:errcheck
parentCheckpointRegistry.addCheckpointCallback(fname, func() (*snapshot.DirEntry, error) {
- // nolint:govet
+ //nolint:govet
checkpointID, err := writer.Checkpoint()
if err != nil {
return nil, errors.Wrap(err, "checkpoint error")
@@ -396,7 +395,7 @@ func (u *Uploader) copyWithProgress(dst io.Writer, src io.Reader) (int64, error)
}
if writeErr != nil {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return written, writeErr
}
@@ -410,7 +409,7 @@ func (u *Uploader) copyWithProgress(dst io.Writer, src io.Reader) (int64, error)
break
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return written, readErr
}
}
@@ -693,7 +692,7 @@ func findCachedEntry(ctx context.Context, entryRelativePath string, entry fs.Ent
func (u *Uploader) maybeIgnoreCachedEntry(ctx context.Context, ent fs.Entry) fs.Entry {
if h, ok := ent.(object.HasObjectID); ok {
- if 100*rand.Float64() < u.ForceHashPercentage { // nolint:gosec
+ if 100*rand.Float64() < u.ForceHashPercentage { //nolint:gosec
uploadLog(ctx).Debugw("re-hashing cached object", "oid", h.ObjectID())
return nil
}
@@ -768,7 +767,7 @@ type processEntryError struct {
return dirReadError{err}
}
-// nolint:funlen
+//nolint:funlen
func (u *Uploader) processSingle(
ctx context.Context,
entry fs.Entry,
@@ -937,7 +936,7 @@ func maybeLogEntryProcessed(logger logging.Logger, level policy.LogDetail, msg,
keyValuePairs = append(keyValuePairs, "dur", timer.Elapsed())
}
- // nolint:nestif
+ //nolint:nestif
if de != nil {
if level >= minDetailLevelSize {
if ds := de.DirSummary; ds != nil {
diff --git a/snapshot/snapshotfs/upload_actions.go b/snapshot/snapshotfs/upload_actions.go
index 2e2c57971..b2619d3f7 100644
--- a/snapshot/snapshotfs/upload_actions.go
+++ b/snapshot/snapshotfs/upload_actions.go
@@ -112,16 +112,16 @@ func prepareCommandForAction(ctx context.Context, actionType string, h *policy.A
switch {
case runtime.GOOS == "windows":
- c = exec.CommandContext(ctx, os.Getenv("COMSPEC"), "/c", scriptFile) // nolint:gosec
+ c = exec.CommandContext(ctx, os.Getenv("COMSPEC"), "/c", scriptFile) //nolint:gosec
case strings.HasPrefix(h.Script, "#!"):
// on unix if a script starts with #!, it will run under designated interpreter
- c = exec.CommandContext(ctx, scriptFile) // nolint:gosec
+ c = exec.CommandContext(ctx, scriptFile) //nolint:gosec
default:
- c = exec.CommandContext(ctx, "sh", "-e", scriptFile) // nolint:gosec
+ c = exec.CommandContext(ctx, "sh", "-e", scriptFile) //nolint:gosec
}
case h.Command != "":
- c = exec.CommandContext(ctx, h.Command, h.Arguments...) // nolint:gosec
+ c = exec.CommandContext(ctx, h.Command, h.Arguments...) //nolint:gosec
default:
cancel()
@@ -177,7 +177,7 @@ func runActionCommand(
func parseCaptures(v []byte, captures map[string]string) error {
s := bufio.NewScanner(bytes.NewReader(v))
for s.Scan() {
- // nolint:gomnd
+ //nolint:gomnd
l := strings.SplitN(s.Text(), "=", 2)
if len(l) <= 1 {
continue
@@ -189,7 +189,7 @@ func parseCaptures(v []byte, captures map[string]string) error {
}
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return s.Err()
}
diff --git a/snapshot/snapshotfs/upload_test.go b/snapshot/snapshotfs/upload_test.go
index a8832f3c2..e23430dea 100644
--- a/snapshot/snapshotfs/upload_test.go
+++ b/snapshot/snapshotfs/upload_test.go
@@ -133,7 +133,7 @@ func newUploadTestHarness(ctx context.Context, t *testing.T) *uploadTestHarness
return th
}
-// nolint:gocyclo
+//nolint:gocyclo
func TestUpload(t *testing.T) {
ctx := testlogging.Context(t)
th := newUploadTestHarness(ctx, t)
@@ -1088,7 +1088,7 @@ type loggedAction struct {
keysAndValues map[string]interface{}
}
-// nolint:maintidx
+//nolint:maintidx
func TestUploadLogging(t *testing.T) {
sourceDir := mockfs.NewDirectory()
sourceDir.AddFile("f1", []byte{1, 2, 3}, defaultPermissions)
diff --git a/snapshot/snapshotmaintenance/snapshotmaintenance.go b/snapshot/snapshotmaintenance/snapshotmaintenance.go
index bf92a1964..35d1ea136 100644
--- a/snapshot/snapshotmaintenance/snapshotmaintenance.go
+++ b/snapshot/snapshotmaintenance/snapshotmaintenance.go
@@ -13,7 +13,7 @@
// Run runs the complete snapshot and repository maintenance.
func Run(ctx context.Context, dr repo.DirectRepositoryWriter, mode maintenance.Mode, force bool, safety maintenance.SafetyParameters) error {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return maintenance.RunExclusive(ctx, dr, mode, force,
func(ctx context.Context, runParams maintenance.RunParameters) error {
// run snapshot GC before full maintenance
@@ -23,7 +23,7 @@ func(ctx context.Context, runParams maintenance.RunParameters) error {
}
}
- // nolint:wrapcheck
+ //nolint:wrapcheck
return maintenance.Run(ctx, runParams, safety)
})
}
diff --git a/snapshot/snapshotmaintenance/snapshotmaintenance_test.go b/snapshot/snapshotmaintenance/snapshotmaintenance_test.go
index dc54f6eb1..c3b1ca4d3 100644
--- a/snapshot/snapshotmaintenance/snapshotmaintenance_test.go
+++ b/snapshot/snapshotmaintenance/snapshotmaintenance_test.go
@@ -82,13 +82,13 @@ func (s *formatSpecificTestSuite) TestSnapshotGCSimple(t *testing.T) {
// Test maintenance when a directory is deleted and then reused.
// Scenario / events:
-// - create snapshot s1 on a directory d is created
-// - delete s1
-// - let enough time pass so the contents in s1 are eligible for GC mark/deletion
-// - concurrently create a snapshot s2 on directory d while performing full
-// maintenance
-// - Check full maintenance can be run afterwards
-// - Verify contents.
+// - create snapshot s1 on a directory d is created
+// - delete s1
+// - let enough time pass so the contents in s1 are eligible for GC mark/deletion
+// - concurrently create a snapshot s2 on directory d while performing full
+// maintenance
+// - Check full maintenance can be run afterwards
+// - Verify contents.
func (s *formatSpecificTestSuite) TestMaintenanceReuseDirManifest(t *testing.T) {
ctx := testlogging.Context(t)
th := newTestHarness(t, s.formatVersion)
diff --git a/tests/end_to_end_test/api_server_repository_test.go b/tests/end_to_end_test/api_server_repository_test.go
index 32a0eb4d1..53c64c00e 100644
--- a/tests/end_to_end_test/api_server_repository_test.go
+++ b/tests/end_to_end_test/api_server_repository_test.go
@@ -48,7 +48,7 @@ func TestAPIServerRepository_DisableGRPC_htpasswd(t *testing.T) {
testAPIServerRepository(t, []string{"--no-grpc"}, false, false)
}
-// nolint:thelper
+//nolint:thelper
func testAPIServerRepository(t *testing.T, serverStartArgs []string, useGRPC, allowRepositoryUsers bool) {
ctx := testlogging.Context(t)
@@ -266,7 +266,7 @@ func testAPIServerRepository(t *testing.T, serverStartArgs []string, useGRPC, al
Hostname: "bar",
}, nil, "baz")
- // nolint:forbidigo
+ //nolint:forbidigo
if dur := timer.Elapsed(); dur > 15*time.Second {
t.Fatalf("failed connection took %v", dur)
}
diff --git a/tests/end_to_end_test/content_info_test.go b/tests/end_to_end_test/content_info_test.go
index b0b239f2d..15291a37d 100644
--- a/tests/end_to_end_test/content_info_test.go
+++ b/tests/end_to_end_test/content_info_test.go
@@ -14,7 +14,7 @@
"github.com/kopia/kopia/tests/testenv"
)
-// nolint:thelper
+//nolint:thelper
func (s *formatSpecificTestSuite) TestContentListAndStats(t *testing.T) {
runner := testenv.NewInProcRunner(t)
e := testenv.NewCLITest(t, s.formatFlags, runner)
diff --git a/tests/end_to_end_test/restore_fail_test.go b/tests/end_to_end_test/restore_fail_test.go
index 6720dd9d1..da0d745a2 100644
--- a/tests/end_to_end_test/restore_fail_test.go
+++ b/tests/end_to_end_test/restore_fail_test.go
@@ -16,15 +16,16 @@
// TestRestoreFail
// Motivation: Cause a kopia snapshot restore command to fail, ensure non-zero exit code.
// Description:
-// 1. Create kopia repo
-// 2. Create a directory tree for testing
-// 3. Issue kopia blob list before issuing any snapshots
-// 4. Create a snapshot of the source directory, parse the snapshot ID
-// 5. Issue another kopia blob list, find the blob IDs that were not
-// present in the previous blob list.
-// 6. Find a pack blob by searching for a blob ID with the "p" prefix
-// 7. Issue kopia blob delete on the ID of the found pack blob
-// 8. Attempt a snapshot restore on the snapshot, expecting failure
+// 1. Create kopia repo
+// 2. Create a directory tree for testing
+// 3. Issue kopia blob list before issuing any snapshots
+// 4. Create a snapshot of the source directory, parse the snapshot ID
+// 5. Issue another kopia blob list, find the blob IDs that were not
+// present in the previous blob list.
+// 6. Find a pack blob by searching for a blob ID with the "p" prefix
+// 7. Issue kopia blob delete on the ID of the found pack blob
+// 8. Attempt a snapshot restore on the snapshot, expecting failure
+//
// Pass Criteria: Kopia commands issue successfully, except the final restore
// command is expected to fail. Expect to find new blobs after a snapshot
// and expect one of them is a pack blob type prefixed with "p".
diff --git a/tests/end_to_end_test/server_start_test.go b/tests/end_to_end_test/server_start_test.go
index fb5a9e019..74d272fb2 100644
--- a/tests/end_to_end_test/server_start_test.go
+++ b/tests/end_to_end_test/server_start_test.go
@@ -570,7 +570,7 @@ func verifySourceCount(t *testing.T, cli *apiclient.KopiaAPIClient, match *snaps
func verifyUIServedWithCorrectTitle(t *testing.T, cli *apiclient.KopiaAPIClient, sp testutil.ServerParameters) {
t.Helper()
- req, err := http.NewRequestWithContext(context.Background(), "GET", sp.BaseURL, http.NoBody)
+ req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, sp.BaseURL, http.NoBody)
require.NoError(t, err)
req.SetBasicAuth("kopia", sp.Password)
diff --git a/tests/end_to_end_test/shallowrestore_test.go b/tests/end_to_end_test/shallowrestore_test.go
index ad9225a15..60fb6a1fb 100644
--- a/tests/end_to_end_test/shallowrestore_test.go
+++ b/tests/end_to_end_test/shallowrestore_test.go
@@ -308,7 +308,7 @@ func deepenSubtreeDirectory(m *mutatorArgs) {
// 3. Original shouldn't require any changes as the entire tree should
// be there.
-} // nolint:wsl
+} //nolint:wsl
// deepenSubtreeFile reifies a shallow file entry with its actual contents.
func deepenSubtreeFile(m *mutatorArgs) {
@@ -323,7 +323,7 @@ func deepenSubtreeFile(m *mutatorArgs) {
m.e.RunAndExpectSuccess(m.t, "restore", "--shallow=1000", fileinshallow)
// 3. Original shouldn't require any changes.
-} // nolint:wsl
+} //nolint:wsl
// deepenOneSubtreeLevel reifies a shallow directory entry with one level
// of reification. In particular: given a path into a shallow restored
@@ -354,7 +354,7 @@ func deepenOneSubtreeLevel(m *mutatorArgs) {
compareShallowToOriginalDir(m.t, m.rdc, localfs.TrimShallowSuffix(origpath), localfs.TrimShallowSuffix(fileinshallow), 1)
// 3. Original shouldn't require any changes.
-} // nolint:wsl
+} //nolint:wsl
// removeEntry tests that we can remove both directory and file shallow
// placeholders.
@@ -874,6 +874,7 @@ func compareShallowToOriginalDir(t *testing.T, rdc *repoDirEntryCache, original,
// FileInfo info. There are three cases: relpathdepth > depth: the part
// of the original tree not shallow restored; relpathdepth == depth: the
// shallow placeholders; relpathdepth < depth: the fully restored portion.
+//
//nolint:gocyclo,cyclop
func verifyShallowVsOriginalFile(t *testing.T, rdc *repoDirEntryCache, shallow, relpath, opath string, depth int, info os.FileInfo) {
t.Helper()
diff --git a/tests/end_to_end_test/snapshot_create_test.go b/tests/end_to_end_test/snapshot_create_test.go
index 81a24698a..3dbb9042c 100644
--- a/tests/end_to_end_test/snapshot_create_test.go
+++ b/tests/end_to_end_test/snapshot_create_test.go
@@ -222,7 +222,7 @@ func TestSnapshottingCacheDirectory(t *testing.T) {
}
}
-// nolint:maintidx
+//nolint:maintidx
func TestSnapshotCreateWithIgnore(t *testing.T) {
cases := []struct {
desc string
diff --git a/tests/end_to_end_test/snapshot_fail_test.go b/tests/end_to_end_test/snapshot_fail_test.go
index 31e9d1336..993c210e3 100644
--- a/tests/end_to_end_test/snapshot_fail_test.go
+++ b/tests/end_to_end_test/snapshot_fail_test.go
@@ -69,7 +69,7 @@ func cond(c bool, a, b int) int {
return b
}
-// nolint:thelper,cyclop
+//nolint:thelper,cyclop
func testSnapshotFail(t *testing.T, isFailFast bool, snapshotCreateFlags []string, snapshotCreateEnv map[string]string) {
if runtime.GOOS == windowsOSName {
t.Skip("this test does not work on Windows")
@@ -298,7 +298,8 @@ func createSimplestFileTree(t *testing.T, dirDepth, currDepth int, currPath stri
// files and directories (if present). It issues the kopia snapshot command
// against "source" and will test permissions against all entries in "parentDir".
// It returns the number of successful snapshot operations.
-// nolint:thelper
+//
+//nolint:thelper
func testPermissions(t *testing.T, e *testenv.CLITest, source, modifyEntry, restoreDir string, expect map[os.FileMode]expectedSnapshotResult, snapshotCreateFlags []string, snapshotCreateEnv map[string]string) int {
var numSuccessfulSnapshots int
diff --git a/tests/htmlui_e2e_test/htmlui_e2e_test.go b/tests/htmlui_e2e_test/htmlui_e2e_test.go
index f2c70aeed..b8a11b97a 100644
--- a/tests/htmlui_e2e_test/htmlui_e2e_test.go
+++ b/tests/htmlui_e2e_test/htmlui_e2e_test.go
@@ -19,7 +19,7 @@
"github.com/kopia/kopia/tests/testenv"
)
-// nolint:thelper
+//nolint:thelper
func runInBrowser(t *testing.T, run func(ctx context.Context, sp *testutil.ServerParameters, tc *TestContext)) {
if os.Getenv("HTMLUI_E2E_TEST") == "" {
t.Skip()
diff --git a/tests/repository_stress_test/repomodel/content_set.go b/tests/repository_stress_test/repomodel/content_set.go
index a9d12a07a..535a41c76 100644
--- a/tests/repository_stress_test/repomodel/content_set.go
+++ b/tests/repository_stress_test/repomodel/content_set.go
@@ -22,7 +22,7 @@ func (s *ContentSet) PickRandom() content.ID {
return content.EmptyID
}
- // nolint:gosec
+ //nolint:gosec
return s.ids[rand.Intn(len(s.ids))]
}
diff --git a/tests/repository_stress_test/repomodel/manifest_set.go b/tests/repository_stress_test/repomodel/manifest_set.go
index f04ecbb06..def225101 100644
--- a/tests/repository_stress_test/repomodel/manifest_set.go
+++ b/tests/repository_stress_test/repomodel/manifest_set.go
@@ -22,7 +22,7 @@ func (s *ManifestSet) PickRandom() manifest.ID {
return ""
}
- // nolint:gosec
+ //nolint:gosec
return s.ids[rand.Intn(len(s.ids))]
}
diff --git a/tests/repository_stress_test/repository_stress_test.go b/tests/repository_stress_test/repository_stress_test.go
index 20953ce51..c8c2e6257 100644
--- a/tests/repository_stress_test/repository_stress_test.go
+++ b/tests/repository_stress_test/repository_stress_test.go
@@ -185,7 +185,7 @@ func TestStressContentReadHeavy(t *testing.T) {
})
}
-// nolint:thelper
+//nolint:thelper
func runStress(t *testing.T, opt *StressOptions) {
if testing.Short() {
return
diff --git a/tests/robustness/engine/action.go b/tests/robustness/engine/action.go
index 05e4abe6c..384b2894c 100644
--- a/tests/robustness/engine/action.go
+++ b/tests/robustness/engine/action.go
@@ -258,7 +258,7 @@ func restoreIntoDataDirectoryAction(ctx context.Context, e *Engine, opts map[str
if err := e.Checker.RestoreSnapshotToPath(ctx, snapID, e.FileWriter.DataDirectory(ctx), b, opts); err != nil {
log.Print(b.String())
- return nil, err // nolint:wrapcheck
+ return nil, err //nolint:wrapcheck
}
return nil, nil
diff --git a/tests/robustness/fiofilewriter/fio_filewriter.go b/tests/robustness/fiofilewriter/fio_filewriter.go
index abc51fbd9..b57122a6a 100644
--- a/tests/robustness/fiofilewriter/fio_filewriter.go
+++ b/tests/robustness/fiofilewriter/fio_filewriter.go
@@ -73,14 +73,14 @@ func (fw *FileWriter) DataDirectory(ctx context.Context) string {
// WriteRandomFiles writes a number of files at some filesystem depth, based
// on its input options.
//
-// - MaxDirDepthField
-// - MaxFileSizeField
-// - MinFileSizeField
-// - MaxNumFilesPerWriteField
-// - MinNumFilesPerWriteField
-// - MaxDedupePercentField
-// - MinDedupePercentField
-// - DedupePercentStepField
+// - MaxDirDepthField
+// - MaxFileSizeField
+// - MinFileSizeField
+// - MaxNumFilesPerWriteField
+// - MinNumFilesPerWriteField
+// - MaxDedupePercentField
+// - MinDedupePercentField
+// - DedupePercentStepField
//
// Default values are used for missing options. The method
// returns the effective options used along with the selected depth
@@ -164,7 +164,7 @@ func (fw *FileWriter) WriteRandomFiles(ctx context.Context, opts map[string]stri
// DeleteRandomSubdirectory deletes a random directory up to a specified depth,
// based on its input options:
//
-// - MaxDirDepthField
+// - MaxDirDepthField
//
// Default values are used for missing options. The method
// returns the effective options used along with the selected depth
@@ -198,8 +198,8 @@ func (fw *FileWriter) DeleteRandomSubdirectory(ctx context.Context, opts map[str
// DeleteDirectoryContents deletes some of the contents of random directory up to a specified depth,
// based on its input options:
//
-// - MaxDirDepthField
-// - DeletePercentOfContentsField
+// - MaxDirDepthField
+// - DeletePercentOfContentsField
//
// Default values are used for missing options. The method
// returns the effective options used along with the selected depth
diff --git a/tests/robustness/multiclient_test/framework/harness.go b/tests/robustness/multiclient_test/framework/harness.go
index 1b1631d0f..3ca3765a0 100644
--- a/tests/robustness/multiclient_test/framework/harness.go
+++ b/tests/robustness/multiclient_test/framework/harness.go
@@ -174,7 +174,7 @@ func (th *TestHarness) getEngine(ctx context.Context) bool {
SyncRepositories: true,
}
- eng, err := engine.New(args) // nolint:govet
+ eng, err := engine.New(args) //nolint:govet
if err != nil {
log.Println("Error on engine creation:", err)
return false
diff --git a/tests/robustness/multiclient_test/multiclient_test.go b/tests/robustness/multiclient_test/multiclient_test.go
index 41480837a..cf31ad14b 100644
--- a/tests/robustness/multiclient_test/multiclient_test.go
+++ b/tests/robustness/multiclient_test/multiclient_test.go
@@ -153,7 +153,7 @@ func TestRandomizedSmall(t *testing.T) {
err := tryRestoreIntoDataDirectory(ctx, t)
require.NoError(t, err)
- // nolint:forbidigo
+ //nolint:forbidigo
for st.Elapsed() <= *randomizedTestDur {
err := tryRandomAction(ctx, t, opts)
require.NoError(t, err)
diff --git a/tests/robustness/pathlock/path_lock.go b/tests/robustness/pathlock/path_lock.go
index 0a0ad7677..6194e9ab2 100644
--- a/tests/robustness/pathlock/path_lock.go
+++ b/tests/robustness/pathlock/path_lock.go
@@ -13,12 +13,12 @@
// A call to Lock a given path will block any asynchronous calls to Lock
// that same path, or any parent or child path in the same sub-tree.
// For example:
-// - Lock path /a/b/c
-// - Blocks a Lock call for the same path /a/b/c
-// - Blocks a Lock call for path /a/b or /a
-// - Blocks a Lock call for path /a/b/c/d
-// - Allows a Lock call for path /a/b/x
-// - Allows a Lock call for path /a/x
+// - Lock path /a/b/c
+// - Blocks a Lock call for the same path /a/b/c
+// - Blocks a Lock call for path /a/b or /a
+// - Blocks a Lock call for path /a/b/c/d
+// - Allows a Lock call for path /a/b/x
+// - Allows a Lock call for path /a/x
type Locker interface {
Lock(path string) (Unlocker, error)
}
diff --git a/tests/robustness/robustness_test/main_test.go b/tests/robustness/robustness_test/main_test.go
index cfcb29bcb..43cd8989b 100644
--- a/tests/robustness/robustness_test/main_test.go
+++ b/tests/robustness/robustness_test/main_test.go
@@ -198,7 +198,7 @@ func (th *kopiaRobustnessTestHarness) getEngine() bool {
SyncRepositories: true,
}
- eng, err := engine.New(args) // nolint:govet
+ eng, err := engine.New(args) //nolint:govet
if err != nil {
log.Println("Error on engine creation:", err)
return false
diff --git a/tests/robustness/snapmeta/kopia_connector_test.go b/tests/robustness/snapmeta/kopia_connector_test.go
index 544e44f1c..2c5a48e97 100644
--- a/tests/robustness/snapmeta/kopia_connector_test.go
+++ b/tests/robustness/snapmeta/kopia_connector_test.go
@@ -10,7 +10,7 @@
)
func TestKopiaConnector(t *testing.T) {
- assert := assert.New(t) // nolint:gocritic
+ assert := assert.New(t) //nolint:gocritic
t.Setenv("KOPIA_EXE", "kopia.exe")
diff --git a/tests/robustness/snapmeta/kopia_snapshotter.go b/tests/robustness/snapmeta/kopia_snapshotter.go
index d5052615a..5a883ebc8 100644
--- a/tests/robustness/snapmeta/kopia_snapshotter.go
+++ b/tests/robustness/snapmeta/kopia_snapshotter.go
@@ -164,13 +164,13 @@ func (ks *KopiaSnapshotter) ConnectOrCreateFilesystem(path string) error {
// ConnectOrCreateS3WithServer TBD: remove this.
func (ks *KopiaSnapshotter) ConnectOrCreateS3WithServer(serverAddr, bucketName, pathPrefix string) (*exec.Cmd, error) {
- // nolint:nilnil
+ //nolint:nilnil
return nil, nil
}
// ConnectOrCreateFilesystemWithServer TBD: remove this.
func (ks *KopiaSnapshotter) ConnectOrCreateFilesystemWithServer(serverAddr, repoPath string) (*exec.Cmd, error) {
- // nolint:nilnil
+ //nolint:nilnil
return nil, nil
}
diff --git a/tests/stress_test/stress_test.go b/tests/stress_test/stress_test.go
index 7f744d2bb..77bcb5334 100644
--- a/tests/stress_test/stress_test.go
+++ b/tests/stress_test/stress_test.go
@@ -44,7 +44,7 @@ func TestStressBlockManager(t *testing.T) {
stressTestWithStorage(t, memst, duration)
}
-// nolint:thelper
+//nolint:thelper
func stressTestWithStorage(t *testing.T, st blob.Storage, duration time.Duration) {
ctx := testlogging.Context(t)
@@ -80,7 +80,7 @@ func stressTestWithStorage(t *testing.T, st blob.Storage, duration time.Duration
})
}
-// nolint:thelper
+//nolint:thelper
func stressWorker(ctx context.Context, t *testing.T, deadline time.Time, openMgr func() (*content.WriteManager, error), seed int64) {
src := rand.NewSource(seed)
rnd := rand.New(src)
diff --git a/tests/testenv/cli_test_env.go b/tests/testenv/cli_test_env.go
index 0d8442a3e..ef42a8188 100644
--- a/tests/testenv/cli_test_env.go
+++ b/tests/testenv/cli_test_env.go
@@ -231,7 +231,7 @@ func (e *CLITest) Run(t *testing.T, expectedError bool, args ...string) (stdout,
require.NoError(t, gotErr, "unexpected error when running 'kopia %v' (stdout:\n%v\nstderr:\n%v", strings.Join(args, " "), strings.Join(stdout, "\n"), strings.Join(stderr, "\n"))
}
- // nolint:forbidigo
+ //nolint:forbidigo
t.Logf("finished in %v: 'kopia %v'", timer.Elapsed().Milliseconds(), strings.Join(args, " "))
return stdout, stderr, gotErr
diff --git a/tests/testenv/storage_inmemory.go b/tests/testenv/storage_inmemory.go
index bfd5219a1..7772de8c1 100644
--- a/tests/testenv/storage_inmemory.go
+++ b/tests/testenv/storage_inmemory.go
@@ -21,7 +21,7 @@ func (c *storageInMemoryFlags) Setup(_ cli.StorageProviderServices, cmd *kingpin
}
func (c *storageInMemoryFlags) Connect(ctx context.Context, isCreate bool, _ int) (blob.Storage, error) {
- // nolint:wrapcheck
+ //nolint:wrapcheck
return blob.NewStorage(ctx, blob.ConnectionInfo{
Type: repotesting.ReconnectableStorageType,
Config: &c.options,
diff --git a/tests/tools/fio/workload_test.go b/tests/tools/fio/workload_test.go
index 0f1a8f5c9..cf02ffee6 100644
--- a/tests/tools/fio/workload_test.go
+++ b/tests/tools/fio/workload_test.go
@@ -286,7 +286,7 @@ func TestDeleteContentsAtDepth(t *testing.T) {
}
}
-// nolint:thelper
+//nolint:thelper
func testDeleteContentsAtDepth(t *testing.T, prob float32, checker func(t *testing.T, fileCount int)) {
r, err := NewRunner()
require.NoError(t, err)
diff --git a/tests/tools/fswalker/fswalker.go b/tests/tools/fswalker/fswalker.go
index 69e6ce1db..1eed0abe2 100644
--- a/tests/tools/fswalker/fswalker.go
+++ b/tests/tools/fswalker/fswalker.go
@@ -13,7 +13,7 @@
"path/filepath"
"strings"
- // nolint:staticcheck
+ //nolint:staticcheck
"github.com/golang/protobuf/proto"
"github.com/google/fswalker"
fspb "github.com/google/fswalker/proto/fswalker"
diff --git a/tests/tools/fswalker/protofile/protofile.go b/tests/tools/fswalker/protofile/protofile.go
index b6e6fe213..fc688967d 100644
--- a/tests/tools/fswalker/protofile/protofile.go
+++ b/tests/tools/fswalker/protofile/protofile.go
@@ -6,7 +6,7 @@
"bytes"
"os"
- // nolint:staticcheck
+ //nolint:staticcheck
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/prototext"
)
diff --git a/tools/gettool/autodownload/autodownload.go b/tools/gettool/autodownload/autodownload.go
index 6616b1d2a..5bd374a62 100644
--- a/tools/gettool/autodownload/autodownload.go
+++ b/tools/gettool/autodownload/autodownload.go
@@ -28,9 +28,9 @@ func createFile(target string, mode os.FileMode, modTime time.Time, src io.Reade
return errors.Wrap(err, "error creating file")
}
- defer os.Chtimes(target, modTime, modTime) // nolint:errcheck
+ defer os.Chtimes(target, modTime, modTime) //nolint:errcheck
- defer f.Close() // nolint:errcheck,gosec
+ defer f.Close() //nolint:errcheck,gosec
if _, err := io.Copy(f, src); err != nil {
return errors.Wrap(err, "error copying contents")
@@ -40,7 +40,7 @@ func createFile(target string, mode os.FileMode, modTime time.Time, src io.Reade
}
func createSymlink(linkPath, linkTarget string) error {
- os.Remove(linkPath) // nolint:errcheck
+ os.Remove(linkPath) //nolint:errcheck
return errors.Wrap(os.Symlink(linkTarget, linkPath), "error creating symlink")
}
@@ -148,7 +148,7 @@ func unzip(dir string, r io.Reader, stripPathComponents int) error {
return errors.Wrapf(ferr, "error creating file %v", f.Name)
}
- fc.Close() // nolint:errcheck
+ fc.Close() //nolint:errcheck
default:
return errors.Errorf("unsupported zip entry %v: %v", f.Name, f.FileInfo().Mode())
@@ -160,7 +160,7 @@ func unzip(dir string, r io.Reader, stripPathComponents int) error {
// Download downloads the provided.
func Download(url, dir string, checksum map[string]string, stripPathComponents int) (err error) {
- resp, err := http.Get(url) // nolint:gosec,noctx
+ resp, err := http.Get(url) //nolint:gosec,noctx
if err != nil {
return errors.Wrapf(err, "unable to get %q", url)
}
diff --git a/tools/gettool/checksums.txt b/tools/gettool/checksums.txt
index 028e83082..76331582b 100644
--- a/tools/gettool/checksums.txt
+++ b/tools/gettool/checksums.txt
@@ -8,12 +8,12 @@ https://github.com/gohugoio/hugo/releases/download/v0.89.2/hugo_extended_0.89.2_
https://github.com/gohugoio/hugo/releases/download/v0.89.2/hugo_extended_0.89.2_macOS-64bit.tar.gz: f9185f6d14eb84d9029d59cdd8a977f2f0be334c4f9d38f2099e56a0c0734731
https://github.com/gohugoio/hugo/releases/download/v0.89.2/hugo_extended_0.89.2_macOS-ARM64.tar.gz: 99a5b4738528d4858a0237199eabc7aee77674c8c7edcfe269efb4b515566cec
https://github.com/gohugoio/hugo/releases/download/v0.89.2/hugo_extended_0.89.2_windows-64bit.zip: 8d79db4f24fbf023c64862c37d09291ac216875dad91e71dd8753cb5883e4274
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-darwin-amd64.tar.gz: 9bf19d01df5d274f137a4df1dc1f70d37d70eea613936436fa41eda93ed05bcb
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-darwin-arm64.tar.gz: 0eea96192b4e5c771bb9fee0950bc7d88f4c04880e1a7e717a79f7a5eb794ba2
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-linux-amd64.tar.gz: b8885fdea31b63e6b016898e8ba92283a55d3b744d2474faba44cf053d0ecdef
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-linux-arm64.tar.gz: 49ba83fccaa4e79ca4207dab065434058b238cf7146a783a58a3f6380354ee8b
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-linux-armv6.tar.gz: e3e8df724a48f8ec03640abb629f2bb64f66caac133c95083f9644f6aa6631c3
-https://github.com/golangci/golangci-lint/releases/download/v1.47.0/golangci-lint-1.47.0-windows-amd64.zip: d9475be660217fb5f8131a12beb564e4bb650f056c15ff0c54757a5ce8877d66
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-darwin-amd64.tar.gz: ec2e1c3bb3d34268cd57baba6b631127beb185bbe8cfde8ac40ba9b4c8615784
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-darwin-arm64.tar.gz: ce69d7b94940c197ee3d293cfae7530191c094f76f9aecca97554058b12725ac
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-linux-amd64.tar.gz: 127c5c9d47cf3a3cf4128815dea1d9623d57a83a22005e91b986b0cbceb09233
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-linux-arm64.tar.gz: b772408fdda4957edfe93526c7654b787695ac345d76cdf2bdc4470995f62a81
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-linux-armv6.tar.gz: 65032d81a57660b802485c7043b030f6942c8f0b5f4e3616e79f22533cafa6f7
+https://github.com/golangci/golangci-lint/releases/download/v1.48.0/golangci-lint-1.48.0-windows-amd64.zip: 12cbd4c975a7a0c59f28f2f19324490bdca1d530279637467d918ad811de17d2
https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Darwin_arm64.tar.gz: 1f95e6561974f4766d8833438b646b06930563ca9867447ea03edb623d876c75
https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Darwin_x86_64.tar.gz: 17ecad881a50e32f033da5a200c8417d37cae70f09e925645452937998aca506
https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Linux_arm64.tar.gz: 8bf2a9b9e84498bfa239f2fe91b2d555642c87ab9d3f5d37f29e6e97116910a3
diff --git a/tools/gettool/gettool.go b/tools/gettool/gettool.go
index 96c635087..32547e83f 100644
--- a/tools/gettool/gettool.go
+++ b/tools/gettool/gettool.go
@@ -51,7 +51,7 @@ func (ti ToolInfo) actualURL(version, goos, goarch string) string {
return u
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var tools = map[string]ToolInfo{
"linter": {
urlTemplate: "https://github.com/golangci/golangci-lint/releases/download/vVERSION/golangci-lint-VERSION-GOOS-GOARCH.EXT",
@@ -122,7 +122,7 @@ func (ti ToolInfo) actualURL(version, goos, goarch string) string {
},
}
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var (
tool = flag.String("tool", "", "Name of the tool:version")
outputDir = flag.String("output-dir", "", "Output directory")
@@ -133,7 +133,7 @@ func (ti ToolInfo) actualURL(version, goos, goarch string) string {
regenerateChecksums = flag.Bool("regenerate-checksums", false, "Regenerate checksums")
)
-// nolint:gochecknoglobals
+//nolint:gochecknoglobals
var buildArchitectures = []struct {
goos string
goarch string
@@ -184,7 +184,7 @@ func main() {
for _, toolNameVersion := range strings.Split(*tool, ",") {
parts := strings.Split(toolNameVersion, ":")
- // nolint:gomnd
+ //nolint:gomnd
if len(parts) != 2 {
log.Fatalf("invalid tool spec, must be tool:version[,tool:version]")
}
diff --git a/tools/tools.mk b/tools/tools.mk
index 899ff3491..ecdb00a91 100644
--- a/tools/tools.mk
+++ b/tools/tools.mk
@@ -102,7 +102,7 @@ retry:=
endif
# tool versions
-GOLANGCI_LINT_VERSION=1.47.0
+GOLANGCI_LINT_VERSION=1.48.0
CHECKLOCKS_VERSION=release-20220314.0
NODE_VERSION=16.13.0
HUGO_VERSION=0.89.2