chore(ci): upgraded linter to 1.48.0 (#2294)

Mechanically fixed all reported issues and added a `lint-fix` make target.
Author: Jarek Kowalski
Date: 2022-08-08 23:07:54 -07:00
Committed by: GitHub
Parent: 419c7acb11
Commit: 51dcaa985d
305 changed files with 850 additions and 794 deletions
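Nearly all of the changes below are a mechanical rewrite of lint-suppression comments from `// nolint:` to `//nolint:`: with a space after `//`, the comment is no longer treated as a machine-readable directive, and golangci-lint 1.48 (via its nolintlint check) reports it. A minimal before/after sketch, using a hypothetical helper modeled on the `sizeToString` closures touched in this diff:

```go
package example

import "strconv"

// rawSize formats a byte count as a plain decimal string (hypothetical helper,
// mirroring the sizeToString closures changed in this commit).
func rawSize(n int64) string {
	// Old form: the space after "//" keeps this from being recognized as a
	// machine-readable directive, so the upgraded linter flags it.
	// nolint:gomnd
	//
	// New form, applied mechanically across the repository:
	return strconv.FormatInt(n, 10) //nolint:gomnd
}
```

The new `lint-fix` Makefile target added below runs the same linter with `--fix`, so most rewrites of this kind can be applied automatically rather than by hand.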

View File

@@ -16,8 +16,8 @@ env:
UNIX_SHELL_ON_WINDOWS: true
# set (to any value other than false) to trigger random unicode filenames testing (logs may be difficult to read)
ENABLE_UNICODE_FILENAMES: ${{ secrets.ENABLE_UNICODE_FILENAMES }}
# set (to any value other than false) to trigger very long filenames testing
ENABLE_LONG_FILENAMES: ${{ secrets.ENABLE_LONG_FILENAMES }}
# disable long filenames since they sometimes get messed up when simulating input keystrokes
ENABLE_LONG_FILENAMES: false
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

View File

@@ -76,6 +76,13 @@ ifneq ($(GOOS)/$(GOARCH),linux/arm)
endif
endif
lint-fix: $(linter)
ifneq ($(GOOS)/$(GOARCH),linux/arm64)
ifneq ($(GOOS)/$(GOARCH),linux/arm)
$(linter) --deadline $(LINTER_DEADLINE) run --fix $(linter_flags)
endif
endif
lint-and-log: $(linter)
$(linter) --deadline $(LINTER_DEADLINE) run $(linter_flags) | tee .linterr.txt

View File

@@ -30,7 +30,7 @@
var tracer = otel.Tracer("cli")
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var (
defaultColor = color.New()
warningColor = color.New(color.FgYellow)
@@ -164,7 +164,7 @@ type App struct {
stdinReader io.Reader
stdoutWriter io.Writer
stderrWriter io.Writer
rootctx context.Context // nolint:containedctx
rootctx context.Context //nolint:containedctx
loggerFactory logging.LoggerFactory
simulatedCtrlC chan bool
envNamePrefix string
@@ -429,7 +429,7 @@ func assertDirectRepository(act func(ctx context.Context, rep repo.DirectReposit
func (c *App) directRepositoryWriteAction(act func(ctx context.Context, rep repo.DirectRepositoryWriter) error) func(ctx *kingpin.ParseContext) error {
return c.maybeRepositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error {
// nolint:wrapcheck
//nolint:wrapcheck
return repo.DirectWriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "cli:" + c.currentActionName(),
OnUpload: c.progress.UploadedBytes,
@@ -460,7 +460,7 @@ func (c *App) repositoryReaderAction(act func(ctx context.Context, rep repo.Repo
func (c *App) repositoryWriterAction(act func(ctx context.Context, rep repo.RepositoryWriter) error) func(ctx *kingpin.ParseContext) error {
return c.maybeRepositoryAction(func(ctx context.Context, rep repo.Repository) error {
// nolint:wrapcheck
//nolint:wrapcheck
return repo.WriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "cli:" + c.currentActionName(),
OnUpload: c.progress.UploadedBytes,
@@ -575,7 +575,7 @@ func (c *App) maybeRunMaintenance(ctx context.Context, rep repo.Repository) erro
Purpose: "maybeRunMaintenance",
OnUpload: c.progress.UploadedBytes,
}, func(ctx context.Context, w repo.DirectRepositoryWriter) error {
// nolint:wrapcheck
//nolint:wrapcheck
return snapshotmaintenance.Run(ctx, w, maintenance.ModeAuto, false, maintenance.SafetyFull)
})

View File

@@ -27,7 +27,7 @@ func maybeAutoUpgradeRepository(ctx context.Context, r repo.Repository) error {
log(ctx).Debugf("Setting default maintenance parameters...")
// nolint:wrapcheck
//nolint:wrapcheck
return repo.DirectWriteSession(ctx, dr, repo.WriteSessionOptions{
Purpose: "setDefaultMaintenanceParameters",
}, func(ctx context.Context, w repo.DirectRepositoryWriter) error {

View File

@@ -169,7 +169,7 @@ func (p *cliProgress) output(col *color.Color, msg string) {
prefix = ""
}
col.Fprintf(p.out.stderr(), "%v%v", prefix, msg) // nolint:errcheck
col.Fprintf(p.out.stderr(), "%v%v", prefix, msg) //nolint:errcheck
}
if !p.enableProgress {

View File

@@ -28,7 +28,7 @@ func (c *commandACLAdd) run(ctx context.Context, rep repo.RepositoryWriter) erro
r := acl.TargetRule{}
for _, v := range strings.Split(c.target, ",") {
parts := strings.SplitN(v, "=", 2) // nolint:gomnd
parts := strings.SplitN(v, "=", 2) //nolint:gomnd
if len(parts) != 2 { //nolint:gomnd
return errors.Errorf("invalid target labels %q, must be key=value", v)
}

View File

@@ -51,7 +51,7 @@ func (c *commandBenchmarkCompression) readInputFile(ctx context.Context) ([]byte
return nil, errors.Wrap(err, "error opening input file")
}
defer f.Close() // nolint:errcheck,gosec
defer f.Close() //nolint:errcheck,gosec
st, err := f.Stat()
if err != nil {

View File

@@ -70,8 +70,8 @@ func (c *commandBenchmarkCrypto) runBenchmark(ctx context.Context) []cryptoBench
fo := &format.ContentFormat{
Encryption: ea,
Hash: ha,
MasterKey: make([]byte, 32), // nolint:gomnd
HMACSecret: make([]byte, 32), // nolint:gomnd
MasterKey: make([]byte, 32), //nolint:gomnd
HMACSecret: make([]byte, 32), //nolint:gomnd
}
hf, err := hashing.CreateHashFunc(fo)

View File

@@ -69,8 +69,8 @@ func (c *commandBenchmarkEncryption) runBenchmark(ctx context.Context) []cryptoB
enc, err := encryption.CreateEncryptor(&format.ContentFormat{
Encryption: ea,
Hash: hashing.DefaultAlgorithm,
MasterKey: make([]byte, 32), // nolint:gomnd
HMACSecret: make([]byte, 32), // nolint:gomnd
MasterKey: make([]byte, 32), //nolint:gomnd
HMACSecret: make([]byte, 32), //nolint:gomnd
})
if err != nil {
continue

View File

@@ -65,7 +65,7 @@ func (c *commandBenchmarkHashing) runBenchmark(ctx context.Context) []cryptoBenc
for _, ha := range hashing.SupportedAlgorithms() {
hf, err := hashing.CreateHashFunc(&format.ContentFormat{
Hash: ha,
HMACSecret: make([]byte, 32), // nolint:gomnd
HMACSecret: make([]byte, 32), //nolint:gomnd
})
if err != nil {
continue

View File

@@ -39,7 +39,7 @@ func (c *commandBlobList) run(ctx context.Context, rep repo.DirectRepository) er
jl.begin(&c.jo)
defer jl.end()
// nolint:wrapcheck
//nolint:wrapcheck
return rep.BlobReader().ListBlobs(ctx, blob.ID(c.blobListPrefix), func(b blob.Metadata) error {
if !c.shouldInclude(b) {
return nil

View File

@@ -43,7 +43,7 @@ func (c *commandBlobShardsModify) setup(svc appServices, parent commandParent) {
}
func (c *commandBlobShardsModify) getParameters(dotShardsFile string) (*sharded.Parameters, error) {
// nolint:gosec
//nolint:gosec
f, err := os.Open(dotShardsFile)
if err != nil {
return nil, errors.Wrap(err, "unable to open shards file")
@@ -192,7 +192,7 @@ func (c *commandBlobShardsModify) removeEmptyDirs(ctx context.Context, dir strin
isEmpty := true
for _, ent := range entries {
// nolint:nestif
//nolint:nestif
if ent.IsDir() {
childPath := path.Join(dir, ent.Name())
@@ -231,7 +231,7 @@ func (c *commandBlobShardsModify) renameBlobs(ctx context.Context, dir, prefix s
}
for _, ent := range entries {
// nolint:nestif
//nolint:nestif
if ent.IsDir() {
if err := c.renameBlobs(ctx, path.Join(dir, ent.Name()), prefix+ent.Name(), params, numMoved, numUnchanged); err != nil {
return err
@@ -253,7 +253,7 @@ func (c *commandBlobShardsModify) renameBlobs(ctx context.Context, dir, prefix s
if !c.dryRun {
err := os.Rename(srcFile, destFile)
if os.IsNotExist(err) {
// nolint:gomnd
//nolint:gomnd
if err2 := os.MkdirAll(destDir, 0o700); err2 != nil {
return errors.Wrap(err2, "error creating directory")
}

View File

@@ -65,7 +65,7 @@ func(b blob.Metadata) error {
sizeToString := units.BytesStringBase10
if c.raw {
sizeToString = func(l int64) string {
// nolint:gomnd
//nolint:gomnd
return strconv.FormatInt(l, 10)
}
}

View File

@@ -53,7 +53,7 @@ func clearCacheDirectory(ctx context.Context, d string) error {
log(ctx).Infof("Clearing cache directory: %v.", d)
err := retry.WithExponentialBackoffNoValue(ctx, "delete cache", func() error {
// nolint:wrapcheck
//nolint:wrapcheck
return os.RemoveAll(d)
}, retry.Always)
if err != nil {

View File

@@ -98,6 +98,6 @@ func (c *commandCacheSetParams) run(ctx context.Context, rep repo.RepositoryWrit
return errors.Errorf("no changes")
}
// nolint:wrapcheck
//nolint:wrapcheck
return repo.SetCachingOptions(ctx, c.svc.repositoryConfigFileName(), opts)
}

View File

@@ -43,7 +43,7 @@ func (c *commandCacheSync) run(ctx context.Context, rep repo.DirectRepositoryWri
eg.Go(func() error {
defer close(ch)
// nolint:wrapcheck
//nolint:wrapcheck
return rep.BlobReader().ListBlobs(ctx, content.PackBlobIDPrefixSpecial, func(bm blob.Metadata) error {
ch <- bm.BlobID

View File

@@ -48,7 +48,7 @@ func (c *commandContentRewrite) runContentRewriteCommand(ctx context.Context, re
return err
}
// nolint:wrapcheck
//nolint:wrapcheck
return maintenance.RewriteContents(ctx, rep, &maintenance.RewriteContentsOptions{
ContentIDRange: c.contentRange.contentIDRange(),
ContentIDs: contentIDs,

View File

@@ -51,7 +51,7 @@ func (c *commandContentStats) run(ctx context.Context, rep repo.DirectRepository
sizeToString := units.BytesStringBase10
if c.raw {
sizeToString = func(l int64) string {
return strconv.FormatInt(l, 10) // nolint:gomnd
return strconv.FormatInt(l, 10) //nolint:gomnd
}
}
@@ -153,6 +153,6 @@ func(b content.Info) error {
return nil
})
// nolint:wrapcheck
//nolint:wrapcheck
return grandTotal, byCompressionTotal, countMap, totalSizeOfContentsUnder, err
}

View File

@@ -155,7 +155,7 @@ func (c *commandContentVerify) contentVerify(ctx context.Context, r content.Read
return errors.Errorf("content %v out of bounds of its pack blob %v", ci.GetContentID(), ci.GetPackBlobID())
}
// nolint:gosec
//nolint:gosec
if 100*rand.Float64() < downloadPercent {
if _, err := r.GetContent(ctx, ci.GetContentID()); err != nil {
return errors.Wrapf(err, "content %v is invalid", ci.GetContentID())

View File

@@ -102,7 +102,7 @@ func (c *commandIndexInspect) inspectAllBlobs(ctx context.Context, rep repo.Dire
})
}
// nolint:wrapcheck
//nolint:wrapcheck
return eg.Wait()
}

View File

@@ -46,6 +46,6 @@ func (c *commandIndexOptimize) runOptimizeCommand(ctx context.Context, rep repo.
opt.DropDeletedBefore = rep.Time().Add(-age)
}
// nolint:wrapcheck
//nolint:wrapcheck
return rep.ContentManager().CompactIndexes(ctx, opt)
}

View File

@@ -105,7 +105,7 @@ func (c *commandIndexRecover) recoverIndexesFromAllPacks(ctx context.Context, re
go func() {
for _, prefix := range prefixes {
// nolint:errcheck
//nolint:errcheck
rep.BlobStorage().ListBlobs(ctx, prefix, func(bm blob.Metadata) error {
atomic.AddInt32(discoveringBlobCount, 1)
return nil

View File

@@ -73,7 +73,7 @@ func getLogSessions(ctx context.Context, st blob.Reader) ([]*logSessionInfo, err
if err := st.ListBlobs(ctx, content.TextLogBlobPrefix, func(bm blob.Metadata) error {
parts := strings.Split(string(bm.BlobID), "_")
// nolint:gomnd
//nolint:gomnd
if len(parts) < 8 {
log(ctx).Errorf("invalid part count: %v skipping unrecognized log: %v", len(parts), bm.BlobID)
return nil
@@ -81,21 +81,21 @@ func getLogSessions(ctx context.Context, st blob.Reader) ([]*logSessionInfo, err
id := parts[2] + "_" + parts[3]
// nolint:gomnd
//nolint:gomnd
startTime, err := strconv.ParseInt(parts[4], 10, 64)
if err != nil {
log(ctx).Errorf("invalid start time - skipping unrecognized log: %v", bm.BlobID)
// nolint:nilerr
//nolint:nilerr
return nil
}
// nolint:gomnd
//nolint:gomnd
endTime, err := strconv.ParseInt(parts[5], 10, 64)
if err != nil {
log(ctx).Errorf("invalid end time - skipping unrecognized log: %v", bm.BlobID)
// nolint:nilerr
//nolint:nilerr
return nil
}

View File

@@ -57,7 +57,7 @@ func (c *commandList) listDirectory(ctx context.Context, d fs.Directory, prefix,
if err := d.IterateEntries(ctx, func(innerCtx context.Context, e fs.Entry) error {
return c.printDirectoryEntry(innerCtx, e, prefix, indent)
}); err != nil {
return err // nolint:wrapcheck
return err //nolint:wrapcheck
}
if dws, ok := d.(fs.DirectoryWithSummary); ok && c.errorSummary {

View File

@@ -37,6 +37,6 @@ func (c *commandMaintenanceRun) run(ctx context.Context, rep repo.DirectReposito
mode = maintenance.ModeFull
}
// nolint:wrapcheck
//nolint:wrapcheck
return snapshotmaintenance.Run(ctx, rep, mode, c.maintenanceRunForce, c.safety)
}

View File

@@ -76,7 +76,7 @@ func (c *commandMaintenanceSet) setLogCleanupParametersFromFlags(ctx context.Con
if v := c.maxTotalRetainedLogSizeMB; v != -1 {
cl := p.LogRetention.OrDefault()
cl.MaxTotalSize = v << 20 // nolint:gomnd
cl.MaxTotalSize = v << 20 //nolint:gomnd
p.LogRetention = cl
*changed = true

View File

@@ -68,11 +68,11 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error {
}
if c.mountTraceFS {
// nolint:forcetypeassert
//nolint:forcetypeassert
entry = loggingfs.Wrap(entry, log(ctx).Debugf).(fs.Directory)
}
// nolint:forcetypeassert
//nolint:forcetypeassert
entry = cachefs.Wrap(entry, c.newFSCache()).(fs.Directory)
ctrl, mountErr := mount.Directory(ctx, entry, c.mountPoint,

View File

@@ -88,7 +88,7 @@ func (c *commandPolicyEdit) run(ctx context.Context, rep repo.RepositoryWriter)
updated = &policy.Policy{}
d := json.NewDecoder(bytes.NewBufferString(edited))
d.DisallowUnknownFields()
// nolint:wrapcheck
//nolint:wrapcheck
return d.Decode(updated)
}); err != nil {
return errors.Wrap(err, "unable to launch editor")

View File

@@ -44,7 +44,7 @@ func (c *commandPolicySet) setup(svc appServices, parent commandParent) {
cmd.Action(svc.repositoryWriterAction(c.run))
}
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var booleanEnumValues = []string{"true", "false", "inherit"}
const (
@@ -187,7 +187,7 @@ func applyOptionalInt(ctx context.Context, desc string, val **policy.OptionalInt
return nil
}
// nolint:gomnd
//nolint:gomnd
v, err := strconv.ParseInt(str, 10, 32)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
@@ -218,14 +218,14 @@ func applyOptionalInt64MiB(ctx context.Context, desc string, val **policy.Option
return nil
}
// nolint:gomnd
//nolint:gomnd
v, err := strconv.ParseInt(str, 10, 32)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
}
// convert MiB to bytes
v *= 1 << 20 // nolint:gomnd
v *= 1 << 20 //nolint:gomnd
i := policy.OptionalInt64(v)
*changeCount++
@@ -253,7 +253,7 @@ func applyPolicyNumber64(ctx context.Context, desc string, val *int64, str strin
return nil
}
// nolint:gomnd
//nolint:gomnd
v, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %q %q", desc, str)

View File

@@ -31,14 +31,14 @@ func (c *commandRepositoryConnect) setup(svc advancedAppServices, parent command
cc := cmd.Command(prov.Name, "Connect to repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
// nolint:wrapcheck
//nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {
return errors.Wrap(err, "can't connect to storage")
}
// nolint:wrapcheck
//nolint:wrapcheck
return svc.runConnectCommandWithStorage(ctx, &c.co, st)
})
})

View File

@@ -50,7 +50,7 @@ func (c *storageFromConfigFlags) connectToStorageFromConfigFile(ctx context.Cont
return nil, errors.Errorf("connection file does not specify blob storage connection parameters, kopia server connections are not supported")
}
// nolint:wrapcheck
//nolint:wrapcheck
return blob.NewStorage(ctx, *cfg.Storage, false)
}
@@ -64,6 +64,6 @@ func (c *storageFromConfigFlags) connectToStorageFromConfigToken(ctx context.Con
c.sps.setPasswordFromToken(pass)
}
// nolint:wrapcheck
//nolint:wrapcheck
return blob.NewStorage(ctx, ci, false)
}

View File

@@ -61,7 +61,7 @@ func (c *commandRepositoryCreate) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Create repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
// nolint:wrapcheck
//nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, true, c.createFormatVersion)
if err != nil {
@@ -146,7 +146,7 @@ func (c *commandRepositoryCreate) runCreateCommandWithStorage(ctx context.Contex
return errors.Wrap(err, "error populating repository")
}
noteColor.Fprintf(c.out.stdout(), runValidationNote) // nolint:errcheck
noteColor.Fprintf(c.out.stdout(), runValidationNote) //nolint:errcheck
return nil
}
@@ -158,7 +158,7 @@ func (c *commandRepositoryCreate) populateRepository(ctx context.Context, passwo
}
defer rep.Close(ctx) //nolint:errcheck
// nolint:wrapcheck
//nolint:wrapcheck
return repo.WriteSession(ctx, rep, repo.WriteSessionOptions{
Purpose: "populate repository",
}, func(ctx context.Context, w repo.RepositoryWriter) error {

View File

@@ -30,7 +30,7 @@ func (c *commandRepositoryRepair) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Repair repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
// nolint:wrapcheck
//nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {

View File

@@ -103,6 +103,6 @@ func (c *commandRepositorySetClient) run(ctx context.Context, rep repo.Repositor
return errors.Errorf("no changes")
}
// nolint:wrapcheck
//nolint:wrapcheck
return repo.SetClientOptions(ctx, c.svc.repositoryConfigFileName(), opt)
}

View File

@@ -75,7 +75,7 @@ func (c *commandRepositorySetParameters) setSizeMBParameter(ctx context.Context,
*dst = v << 20 //nolint:gomnd
*anyChange = true
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(int64(v)<<20)) // nolint:gomnd
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(int64(v)<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Context, v int64, desc string, dst *int64, anyChange *bool) {
@@ -86,7 +86,7 @@ func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Con
*dst = v << 20 //nolint:gomnd
*anyChange = true
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(v<<20)) // nolint:gomnd
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(v<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setIntParameter(ctx context.Context, v int, desc string, dst *int, anyChange *bool) {

View File

@@ -64,8 +64,8 @@ func (c *commandRepositoryStatus) outputJSON(ctx context.Context, r repo.Reposit
s.UniqueIDHex = hex.EncodeToString(dr.UniqueID())
s.ObjectFormat = dr.ObjectFormat()
s.BlobRetention = dr.BlobCfg()
s.Storage = scrubber.ScrubSensitiveData(reflect.ValueOf(ci)).Interface().(blob.ConnectionInfo) // nolint:forcetypeassert
s.ContentFormat = scrubber.ScrubSensitiveData(reflect.ValueOf(dr.ContentReader().ContentFormat().Struct())).Interface().(format.ContentFormat) // nolint:forcetypeassert
s.Storage = scrubber.ScrubSensitiveData(reflect.ValueOf(ci)).Interface().(blob.ConnectionInfo) //nolint:forcetypeassert
s.ContentFormat = scrubber.ScrubSensitiveData(reflect.ValueOf(dr.ContentReader().ContentFormat().Struct())).Interface().(format.ContentFormat) //nolint:forcetypeassert
switch cp, err := dr.BlobVolume().GetCapacity(ctx); {
case err == nil:
@@ -127,7 +127,7 @@ func (c *commandRepositoryStatus) dumpRetentionStatus(dr repo.DirectRepository)
}
}
// nolint: funlen,gocyclo
//nolint:funlen,gocyclo
func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository) error {
if c.jo.jsonOutput {
return c.outputJSON(ctx, rep)

View File

@@ -57,7 +57,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
cc := cmd.Command(prov.Name, "Synchronize repository data to another repository in "+prov.Description)
f.Setup(svc, cc)
cc.Action(func(kpc *kingpin.ParseContext) error {
// nolint:wrapcheck
//nolint:wrapcheck
return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error {
st, err := f.Connect(ctx, false, 0)
if err != nil {
@@ -69,7 +69,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP
return errors.Wrap(err, "open repository")
}
defer rep.Close(ctx) // nolint:errcheck
defer rep.Close(ctx) //nolint:errcheck
dr, ok := rep.(repo.DirectRepository)
if !ok {

View File

@@ -42,7 +42,7 @@ type commandRepositoryUpgrade struct {
// own constants so that they do not have to wait for the default clock-drift to
// settle.
//
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var MaxPermittedClockDrift = func() time.Duration { return maxPermittedClockDrift }
func (c *commandRepositoryUpgrade) setup(svc advancedAppServices, parent commandParent) {

View File

@@ -18,6 +18,6 @@ func (c *commandServerFlush) setup(svc appServices, parent commandParent) {
}
func (c *commandServerFlush) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
// nolint:wrapcheck
//nolint:wrapcheck
return cli.Post(ctx, "control/flush", &serverapi.Empty{}, &serverapi.Empty{})
}

View File

@@ -18,6 +18,6 @@ func (c *commandServerRefresh) setup(svc appServices, parent commandParent) {
}
func (c *commandServerRefresh) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
// nolint:wrapcheck
//nolint:wrapcheck
return cli.Post(ctx, "control/refresh", &serverapi.Empty{}, &serverapi.Empty{})
}

View File

@@ -21,6 +21,6 @@ func (c *commandServerShutdown) setup(svc appServices, parent commandParent) {
}
func (c *commandServerShutdown) run(ctx context.Context, cli *apiclient.KopiaAPIClient) error {
// nolint:wrapcheck
//nolint:wrapcheck
return cli.Post(ctx, "control/shutdown", &serverapi.Empty{}, &serverapi.Empty{})
}

View File

@@ -152,7 +152,7 @@ func (c *commandServerStart) serverStartOptions(ctx context.Context) (*server.Op
func (c *commandServerStart) initRepositoryPossiblyAsync(ctx context.Context, srv *server.Server) error {
initialize := func(ctx context.Context) (repo.Repository, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return c.svc.openRepository(ctx, false)
}
@@ -184,7 +184,7 @@ func (c *commandServerStart) run(ctx context.Context) error {
}
httpServer := &http.Server{
ReadHeaderTimeout: 15 * time.Second, // nolint:gomnd
ReadHeaderTimeout: 15 * time.Second, //nolint:gomnd
Addr: stripProtocol(c.sf.serverAddress),
BaseContext: func(l net.Listener) context.Context {
return ctx

View File

@@ -82,7 +82,7 @@ func (c *commandSnapshotCreate) setup(svc appServices, parent commandParent) {
cmd.Action(svc.repositoryWriterAction(c.run))
}
// nolint:gocyclo
//nolint:gocyclo
func (c *commandSnapshotCreate) run(ctx context.Context, rep repo.RepositoryWriter) error {
sources := c.snapshotCreateSources
@@ -249,7 +249,7 @@ func parseTimestamp(timestamp string) (time.Time, error) {
return time.Time{}, nil
}
// nolint:wrapcheck
//nolint:wrapcheck
return time.Parse(timeFormat, timestamp)
}
@@ -259,7 +259,7 @@ func startTimeAfterEndTime(startTime, endTime time.Time) bool {
startTime.After(endTime)
}
// nolint:gocyclo
//nolint:gocyclo
func (c *commandSnapshotCreate) snapshotSingleSource(ctx context.Context, rep repo.RepositoryWriter, u *snapshotfs.Uploader, sourceInfo snapshot.SourceInfo, tags map[string]string) error {
log(ctx).Infof("Snapshotting %v ...", sourceInfo)

View File

@@ -28,7 +28,7 @@ func (c *commandSnapshotExpire) setup(svc appServices, parent commandParent) {
func (c *commandSnapshotExpire) getSnapshotSourcesToExpire(ctx context.Context, rep repo.Repository) ([]snapshot.SourceInfo, error) {
if c.snapshotExpireAll {
// nolint:wrapcheck
//nolint:wrapcheck
return snapshot.ListSources(ctx, rep)
}

View File

@@ -19,7 +19,7 @@
"github.com/kopia/kopia/tests/testenv"
)
// nolint:maintidx
//nolint:maintidx
func TestSnapshotFix(t *testing.T) {
srcDir1 := testutil.TempDirectory(t)

View File

@@ -239,7 +239,7 @@ type snapshotListRow struct {
func (c *commandSnapshotList) iterateSnapshotsMaybeWithStorageStats(ctx context.Context, rep repo.Repository, manifests []*snapshot.Manifest, callback func(m *snapshot.Manifest) error) error {
if c.storageStats {
// nolint:wrapcheck
//nolint:wrapcheck
return snapshotfs.CalculateStorageStats(ctx, rep, manifests, callback)
}

View File

@@ -50,7 +50,7 @@ func (c *commandSnapshotMigrate) run(ctx context.Context, destRepo repo.Reposito
return errors.Wrap(err, "can't open source repository")
}
defer sourceRepo.Close(ctx) // nolint:errcheck
defer sourceRepo.Close(ctx) //nolint:errcheck
sources, err := c.getSourcesToMigrate(ctx, sourceRepo)
if err != nil {
@@ -323,7 +323,7 @@ func (c *commandSnapshotMigrate) getSourcesToMigrate(ctx context.Context, rep re
}
if c.migrateAll {
// nolint:wrapcheck
//nolint:wrapcheck
return snapshot.ListSources(ctx, rep)
}

View File

@@ -70,7 +70,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
v := snapshotfs.NewVerifier(ctx, rep, opts)
// nolint:wrapcheck
//nolint:wrapcheck
return v.InParallel(ctx, func(tw *snapshotfs.TreeWalker) error {
manifests, err := c.loadSourceManifests(ctx, rep, c.verifyCommandSources)
if err != nil {
@@ -90,7 +90,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
// nolint:errcheck
//nolint:errcheck
tw.Process(ctx, root, rootPath)
}
@@ -101,7 +101,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
// nolint:errcheck
//nolint:errcheck
tw.Process(ctx, snapshotfs.DirectoryEntry(rep, oid, nil), oidStr)
}
@@ -112,7 +112,7 @@ func (c *commandSnapshotVerify) run(ctx context.Context, rep repo.Repository) er
}
// ignore error now, return aggregate error at a higher level.
// nolint:errcheck
//nolint:errcheck
tw.Process(ctx, snapshotfs.AutoDetectEntryFromObjectID(ctx, rep, oid, oidStr), oidStr)
}
@@ -144,6 +144,6 @@ func (c *commandSnapshotVerify) loadSourceManifests(ctx context.Context, rep rep
}
}
// nolint:wrapcheck
//nolint:wrapcheck
return snapshot.LoadSnapshots(ctx, rep, manifestIDs)
}

View File

@@ -114,7 +114,7 @@ func resolveSymlink(path string) (string, error) {
return path, nil
}
// nolint:wrapcheck
//nolint:wrapcheck
return filepath.EvalSymlinks(path)
}

View File

@@ -24,7 +24,7 @@
"github.com/kopia/kopia/repo"
)
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var metricsPushFormats = map[string]expfmt.Format{
"text": expfmt.FmtText,
"proto-text": expfmt.FmtProtoText,
@@ -92,7 +92,7 @@ func (c *observabilityFlags) startMetrics(ctx context.Context) error {
log(ctx).Infof("starting prometheus metrics on %v", c.metricsListenAddr)
go http.ListenAndServe(c.metricsListenAddr, m) // nolint:errcheck
go http.ListenAndServe(c.metricsListenAddr, m) //nolint:errcheck
}
if c.metricsPushAddr != "" {

View File

@@ -25,6 +25,6 @@ func (c *storageAzureFlags) Setup(svc StorageProviderServices, cmd *kingpin.CmdC
}
func (c *storageAzureFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return azure.New(ctx, &c.azOptions)
}

View File

@@ -22,6 +22,6 @@ func (c *storageB2Flags) Setup(svc StorageProviderServices, cmd *kingpin.CmdClau
}
func (c *storageB2Flags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return b2.New(ctx, &c.b2options)
}

View File

@@ -50,12 +50,12 @@ func (c *storageFilesystemFlags) Connect(ctx context.Context, isCreate bool, for
}
if v := c.connectOwnerUID; v != "" {
// nolint:gomnd
//nolint:gomnd
fso.FileUID = getIntPtrValue(v, 10)
}
if v := c.connectOwnerGID; v != "" {
// nolint:gomnd
//nolint:gomnd
fso.FileGID = getIntPtrValue(v, 10)
}
@@ -63,7 +63,7 @@ func (c *storageFilesystemFlags) Connect(ctx context.Context, isCreate bool, for
fso.DirectoryMode = getFileModeValue(c.connectDirMode, defaultDirMode)
fso.DirectoryShards = initialDirectoryShards(c.connectFlat, formatVersion)
// nolint:wrapcheck
//nolint:wrapcheck
return filesystem.New(ctx, &fso, isCreate)
}
@@ -82,7 +82,7 @@ func initialDirectoryShards(flat bool, formatVersion int) []int {
}
func getIntPtrValue(value string, base int) *int {
// nolint:gomnd
//nolint:gomnd
if int64Val, err := strconv.ParseInt(value, base, 32); err == nil {
intVal := int(int64Val)
return &intVal
@@ -92,7 +92,7 @@ func getIntPtrValue(value string, base int) *int {
}
func getFileModeValue(value string, def os.FileMode) os.FileMode {
// nolint:gomnd
//nolint:gomnd
if uint32Val, err := strconv.ParseUint(value, 8, 32); err == nil {
return os.FileMode(uint32Val)
}

View File

@@ -39,6 +39,6 @@ func (c *storageGCSFlags) Connect(ctx context.Context, isCreate bool, formatVers
c.options.ServiceAccountCredentialsFile = ""
}
// nolint:wrapcheck
//nolint:wrapcheck
return gcs.New(ctx, &c.options)
}

View File

@@ -38,6 +38,6 @@ func (c *storageGDriveFlags) Connect(ctx context.Context, isCreate bool, formatV
c.options.ServiceAccountCredentialsFile = ""
}
// nolint:wrapcheck
//nolint:wrapcheck
return gdrive.New(ctx, &c.options)
}

View File

@@ -44,6 +44,6 @@ func (c *storageRcloneFlags) Connect(ctx context.Context, isCreate bool, formatV
c.opt.EmbeddedConfig = string(cfg)
}
// nolint:wrapcheck
//nolint:wrapcheck
return rclone.New(ctx, &c.opt, isCreate)
}

View File

@@ -51,6 +51,6 @@ func (c *storageS3Flags) Connect(ctx context.Context, isCreate bool, formatVersi
return nil, errors.New("Cannot specify a 'point-in-time' option when creating a repository")
}
// nolint:wrapcheck
//nolint:wrapcheck
return s3.New(ctx, &c.s3options)
}

View File

@@ -48,7 +48,7 @@ func (c *storageSFTPFlags) Setup(_ StorageProviderServices, cmd *kingpin.CmdClau
func (c *storageSFTPFlags) getOptions(formatVersion int) (*sftp.Options, error) {
sftpo := c.options
// nolint:nestif
//nolint:nestif
if !sftpo.ExternalSSH {
if c.embedCredentials {
if sftpo.KeyData == "" {
@@ -115,6 +115,6 @@ func (c *storageSFTPFlags) Connect(ctx context.Context, isCreate bool, formatVer
return nil, err
}
// nolint:wrapcheck
//nolint:wrapcheck
return sftp.New(ctx, opt, isCreate)
}

View File

@@ -40,6 +40,6 @@ func (c *storageWebDAVFlags) Connect(ctx context.Context, isCreate bool, formatV
wo.DirectoryShards = initialDirectoryShards(c.connectFlat, formatVersion)
// nolint:wrapcheck
//nolint:wrapcheck
return webdav.New(ctx, &wo, isCreate)
}

View File

@@ -45,5 +45,5 @@ func (c *commonThrottleGet) printValueOrUnlimited(label string, v float64, conve
}
func (c *commonThrottleGet) floatToString(v float64) string {
return strconv.FormatFloat(v, 'f', 0, 64) // nolint:gomnd
return strconv.FormatFloat(v, 'f', 0, 64) //nolint:gomnd
}

View File

@@ -79,7 +79,7 @@ func (c *commonThrottleSet) setThrottleFloat64(ctx context.Context, desc string,
return nil
}
// nolint:gomnd
//nolint:gomnd
v, err := strconv.ParseFloat(str, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)
@@ -114,7 +114,7 @@ func (c *commonThrottleSet) setThrottleInt(ctx context.Context, desc string, val
return nil
}
// nolint:gomnd
//nolint:gomnd
v, err := strconv.ParseInt(str, 10, 64)
if err != nil {
return errors.Wrapf(err, "can't parse the %v %q", desc, str)

View File

@@ -64,7 +64,7 @@ func (c *App) writeUpdateState(us *updateState) error {
}
func (c *App) removeUpdateState() {
os.Remove(c.updateStateFilename()) // nolint:errcheck
os.Remove(c.updateStateFilename()) //nolint:errcheck
}
// getUpdateState reads the update state file if available.
@@ -107,7 +107,7 @@ func getLatestReleaseNameFromGitHub(ctx context.Context) (string, error) {
ctx, cancel := context.WithTimeout(ctx, githubTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(latestReleaseGitHubURLFormat, repo.BuildGitHubRepo), http.NoBody)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(latestReleaseGitHubURLFormat, repo.BuildGitHubRepo), http.NoBody)
if err != nil {
return "", errors.Wrap(err, "unable to get latest release from github")
}
@@ -138,7 +138,7 @@ func verifyGitHubReleaseIsComplete(ctx context.Context, releaseName string) erro
ctx, cancel := context.WithTimeout(ctx, githubTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(checksumsURLFormat, repo.BuildGitHubRepo, releaseName), http.NoBody)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(checksumsURLFormat, repo.BuildGitHubRepo, releaseName), http.NoBody)
if err != nil {
return errors.Wrap(err, "unable to download releases checksum")
}

View File

@@ -112,7 +112,7 @@ func(innerCtx context.Context) ([]fs.Entry, error) {
return nil
}
return d.IterateEntries(ctx, callback) // nolint:wrapcheck
return d.IterateEntries(ctx, callback) //nolint:wrapcheck
}
func (c *Cache) getEntriesFromCacheLocked(ctx context.Context, id string) []fs.Entry {
@@ -200,7 +200,7 @@ type Options struct {
MaxCachedEntries int
}
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var defaultOptions = &Options{
MaxCachedDirectories: 1000, //nolint:gomnd
MaxCachedEntries: 100000, //nolint:gomnd

View File

@@ -24,7 +24,7 @@ type directory struct {
func (d *directory) Child(ctx context.Context, name string) (fs.Entry, error) {
e, err := d.Directory.Child(ctx, name)
if err != nil {
// nolint:wrapcheck
//nolint:wrapcheck
return nil, err
}
@@ -41,7 +41,7 @@ func(e fs.Entry) fs.Entry {
callback,
)
return err // nolint:wrapcheck
return err //nolint:wrapcheck
}
type file struct {

View File

@@ -85,7 +85,7 @@ func GetAllEntries(ctx context.Context, d Directory) ([]Entry, error) {
return nil
})
return entries, err // nolint:wrapcheck
return entries, err //nolint:wrapcheck
}
// ErrEntryNotFound is returned when an entry is not found.

View File

@@ -140,7 +140,7 @@ func (d *ignoreDirectory) skipCacheDirectory(ctx context.Context, relativePath s
func (d *ignoreDirectory) DirEntryOrNil(ctx context.Context) (*snapshot.DirEntry, error) {
if defp, ok := d.Directory.(snapshot.HasDirEntryOrNil); ok {
// nolint:wrapcheck
//nolint:wrapcheck
return defp.DirEntryOrNil(ctx)
}
// Ignored directories do not have DirEntry objects.
@@ -157,7 +157,7 @@ func (d *ignoreDirectory) IterateEntries(ctx context.Context, callback func(ctx
return err
}
// nolint:wrapcheck
//nolint:wrapcheck
return d.Directory.IterateEntries(ctx, func(ctx context.Context, e fs.Entry) error {
if wrapped, ok := d.maybeWrappedChildEntry(ctx, thisContext, e); ok {
return callback(ctx, wrapped)
@@ -194,7 +194,7 @@ func (d *ignoreDirectory) Child(ctx context.Context, name string) (fs.Entry, err
e, err := d.Directory.Child(ctx, name)
if err != nil {
// nolint:wrapcheck
//nolint:wrapcheck
return nil, err
}

View File

@@ -186,7 +186,7 @@ func (fsd *filesystemDirectory) IterateEntries(ctx context.Context, cb func(cont
return errors.Wrap(err, "error listing directory")
}
// nolint:gocognit,gocyclo
//nolint:gocognit,gocyclo
func (fsd *filesystemDirectory) iterateEntriesInParallel(ctx context.Context, f *os.File, childPrefix string, batch []os.DirEntry, cb func(context.Context, fs.Entry) error) error {
inputCh := make(chan os.DirEntry, dirListingPrefetch)
outputCh := make(chan entryWithError, dirListingPrefetch)
@@ -248,7 +248,7 @@ func (fsd *filesystemDirectory) iterateEntriesInParallel(ctx context.Context, f
nextBatch, err := f.ReadDir(numEntriesToRead)
if err != nil && !errors.Is(err, io.EOF) {
// nolint:wrapcheck
//nolint:wrapcheck
return err
}
@@ -295,7 +295,7 @@ func (fsf *filesystemFile) Open(ctx context.Context) (fs.Reader, error) {
}
func (fsl *filesystemSymlink) Readlink(ctx context.Context) (string, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return os.Readlink(fsl.fullPath())
}

View File

@@ -2,54 +2,54 @@
import "github.com/kopia/kopia/internal/freepool"
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var (
filesystemFilePool = freepool.New(
func() interface{} { return &filesystemFile{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*filesystemFile) = filesystemFile{}
},
)
filesystemDirectoryPool = freepool.New(
func() interface{} { return &filesystemDirectory{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*filesystemDirectory) = filesystemDirectory{}
},
)
filesystemSymlinkPool = freepool.New(
func() interface{} { return &filesystemSymlink{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*filesystemSymlink) = filesystemSymlink{}
},
)
filesystemErrorEntryPool = freepool.New(
func() interface{} { return &filesystemErrorEntry{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*filesystemErrorEntry) = filesystemErrorEntry{}
},
)
shallowFilesystemFilePool = freepool.New(
func() interface{} { return &shallowFilesystemFile{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*shallowFilesystemFile) = shallowFilesystemFile{}
},
)
shallowFilesystemDirectoryPool = freepool.New(
func() interface{} { return &shallowFilesystemDirectory{} },
func(v interface{}) {
// nolint:forcetypeassert
//nolint:forcetypeassert
*v.(*shallowFilesystemDirectory) = shallowFilesystemDirectory{}
},
)
)
func newFilesystemFile(e filesystemEntry) *filesystemFile {
// nolint:forcetypeassert
//nolint:forcetypeassert
fsf := filesystemFilePool.Take().(*filesystemFile)
fsf.filesystemEntry = e
@@ -61,7 +61,7 @@ func (fsf *filesystemFile) Close() {
}
func newFilesystemDirectory(e filesystemEntry) *filesystemDirectory {
// nolint:forcetypeassert
//nolint:forcetypeassert
fsd := filesystemDirectoryPool.Take().(*filesystemDirectory)
fsd.filesystemEntry = e
@@ -73,7 +73,7 @@ func (fsd *filesystemDirectory) Close() {
}
func newFilesystemSymlink(e filesystemEntry) *filesystemSymlink {
// nolint:forcetypeassert
//nolint:forcetypeassert
fsd := filesystemSymlinkPool.Take().(*filesystemSymlink)
fsd.filesystemEntry = e
@@ -85,7 +85,7 @@ func (fsl *filesystemSymlink) Close() {
}
func newFilesystemErrorEntry(e filesystemEntry, err error) *filesystemErrorEntry {
// nolint:forcetypeassert
//nolint:forcetypeassert
fse := filesystemErrorEntryPool.Take().(*filesystemErrorEntry)
fse.filesystemEntry = e
fse.err = err
@@ -98,7 +98,7 @@ func (e *filesystemErrorEntry) Close() {
}
func newShallowFilesystemFile(e filesystemEntry) *shallowFilesystemFile {
// nolint:forcetypeassert
//nolint:forcetypeassert
fsf := shallowFilesystemFilePool.Take().(*shallowFilesystemFile)
fsf.filesystemEntry = e
@@ -110,7 +110,7 @@ func (fsf *shallowFilesystemFile) Close() {
}
func newShallowFilesystemDirectory(e filesystemEntry) *shallowFilesystemDirectory {
// nolint:forcetypeassert
//nolint:forcetypeassert
fsf := shallowFilesystemDirectoryPool.Take().(*shallowFilesystemDirectory)
fsf.filesystemEntry = e

View File

@@ -153,7 +153,7 @@ func TestIterateNonExistent(t *testing.T) {
}), os.ErrNotExist)
}
// nolint:thelper
//nolint:thelper
func testIterate(t *testing.T, nFiles int) {
tmp := testutil.TempDirectory(t)

View File

@@ -26,7 +26,7 @@ func (ld *loggingDirectory) Child(ctx context.Context, name string) (fs.Entry, e
ld.options.printf(ld.options.prefix+"Child(%v) took %v and returned %v", ld.relativePath, dt, err)
if err != nil {
// nolint:wrapcheck
//nolint:wrapcheck
return nil, err
}

View File

@@ -73,7 +73,7 @@ type staticDirectory struct {
// Child gets the named child of a directory.
func (sd *staticDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return fs.IterateEntriesAndFindChild(ctx, sd, name)
}

View File

@@ -11,7 +11,8 @@
type AccessLevel int
// accessLevelToString maps supported access levels to strings.
// nolint:gochecknoglobals
//
//nolint:gochecknoglobals
var accessLevelToString = map[AccessLevel]string{
AccessLevelNone: "NONE",
AccessLevelRead: "READ",
@@ -20,7 +21,8 @@
}
// stringToAccessLevel maps strings to supported access levels.
// nolint:gochecknoglobals
//
//nolint:gochecknoglobals
var stringToAccessLevel = map[string]AccessLevel{}
func init() {
@@ -45,7 +47,7 @@ func (a AccessLevel) MarshalJSON() ([]byte, error) {
return nil, errors.Errorf("Invalid access level: %v", a)
}
// nolint:wrapcheck
//nolint:wrapcheck
return json.Marshal(j)
}

View File

@@ -91,7 +91,7 @@ func oneOf(allowed ...string) valueValidatorFunc {
}
}
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var allowedLabelsForType = map[string]map[string]valueValidatorFunc{
ContentManifestType: {},
policy.ManifestType: {

View File

@@ -25,7 +25,7 @@ func matchOrWildcard(rule, actual string) bool {
func userMatches(rule, username, hostname string) bool {
ruleParts := strings.Split(rule, "@")
if len(ruleParts) != 2 { // nolint:gomnd
if len(ruleParts) != 2 { //nolint:gomnd
return false
}

View File

@@ -21,7 +21,8 @@
var log = logging.Module("client")
// CSRFTokenHeader is the name of CSRF token header that must be sent for most API calls.
// nolint:gosec
//
//nolint:gosec
const CSRFTokenHeader = "X-Kopia-Csrf-Token"
// KopiaAPIClient provides helper methods for communicating with Kopia API server.
@@ -224,7 +225,7 @@ type basicAuthTransport struct {
func (t basicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
req.SetBasicAuth(t.username, t.password)
// nolint:wrapcheck
//nolint:wrapcheck
return t.base.RoundTrip(req)
}

View File

@@ -53,6 +53,6 @@ func MaybePrefixLongFilenameOnWindows(fname string) string {
// Write is a wrapper around atomic.WriteFile that handles long file names on Windows.
func Write(filename string, r io.Reader) error {
// nolint:wrapcheck
//nolint:wrapcheck
return atomic.WriteFile(MaybePrefixLongFilenameOnWindows(filename), r)
}

View File

@@ -18,7 +18,8 @@
const defaultACLRefreshFrequency = 10 * time.Second
// ContentRule is a special target rule that targets contents instead of manifests.
// nolint:gochecknoglobals
//
//nolint:gochecknoglobals
var ContentRule = acl.TargetRule{
manifest.TypeLabelKey: acl.ContentManifestType,
}
@@ -27,7 +28,8 @@
const anyUser = "*@*"
// DefaultACLs specifies default ACLs.
// nolint:gochecknoglobals
//
//nolint:gochecknoglobals
var DefaultACLs = []*acl.Entry{
{
// everybody can write contents

View File

@@ -115,7 +115,7 @@ func TestDefaultAuthorizer_DefaultACLs(t *testing.T) {
verifyLegacyAuthorizer(ctx, t, env.Repository, auth.DefaultAuthorizer())
}
// nolint:thelper
//nolint:thelper
func verifyLegacyAuthorizer(ctx context.Context, t *testing.T, rep repo.Repository, authorizer auth.Authorizer) {
cases := []struct {
usernameAtHost string

View File

@@ -32,7 +32,8 @@ type ConcurrentAccessOptions struct {
}
// VerifyConcurrentAccess tests data races on a repository to ensure only clean errors are returned.
// nolint:gocognit,gocyclo,funlen,cyclop
//
//nolint:gocognit,gocyclo,funlen,cyclop
func VerifyConcurrentAccess(t *testing.T, st blob.Storage, options ConcurrentAccessOptions) {
t.Helper()

View File

@@ -17,7 +17,8 @@
)
// VerifyStorage verifies the behavior of the specified storage.
// nolint:gocyclo,thelper
//
//nolint:gocyclo,thelper
func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob.PutOptions) {
blocks := []struct {
blk blob.ID
@@ -198,7 +199,8 @@ func VerifyStorage(ctx context.Context, t *testing.T, r blob.Storage, opts blob.
// AssertConnectionInfoRoundTrips verifies that the ConnectionInfo returned by a given storage can be used to create
// equivalent storage.
// nolint:thelper
//
//nolint:thelper
func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s blob.Storage) {
ci := s.ConnectionInfo()
@@ -214,7 +216,8 @@ func AssertConnectionInfoRoundTrips(ctx context.Context, t *testing.T, s blob.St
}
// TestValidationOptions is the set of options used when running providing validation from tests.
// nolint:gomnd
//
//nolint:gomnd
var TestValidationOptions = providervalidation.Options{
MaxClockDrift: 3 * time.Minute,
ConcurrencyTestDuration: 15 * time.Second,

View File

@@ -5,7 +5,7 @@
"github.com/prometheus/client_golang/prometheus/promauto"
)
// nolint:gochecknoglobals,promlinter
//nolint:gochecknoglobals,promlinter
var (
metricHitCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "kopia_content_cache_hit_count",

View File

@@ -15,7 +15,7 @@
"github.com/kopia/kopia/repo/blob/sharded"
)
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var mkdirAll = os.MkdirAll // for testability
// DirMode is the directory mode for all caches.
@@ -52,5 +52,5 @@ func NewStorageOrNil(ctx context.Context, cacheDir string, maxBytes int64, subdi
},
}, false)
return fs.(Storage), errors.Wrap(err, "error initializing filesystem cache") // nolint:forcetypeassert
return fs.(Storage), errors.Wrap(err, "error initializing filesystem cache") //nolint:forcetypeassert
}

View File

@@ -80,7 +80,7 @@ func TestGetContentRaceFetchesOnce_MetadataCache(t *testing.T) {
testGetContentRaceFetchesOnce(t, newContentMetadataCache)
}
// nolint:thelper
//nolint:thelper
func testContentCachePrefetchBlocksGetContent(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)
@@ -148,7 +148,7 @@ func testContentCachePrefetchBlocksGetContent(t *testing.T, newCache newContentC
require.Less(t, getBlobFinishedCnt, getContentFinishedCnt)
}
// nolint:thelper
//nolint:thelper
func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCache newContentCacheFunc, minGetBlobParallelism int) {
ctx := testlogging.Context(t)
@@ -196,7 +196,7 @@ func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCac
require.GreaterOrEqual(t, ct.maxConcurrencyLevel, minGetBlobParallelism)
}
// nolint:thelper
//nolint:thelper
func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)
@@ -246,7 +246,7 @@ func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache ne
require.GreaterOrEqual(t, ct.maxConcurrencyLevel, 2)
}
// nolint:thelper
//nolint:thelper
func testGetContentRaceFetchesOnce(t *testing.T, newCache newContentCacheFunc) {
ctx := testlogging.Context(t)

View File

@@ -15,7 +15,7 @@ type passthroughContentCache struct {
func (c passthroughContentCache) Close(ctx context.Context) {}
func (c passthroughContentCache) GetContent(ctx context.Context, contentID string, blobID blob.ID, offset, length int64, output *gather.WriteBuffer) error {
// nolint:wrapcheck
//nolint:wrapcheck
return c.st.GetBlob(ctx, blobID, offset, length, output)
}

View File

@@ -66,14 +66,14 @@ func (c *PersistentCache) GetFetchingMutex(key string) *sync.RWMutex {
}
if v, ok := c.mutexCache.Get(key); ok {
// nolint:forcetypeassert
//nolint:forcetypeassert
return v.(*sync.RWMutex)
}
newVal := &sync.RWMutex{}
if prevVal, ok, _ := c.mutexCache.PeekOrAdd(key, newVal); ok {
// nolint:forcetypeassert
//nolint:forcetypeassert
return prevVal.(*sync.RWMutex)
}
@@ -236,7 +236,7 @@ func (h contentMetadataHeap) Swap(i, j int) {
}
func (h *contentMetadataHeap) Push(x interface{}) {
*h = append(*h, x.(blob.Metadata)) // nolint:forcetypeassert
*h = append(*h, x.(blob.Metadata)) //nolint:forcetypeassert
}
func (h *contentMetadataHeap) Pop() interface{} {

View File

@@ -24,12 +24,12 @@ type nullStorageProtection struct{
func (nullStorageProtection) Protect(id string, input gather.Bytes, output *gather.WriteBuffer) {
output.Reset()
input.WriteTo(output) // nolint:errcheck
input.WriteTo(output) //nolint:errcheck
}
func (nullStorageProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
output.Reset()
input.WriteTo(output) // nolint:errcheck
input.WriteTo(output) //nolint:errcheck
return nil
}
@@ -50,7 +50,7 @@ func (p checksumProtection) Protect(id string, input gather.Bytes, output *gathe
func (p checksumProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
output.Reset()
// nolint:wrapcheck
//nolint:wrapcheck
return hmac.VerifyAndStrip(input, p.Secret, output)
}

View File

@@ -27,7 +27,7 @@ func TestEncryptionStorageProtection(t *testing.T) {
testStorageProtection(t, e, true)
}
// nolint:thelper
//nolint:thelper
func testStorageProtection(t *testing.T, sp cache.StorageProtection, protectsFromBitFlips bool) {
payload := []byte{0, 1, 2, 3, 4}

View File

@@ -7,5 +7,5 @@
// Now returns current wall clock time.
func Now() time.Time {
return discardMonotonicTime(time.Now()) // nolint:forbidigo
return discardMonotonicTime(time.Now()) //nolint:forbidigo
}

View File

@@ -16,7 +16,7 @@
// Now is overridable function that returns current wall clock time.
var Now = func() time.Time {
return discardMonotonicTime(time.Now()) // nolint:forbidigo
return discardMonotonicTime(time.Now()) //nolint:forbidigo
}
func init() {
@@ -43,7 +43,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
}
var (
nextRefreshRealTime time.Time // nolint:forbidigo
nextRefreshRealTime time.Time //nolint:forbidigo
localTimeOffset time.Duration // offset to be added to time.Now() to produce server time
)
@@ -51,7 +51,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
mu.Lock()
defer mu.Unlock()
localTime := time.Now() // nolint:forbidigo
localTime := time.Now() //nolint:forbidigo
if localTime.After(nextRefreshRealTime) {
resp, err := http.Get(endpoint) //nolint:gosec,noctx
if err != nil {
@@ -67,7 +67,7 @@ func getTimeFromServer(endpoint string) func() time.Time {
log.Fatalf("invalid time received from fake time server: %v", err)
}
nextRefreshRealTime = localTime.Add(timeInfo.ValidFor) // nolint:forbidigo
nextRefreshRealTime = localTime.Add(timeInfo.ValidFor) //nolint:forbidigo
// compute offset such that localTime + localTimeOffset == serverTime
localTimeOffset = timeInfo.Time.Sub(localTime)

View File

@@ -9,11 +9,14 @@
)
// FindFirst looks for a first complete set of blobs IDs following a naming convention:
// '<any>-s<set>-c<count>'
//
// '<any>-s<set>-c<count>'
//
// where:
// 'prefix' is arbitrary string not containing a dash ('-')
// 'set' is a random string shared by all indexes in the same set
// 'count' is a number that specifies how many items must be in the set to make it complete.
//
// 'prefix' is arbitrary string not containing a dash ('-')
// 'set' is a random string shared by all indexes in the same set
// 'count' is a number that specifies how many items must be in the set to make it complete.
//
// The algorithm returns IDs of blobs that form the first complete set.
func FindFirst(bms []blob.Metadata) []blob.Metadata {

View File

@@ -57,7 +57,7 @@ func (r *Reconnector) GetOrOpenConnection(ctx context.Context) (Connection, erro
// UsingConnection invokes the provided callback for a Connection.
func (r *Reconnector) UsingConnection(ctx context.Context, desc string, cb func(cli Connection) (interface{}, error)) (interface{}, error) {
// nolint:wrapcheck
//nolint:wrapcheck
return retry.WithExponentialBackoff(ctx, desc, func() (interface{}, error) {
conn, err := r.GetOrOpenConnection(ctx)
if err != nil {

View File

@@ -7,8 +7,8 @@
type detachedContext struct {
// inherit most methods from context.Background()
context.Context // nolint:containedctx
wrapped context.Context // nolint:containedctx
context.Context //nolint:containedctx
wrapped context.Context //nolint:containedctx
}
// Detach returns a context that inheris provided context's values but not deadline or cancellation.

View File

@@ -37,7 +37,7 @@ func (c *Comparer) Compare(ctx context.Context, e1, e2 fs.Entry) error {
// Close removes all temporary files used by the comparer.
func (c *Comparer) Close() error {
// nolint:wrapcheck
//nolint:wrapcheck
return os.RemoveAll(c.tmpDir)
}
@@ -73,7 +73,7 @@ func (c *Comparer) compareDirectories(ctx context.Context, dir1, dir2 fs.Directo
return c.compareDirectoryEntries(ctx, entries1, entries2, parent)
}
// nolint:gocyclo
//nolint:gocyclo
func (c *Comparer) compareEntry(ctx context.Context, e1, e2 fs.Entry, path string) error {
// see if we have the same object IDs, which implies identical objects, thanks to content-addressable-storage
if h1, ok := e1.(object.HasObjectID); ok {
@@ -267,7 +267,7 @@ func (c *Comparer) compareFiles(ctx context.Context, f1, f2 fs.File, fname strin
args = append(args, c.DiffArguments...)
args = append(args, oldName, newName)
cmd := exec.CommandContext(ctx, c.DiffCommand, args...) // nolint:gosec
cmd := exec.CommandContext(ctx, c.DiffCommand, args...) //nolint:gosec
cmd.Dir = c.tmpDir
cmd.Stdout = c.out
cmd.Stderr = c.out

View File

@@ -30,7 +30,7 @@ func EditLoop(ctx context.Context, fname, initial string, parse func(updated str
tmpFile := filepath.Join(tmpDir, fname)
defer os.RemoveAll(tmpDir) //nolint:errcheck
// nolint:gomnd
//nolint:gomnd
if err := os.WriteFile(tmpFile, []byte(initial), 0o600); err != nil {
return errors.Wrap(err, "unable to write file to edit")
}

View File

@@ -106,7 +106,8 @@ func (p *Parameters) GetEpochDeleteParallelism() int {
}
// Validate validates epoch parameters.
// nolint:gomnd
//
//nolint:gomnd
func (p *Parameters) Validate() error {
if !p.Enabled {
return nil
@@ -140,7 +141,8 @@ func (p *Parameters) Validate() error {
}
// DefaultParameters contains default epoch manager parameters.
// nolint:gomnd
//
//nolint:gomnd
func DefaultParameters() Parameters {
return Parameters{
Enabled: true,
@@ -780,7 +782,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By
}
// make sure we have at least 75% of remaining time
// nolint:gomnd
//nolint:gomnd
cs, err := e.committedState(ctx, 3*p.EpochRefreshFrequency/4)
if err != nil {
return nil, errors.Wrap(err, "error getting committed state")

View File

@@ -71,7 +71,7 @@ func (te *epochManagerTestEnv) compact(ctx context.Context, blobs []blob.ID, pre
"PutBlob error")
}
// write two dummy compaction blobs instead of 3, simulating a compaction that crashed before fully complete.
// write two dummy compaction blobs instead of 3, simulating a compaction that crashed before fully complete.
func (te *epochManagerTestEnv) interruptedCompaction(ctx context.Context, _ []blob.ID, prefix blob.ID) error {
sess := rand.Int63()
@@ -525,7 +525,7 @@ func TestInvalid_Cleanup(t *testing.T) {
require.ErrorIs(t, err, ctx.Err())
}
// nolint:thelper
//nolint:thelper
func verifySequentialWrites(t *testing.T, te *epochManagerTestEnv) {
ctx := testlogging.Context(t)
expected := &fakeIndex{}

View File

@@ -35,7 +35,7 @@ func epochNumberFromBlobID(blobID blob.ID) (int, bool) {
func epochRangeFromBlobID(blobID blob.ID) (min, max int, ok bool) {
parts := strings.Split(string(blobID), "_")
// nolint:gomnd
//nolint:gomnd
if len(parts) < 3 {
return 0, 0, false
}
@@ -84,7 +84,7 @@ func groupByEpochRanges(bms []blob.Metadata) map[int]map[int][]blob.Metadata {
func deletionWatermarkFromBlobID(blobID blob.ID) (time.Time, bool) {
str := strings.TrimPrefix(string(blobID), string(DeletionWatermarkBlobPrefix))
unixSeconds, err := strconv.ParseInt(str, 10, 64) // nolint:gomnd
unixSeconds, err := strconv.ParseInt(str, 10, 64) //nolint:gomnd
if err != nil {
return time.Time{}, false
}

View File

@@ -40,7 +40,7 @@ func Hash(ctx context.Context, e fs.Entry) ([]byte, error) {
return h.Sum(nil), nil
}
// nolint:interfacer
//nolint:interfacer
func write(ctx context.Context, tw *tar.Writer, fullpath string, e fs.Entry) error {
h, err := header(ctx, fullpath, e)
if err != nil {

View File

@@ -9,7 +9,7 @@
"github.com/kopia/kopia/internal/testlogging"
)
// nolint:gocritic
//nolint:gocritic
func TestHash(t *testing.T) {
const expectDifferentHashes = "Expected different hashes, got the same"

View File

@@ -10,7 +10,7 @@
"github.com/pkg/errors"
)
// nolint:gochecknoglobals
//nolint:gochecknoglobals
var invalidSliceBuf = []byte(uuid.NewString())
// Bytes represents a sequence of bytes split into slices.
@@ -198,7 +198,7 @@ func (b Bytes) WriteTo(w io.Writer) (int64, error) {
totalN += int64(n)
if err != nil {
// nolint:wrapcheck
//nolint:wrapcheck
return totalN, err
}
}

Some files were not shown because too many files have changed in this diff.