chore(ci): remove exclusion for unused ctx parameters (#4530)

Remove the revive `unused-parameter` exclusion for `ctx` from the linter configuration; unused `context.Context` parameters are now blank-named (`_`) or, where nothing constrains the signature, dropped entirely.

---------

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Julio Lopez, 2025-04-26 23:11:36 -07:00, committed by GitHub
parent aa40c14e01
commit 8098f49c90
75 changed files with 134 additions and 138 deletions
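The rename pattern applied throughout the diff below is the standard fix for revive's unused-parameter check. A minimal sketch of it follows; the type is a stand-in for illustration, not kopia's actual authenticator:

```go
package example

import "context"

type authenticator struct{}

// Before (flagged by revive once the exclusion is removed):
//
//	func (a *authenticator) Refresh(ctx context.Context) error { return nil }
//
// After: the unused parameter is blank-named, so the method keeps the same
// signature and still satisfies whatever interface requires it, but revive
// no longer reports "parameter 'ctx' seems to be unused".
func (a *authenticator) Refresh(_ context.Context) error { return nil }
```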


@@ -132,10 +132,6 @@ linters:
- text: "tracer is a global variable"
linters:
- gochecknoglobals
# always allow ctx even when unused
- text: "unused-parameter: parameter 'ctx' seems to be unused"
linters:
- revive
- text: "Magic number: 1e"
linters:
- mnd
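The removed block above was a blanket exclusion: every revive unused-parameter finding mentioning `ctx` was silenced repository-wide. If an individual declaration still wants to keep the name for documentation, the narrower escape hatch is an inline nolint directive. This is a hypothetical illustration, not something this commit adds:

```go
package example

import "context"

type server struct{}

// Hypothetical per-site suppression: the parameter keeps its name even
// though the body ignores it, and only this declaration is excluded from
// the revive check.
func (s *server) warmCaches(ctx context.Context) error { //nolint:revive // ctx reserved for future cancellation support
	return nil
}
```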


@@ -88,7 +88,7 @@ type appServices interface {
maybeRepositoryAction(act func(ctx context.Context, rep repo.Repository) error, mode repositoryAccessMode) func(ctx *kingpin.ParseContext) error
baseActionWithContext(act func(ctx context.Context) error) func(ctx *kingpin.ParseContext) error
openRepository(ctx context.Context, mustBeConnected bool) (repo.Repository, error)
advancedCommand(ctx context.Context)
advancedCommand()
repositoryConfigFileName() string
getProgress() *cliProgress
getRestoreProgress() RestoreProgress
@@ -658,7 +658,7 @@ func (c *App) maybeRunMaintenance(ctx context.Context, rep repo.Repository) erro
return errors.Wrap(err, "error running maintenance")
}
func (c *App) advancedCommand(ctx context.Context) {
func (c *App) advancedCommand() {
if c.AdvancedCommands != "enabled" {
_, _ = errorColor.Fprintf(c.stderrWriter, `
This command could be dangerous or lead to repository corruption when used improperly.
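advancedCommand never used its context, and both its declaration in the appServices interface and every caller live in the same package, so the parameter could be removed entirely rather than blank-named. A simplified, self-contained sketch of that refactoring (the real types carry many more methods):

```go
package example

import "fmt"

// Trimmed interface: the context parameter is gone from the method set.
type appServices interface {
	advancedCommand() // was: advancedCommand(ctx context.Context)
}

type app struct{ advancedCommands string }

func (a *app) advancedCommand() {
	if a.advancedCommands != "enabled" {
		fmt.Println("This command could be dangerous or lead to repository corruption when used improperly.")
	}
}

// Compile-time assertion that *app still satisfies the trimmed interface.
var _ appServices = (*app)(nil)
```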


@@ -24,7 +24,7 @@ func (c *commandBlobDelete) setup(svc appServices, parent commandParent) {
}
func (c *commandBlobDelete) run(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
for _, b := range c.blobIDs {
err := rep.BlobStorage().DeleteBlob(ctx, blob.ID(b))


@@ -31,7 +31,7 @@ func (c *commandBlobGC) setup(svc appServices, parent commandParent) {
}
func (c *commandBlobGC) run(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
opts := maintenance.DeleteUnreferencedBlobsOptions{
DryRun: c.delete != "yes",


@@ -23,7 +23,7 @@ func (c *commandContentDelete) setup(svc appServices, parent commandParent) {
}
func (c *commandContentDelete) run(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
contentIDs, err := toContentIDs(c.ids)
if err != nil {


@@ -41,7 +41,7 @@ func (c *commandContentRewrite) setup(svc appServices, parent commandParent) {
}
func (c *commandContentRewrite) runContentRewriteCommand(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
contentIDs, err := toContentIDs(c.contentRewriteIDs)
if err != nil {


@@ -29,7 +29,7 @@ func (c *commandIndexOptimize) setup(svc appServices, parent commandParent) {
}
func (c *commandIndexOptimize) runOptimizeCommand(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
contentIDs, err := toContentIDs(c.optimizeDropContents)
if err != nil {


@@ -40,7 +40,7 @@ func (c *commandIndexRecover) setup(svc appServices, parent commandParent) {
}
func (c *commandIndexRecover) run(ctx context.Context, rep repo.DirectRepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
var (
processedBlobCount atomic.Int32


@@ -23,7 +23,7 @@ func (c *commandManifestDelete) setup(svc appServices, parent commandParent) {
}
func (c *commandManifestDelete) run(ctx context.Context, rep repo.RepositoryWriter) error {
c.svc.advancedCommand(ctx)
c.svc.advancedCommand()
for _, it := range toManifestIDs(c.manifestRemoveItems) {
if err := rep.DeleteManifest(ctx, it); err != nil {


@@ -19,7 +19,7 @@ func (c *commandRepositoryThrottleGet) setup(svc appServices, parent commandPare
cmd.Action(svc.directRepositoryReadAction(c.run))
}
func (c *commandRepositoryThrottleGet) run(ctx context.Context, rep repo.DirectRepository) error {
func (c *commandRepositoryThrottleGet) run(_ context.Context, rep repo.DirectRepository) error {
limits := rep.Throttler().Limits()
if err := c.ctg.output(&limits); err != nil {


@@ -419,7 +419,7 @@ func (c *commandRestore) run(ctx context.Context, rep repo.Repository) error {
}
restoreProgress := c.getRestoreProgress()
progressCallback := func(ctx context.Context, stats restore.Stats) {
progressCallback := func(_ context.Context, stats restore.Stats) {
restoreProgress.SetCounters(stats)
}
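Closures passed where a context-taking callback type is expected are the opposite case: the signature is fixed by the callback's type, so the unused parameter can only be blank-named, never dropped. A minimal sketch with invented types (the real code assigns such a closure as restore's progress callback):

```go
package example

import "context"

type stats struct{ restoredBytes int64 }

// progressFunc stands in for a callback type like restore's progress callback.
type progressFunc func(ctx context.Context, s stats)

func withProgress(cb progressFunc) {
	cb(context.Background(), stats{restoredBytes: 42})
}

func example() {
	// The context parameter must stay in the signature to match progressFunc,
	// so it is blank-named instead of removed.
	withProgress(func(_ context.Context, s stats) {
		_ = s.restoredBytes
	})
}
```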


@@ -59,7 +59,7 @@ func (ep *estimateProgress) Error(ctx context.Context, filename string, err erro
}
}
func (ep *estimateProgress) Stats(ctx context.Context, st *snapshot.Stats, included, excluded upload.SampleBuckets, excludedDirs []string, final bool) {
func (ep *estimateProgress) Stats(_ context.Context, st *snapshot.Stats, included, excluded upload.SampleBuckets, excludedDirs []string, final bool) {
_ = final
ep.stats = *st


@@ -30,7 +30,7 @@ func (c *commandServerUserHashPassword) setup(svc appServices, parent commandPar
// connected repository. To avoid a future incompatible change where the
// 'hash-password' command stops working without a connected repository,
// a connected repository is required now.
func (c *commandServerUserHashPassword) runServerUserHashPassword(ctx context.Context, _ repo.RepositoryWriter) error {
func (c *commandServerUserHashPassword) runServerUserHashPassword(_ context.Context, _ repo.RepositoryWriter) error {
if c.password == "" {
// when password hash is empty, ask for password
pwd, err := askConfirmPass(c.out.stdout(), "Enter password to hash: ")


@@ -11,7 +11,7 @@ type staticIterator struct {
func (it *staticIterator) Close() {
}
func (it *staticIterator) Next(ctx context.Context) (Entry, error) {
func (it *staticIterator) Next(_ context.Context) (Entry, error) {
if it.cur < len(it.entries) {
v := it.entries[it.cur]
it.cur++


@@ -103,7 +103,7 @@ func (f *fileWithMetadata) Entry() (fs.Entry, error) {
return newFilesystemFile(newEntry(fi, dirPrefix(f.Name()))), nil
}
func (fsf *filesystemFile) Open(ctx context.Context) (fs.Reader, error) {
func (fsf *filesystemFile) Open(_ context.Context) (fs.Reader, error) {
f, err := os.Open(fsf.fullPath())
if err != nil {
return nil, errors.Wrap(err, "unable to open local file")
@@ -112,12 +112,12 @@ func (fsf *filesystemFile) Open(ctx context.Context) (fs.Reader, error) {
return &fileWithMetadata{f}, nil
}
func (fsl *filesystemSymlink) Readlink(ctx context.Context) (string, error) {
func (fsl *filesystemSymlink) Readlink(_ context.Context) (string, error) {
//nolint:wrapcheck
return os.Readlink(fsl.fullPath())
}
func (fsl *filesystemSymlink) Resolve(ctx context.Context) (fs.Entry, error) {
func (fsl *filesystemSymlink) Resolve(_ context.Context) (fs.Entry, error) {
target, err := filepath.EvalSymlinks(fsl.fullPath())
if err != nil {
return nil, errors.Wrapf(err, "cannot resolve symlink for '%q'", fsl.fullPath())


@@ -22,7 +22,7 @@ type filesystemDirectoryIterator struct {
currentBatch []os.DirEntry
}
func (it *filesystemDirectoryIterator) Next(ctx context.Context) (fs.Entry, error) {
func (it *filesystemDirectoryIterator) Next(_ context.Context) (fs.Entry, error) {
for {
// we're at the end of the current batch, fetch the next batch
if it.currentIndex >= len(it.currentBatch) {
@@ -63,7 +63,7 @@ func (it *filesystemDirectoryIterator) Close() {
it.dirHandle.Close() //nolint:errcheck
}
func (fsd *filesystemDirectory) Iterate(ctx context.Context) (fs.DirectoryIterator, error) {
func (fsd *filesystemDirectory) Iterate(_ context.Context) (fs.DirectoryIterator, error) {
fullPath := fsd.fullPath()
f, direrr := os.Open(fullPath) //nolint:gosec
@@ -76,7 +76,7 @@ func (fsd *filesystemDirectory) Iterate(ctx context.Context) (fs.DirectoryIterat
return &filesystemDirectoryIterator{dirHandle: f, childPrefix: childPrefix}, nil
}
func (fsd *filesystemDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
func (fsd *filesystemDirectory) Child(_ context.Context, name string) (fs.Entry, error) {
fullPath := fsd.fullPath()
st, err := os.Lstat(filepath.Join(fullPath, name))


@@ -96,21 +96,21 @@ func checkedDirEntryFromPlaceholder(path, php string) (*snapshot.DirEntry, error
return dirEntryFromPlaceholder(php)
}
func (fsf *shallowFilesystemFile) DirEntryOrNil(ctx context.Context) (*snapshot.DirEntry, error) {
func (fsf *shallowFilesystemFile) DirEntryOrNil(_ context.Context) (*snapshot.DirEntry, error) {
path := fsf.fullPath()
php := path + ShallowEntrySuffix
return checkedDirEntryFromPlaceholder(path, php)
}
func (fsd *shallowFilesystemDirectory) DirEntryOrNil(ctx context.Context) (*snapshot.DirEntry, error) {
func (fsd *shallowFilesystemDirectory) DirEntryOrNil(_ context.Context) (*snapshot.DirEntry, error) {
path := fsd.fullPath()
php := filepath.Join(path+ShallowEntrySuffix, ShallowEntrySuffix)
return checkedDirEntryFromPlaceholder(path, php)
}
func (fsf *shallowFilesystemFile) Open(ctx context.Context) (fs.Reader, error) {
func (fsf *shallowFilesystemFile) Open(_ context.Context) (fs.Reader, error) {
// TODO(rjk): Conceivably, we could implement all of these in terms of the repository.
return nil, errors.New("shallowFilesystemFile.Open not supported")
}
@@ -124,7 +124,7 @@ func (fsd *shallowFilesystemDirectory) Child(ctx context.Context, name string) (
return nil, errors.New("shallowFilesystemDirectory.Child not supported")
}
func (fsd *shallowFilesystemDirectory) Iterate(ctx context.Context) (fs.DirectoryIterator, error) {
func (fsd *shallowFilesystemDirectory) Iterate(_ context.Context) (fs.DirectoryIterator, error) {
return nil, errors.New("shallowFilesystemDirectory.IterateEntries not supported")
}


@@ -26,7 +26,7 @@ func TrimShallowSuffix(path string) string {
type PlaceholderFilePath string
// DirEntryOrNil returns the snapshot.DirEntry corresponding to this PlaceholderFilePath.
func (pf PlaceholderFilePath) DirEntryOrNil(ctx context.Context) (*snapshot.DirEntry, error) {
func (pf PlaceholderFilePath) DirEntryOrNil(_ context.Context) (*snapshot.DirEntry, error) {
path := string(pf)
if fi, err := os.Lstat(path); err == nil && fi.IsDir() {
return dirEntryFromPlaceholder(filepath.Join(path, ShallowEntrySuffix))


@@ -78,7 +78,7 @@ func (sd *staticDirectory) Child(ctx context.Context, name string) (fs.Entry, er
return fs.IterateEntriesAndFindChild(ctx, sd, name)
}
func (sd *staticDirectory) Iterate(ctx context.Context) (fs.DirectoryIterator, error) {
func (sd *staticDirectory) Iterate(_ context.Context) (fs.DirectoryIterator, error) {
return fs.StaticIterator(append([]fs.Entry{}, sd.entries...), nil), nil
}
@@ -108,13 +108,13 @@ type streamingDirectory struct {
var errChildNotSupported = errors.New("streamingDirectory.Child not supported")
func (sd *streamingDirectory) Child(ctx context.Context, _ string) (fs.Entry, error) {
func (sd *streamingDirectory) Child(_ context.Context, _ string) (fs.Entry, error) {
return nil, errChildNotSupported
}
var errIteratorAlreadyUsed = errors.New("cannot use streaming directory iterator more than once") // +checklocksignore: mu
func (sd *streamingDirectory) Iterate(ctx context.Context) (fs.DirectoryIterator, error) {
func (sd *streamingDirectory) Iterate(_ context.Context) (fs.DirectoryIterator, error) {
sd.mu.Lock()
defer sd.mu.Unlock()
@@ -158,7 +158,7 @@ type virtualFile struct {
// GetReader returns the streaming file's reader.
// Note: Caller of this function has to ensure concurrency safety.
// The file's reader is set to nil after the first call.
func (vf *virtualFile) GetReader(ctx context.Context) (io.ReadCloser, error) {
func (vf *virtualFile) GetReader(_ context.Context) (io.ReadCloser, error) {
if vf.reader == nil {
return nil, errReaderAlreadyUsed
}


@@ -25,12 +25,12 @@ type singleUserAuthenticator struct {
expectedPasswordBytes []byte
}
func (a *singleUserAuthenticator) IsValid(ctx context.Context, _ repo.Repository, username, password string) bool {
func (a *singleUserAuthenticator) IsValid(_ context.Context, _ repo.Repository, username, password string) bool {
return subtle.ConstantTimeCompare([]byte(username), a.expectedUsernameBytes)*
subtle.ConstantTimeCompare([]byte(password), a.expectedPasswordBytes) == 1
}
func (a *singleUserAuthenticator) Refresh(ctx context.Context) error {
func (a *singleUserAuthenticator) Refresh(_ context.Context) error {
return nil
}
@@ -75,11 +75,11 @@ type htpasswdAuthenticator struct {
f *htpasswd.File
}
func (a htpasswdAuthenticator) IsValid(ctx context.Context, _ repo.Repository, username, password string) bool {
func (a htpasswdAuthenticator) IsValid(_ context.Context, _ repo.Repository, username, password string) bool {
return a.f.Match(username, password)
}
func (a htpasswdAuthenticator) Refresh(ctx context.Context) error {
func (a htpasswdAuthenticator) Refresh(_ context.Context) error {
return errors.Wrap(a.f.Reload(nil), "error reloading password file")
}


@@ -61,7 +61,7 @@ func (ac *repositoryUserAuthenticator) IsValid(ctx context.Context, rep repo.Rep
return valid
}
func (ac *repositoryUserAuthenticator) Refresh(ctx context.Context) error {
func (ac *repositoryUserAuthenticator) Refresh(_ context.Context) error {
ac.mu.Lock()
defer ac.mu.Unlock()


@@ -81,11 +81,11 @@ func (la legacyAuthorizationInfo) ManifestAccessLevel(labels map[string]string)
type legacyAuthorizer struct{}
func (legacyAuthorizer) Authorize(ctx context.Context, _ repo.Repository, username string) AuthorizationInfo {
func (legacyAuthorizer) Authorize(_ context.Context, _ repo.Repository, username string) AuthorizationInfo {
return legacyAuthorizationInfo{usernameAtHostname: username}
}
func (legacyAuthorizer) Refresh(ctx context.Context) error {
func (legacyAuthorizer) Refresh(_ context.Context) error {
return nil
}


@@ -141,7 +141,7 @@ func (ac *aclCache) Authorize(ctx context.Context, rep repo.Repository, username
return aclEntriesAuthorizer{acl.EntriesForUser(ac.aclEntries, u, h), u, h}
}
func (ac *aclCache) Refresh(ctx context.Context) error {
func (ac *aclCache) Refresh(_ context.Context) error {
ac.mu.Lock()
defer ac.mu.Unlock()


@@ -389,7 +389,7 @@ func (m *internalMap) newSegment(ctx context.Context) mmap.MMap {
}
// Close releases all resources associated with a map.
func (m *internalMap) Close(ctx context.Context) {
func (m *internalMap) Close(_ context.Context) {
m.mu.Lock()
defer m.mu.Unlock()


@@ -51,7 +51,7 @@ func (s *Map) PutIfAbsent(ctx context.Context, key, value []byte) bool {
}
// Get gets the element from the map and appends the value to the provided buffer.
func (s *Map) Get(ctx context.Context, output, key []byte) (result []byte, ok bool, err error) {
func (s *Map) Get(_ context.Context, output, key []byte) (result []byte, ok bool, err error) {
if v, ok := s.inner.Get(output, key); ok {
result, err := s.decrypt(key, v)


@@ -12,7 +12,7 @@ type passthroughContentCache struct {
st blob.Storage
}
func (c passthroughContentCache) Close(ctx context.Context) {}
func (c passthroughContentCache) Close(_ context.Context) {}
func (c passthroughContentCache) GetContent(ctx context.Context, contentID string, blobID blob.ID, offset, length int64, output *gather.WriteBuffer) error {
_ = contentID
@@ -21,13 +21,13 @@ func (c passthroughContentCache) GetContent(ctx context.Context, contentID strin
return c.st.GetBlob(ctx, blobID, offset, length, output)
}
func (c passthroughContentCache) PrefetchBlob(ctx context.Context, blobID blob.ID) error {
func (c passthroughContentCache) PrefetchBlob(_ context.Context, blobID blob.ID) error {
_ = blobID
return nil
}
func (c passthroughContentCache) Sync(ctx context.Context, blobPrefix blob.ID) error {
func (c passthroughContentCache) Sync(_ context.Context, blobPrefix blob.ID) error {
_ = blobPrefix
return nil


@@ -211,7 +211,7 @@ func (c *PersistentCache) Put(ctx context.Context, key string, data gather.Bytes
}
// Close closes the instance of persistent cache possibly waiting for at least one sweep to complete.
func (c *PersistentCache) Close(ctx context.Context) {
func (c *PersistentCache) Close(_ context.Context) {
if c == nil {
return
}


@@ -60,7 +60,7 @@ func populateAttributes(a *fuse.Attr, e fs.Entry) {
a.Blocks = (a.Size + fakeBlockSize - 1) / fakeBlockSize
}
func (n *fuseNode) Getattr(ctx context.Context, _ gofusefs.FileHandle, a *fuse.AttrOut) syscall.Errno {
func (n *fuseNode) Getattr(_ context.Context, _ gofusefs.FileHandle, a *fuse.AttrOut) syscall.Errno {
populateAttributes(&a.Attr, n.entry)
a.Ino = n.StableAttr().Ino
@@ -114,7 +114,7 @@ func (f *fuseFileHandle) Read(ctx context.Context, dest []byte, off int64) (fuse
return fuse.ReadResultData(dest[0:n]), gofusefs.OK
}
func (f *fuseFileHandle) Release(ctx context.Context) syscall.Errno {
func (f *fuseFileHandle) Release(_ context.Context) syscall.Errno {
f.mu.Lock()
defer f.mu.Unlock()


@@ -103,7 +103,7 @@ func (r *Registry) Snapshot(reset bool) Snapshot {
}
// Close closes the metrics registry.
func (r *Registry) Close(ctx context.Context) error {
func (r *Registry) Close(_ context.Context) error {
if r == nil {
return nil
}


@@ -53,7 +53,7 @@ type SnapshotValueAggregator[T any] interface {
// CreateTimeSeries computes time series which represent aggregations of a given
// counters or distributions over a set of snapshots.
func CreateTimeSeries[TValue any](
ctx context.Context,
_ context.Context,
snapshots []*Snapshot,
valueHandler SnapshotValueAggregator[TValue],
opts AggregateMetricsOptions,


@@ -315,7 +315,7 @@ func (imd *Directory) SupportsMultipleIterations() bool {
}
// Child gets the named child of a directory.
func (imd *Directory) Child(ctx context.Context, name string) (fs.Entry, error) {
func (imd *Directory) Child(_ context.Context, name string) (fs.Entry, error) {
e := fs.FindByName(imd.children, name)
if e != nil {
return e, nil
@@ -325,7 +325,7 @@ func (imd *Directory) Child(ctx context.Context, name string) (fs.Entry, error)
}
// Iterate returns directory iterator.
func (imd *Directory) Iterate(ctx context.Context) (fs.DirectoryIterator, error) {
func (imd *Directory) Iterate(_ context.Context) (fs.DirectoryIterator, error) {
if imd.readdirError != nil {
return nil, errors.Wrapf(imd.readdirError, "in mockfs Directory.Iterate on directory %s", imd.name)
}
@@ -361,7 +361,7 @@ func (ifr *fileReader) Entry() (fs.Entry, error) {
}
// Open opens the file for reading, optionally simulating error.
func (imf *File) Open(ctx context.Context) (fs.Reader, error) {
func (imf *File) Open(_ context.Context) (fs.Reader, error) {
r, err := imf.source()
if err != nil {
return nil, err
@@ -398,7 +398,7 @@ func (imsl *Symlink) Resolve(ctx context.Context) (fs.Entry, error) {
}
// Readlink implements fs.Symlink interface.
func (imsl *Symlink) Readlink(ctx context.Context) (string, error) {
func (imsl *Symlink) Readlink(_ context.Context) (string, error) {
return imsl.target, nil
}


@@ -89,7 +89,7 @@ func (fc fuseController) MountPath() string {
return fc.mountPoint
}
func (fc fuseController) Unmount(ctx context.Context) error {
func (fc fuseController) Unmount(_ context.Context) error {
if err := fc.fuseConnection.Unmount(); err != nil {
return errors.Wrap(err, "unmount error")
}


@@ -7,7 +7,7 @@
"github.com/pkg/errors"
)
func mountWebDavHelper(ctx context.Context, url, path string) error {
func mountWebDavHelper(_ context.Context, url, path string) error {
mount := exec.Command("/sbin/mount", "-t", "webdav", "-r", url, path)
if err := mount.Run(); err != nil {
return errors.Errorf("webdav mount %q on %q failed: %v", url, path, err)
@@ -16,7 +16,7 @@ func mountWebDavHelper(ctx context.Context, url, path string) error {
return nil
}
func unmountWebDevHelper(ctx context.Context, path string) error {
func unmountWebDevHelper(_ context.Context, path string) error {
unmount := exec.Command("/usr/sbin/diskutil", "unmount", path)
if err := unmount.Run(); err != nil {
return errors.Errorf("unmount %q failed: %v", path, err)


@@ -45,7 +45,7 @@ func (filePasswordStorage) PersistPassword(ctx context.Context, configFile, pass
return os.WriteFile(fn, []byte(base64.StdEncoding.EncodeToString([]byte(password))), passwordFileMode)
}
func (filePasswordStorage) DeletePassword(ctx context.Context, configFile string) error {
func (filePasswordStorage) DeletePassword(_ context.Context, configFile string) error {
err := os.Remove(passwordFileName(configFile))
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "error deleting password file")


@@ -53,7 +53,7 @@ func (w *BlobWriter) EncryptAndWriteBlobAsync(ctx context.Context, prefix blob.I
}
// Wait waits for all the writes to complete.
func (w *BlobWriter) Wait(ctx context.Context) error {
func (w *BlobWriter) Wait(_ context.Context) error {
w.wg.Wait()
return nil
}


@@ -8,7 +8,7 @@
"github.com/kopia/kopia/internal/serverapi"
)
func handleCLIInfo(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleCLIInfo(_ context.Context, rc requestContext) (interface{}, *apiError) {
executable, err := os.Executable()
if err != nil {
executable = "kopia"


@@ -24,7 +24,7 @@ type estimateTaskProgress struct {
ctrl uitask.Controller
}
func (p estimateTaskProgress) Processing(ctx context.Context, dirname string) {
func (p estimateTaskProgress) Processing(_ context.Context, dirname string) {
p.ctrl.ReportProgressInfo(dirname)
}


@@ -77,7 +77,7 @@ func handleMountDelete(ctx context.Context, rc requestContext) (interface{}, *ap
return &serverapi.Empty{}, nil
}
func handleMountList(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleMountList(_ context.Context, rc requestContext) (interface{}, *apiError) {
res := &serverapi.MountedSnapshots{
Items: []*serverapi.MountedSnapshot{},
}


@@ -10,7 +10,7 @@
"github.com/kopia/kopia/snapshot"
)
func handlePathResolve(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handlePathResolve(_ context.Context, rc requestContext) (interface{}, *apiError) {
var req serverapi.ResolvePathRequest
if err := json.Unmarshal(rc.body, &req); err != nil {


@@ -26,7 +26,7 @@
const syncConnectWaitTime = 5 * time.Second
func handleRepoStatus(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleRepoStatus(_ context.Context, rc requestContext) (interface{}, *apiError) {
if rc.rep == nil {
return &serverapi.StatusResponse{
Connected: false,
@@ -240,7 +240,7 @@ func handleRepoSetDescription(ctx context.Context, rc requestContext) (interface
return handleRepoStatus(ctx, rc)
}
func handleRepoSupportedAlgorithms(ctx context.Context, _ requestContext) (interface{}, *apiError) {
func handleRepoSupportedAlgorithms(_ context.Context, _ requestContext) (interface{}, *apiError) {
res := &serverapi.SupportedAlgorithmsResponse{
DefaultHashAlgorithm: hashing.DefaultAlgorithm,
SupportedHashAlgorithms: toAlgorithmInfo(hashing.SupportedAlgorithms(), neverDeprecated),
@@ -299,7 +299,7 @@ func sortAlgorithms(a []serverapi.AlgorithmInfo) {
})
}
func handleRepoGetThrottle(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleRepoGetThrottle(_ context.Context, rc requestContext) (interface{}, *apiError) {
dr, ok := rc.rep.(repo.DirectRepository)
if !ok {
return nil, requestError(serverapi.ErrorStorageConnection, "no direct storage connection")
@@ -308,7 +308,7 @@ func handleRepoGetThrottle(ctx context.Context, rc requestContext) (interface{},
return dr.Throttler().Limits(), nil
}
func handleRepoSetThrottle(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleRepoSetThrottle(_ context.Context, rc requestContext) (interface{}, *apiError) {
dr, ok := rc.rep.(repo.DirectRepository)
if !ok {
return nil, requestError(serverapi.ErrorStorageConnection, "no direct storage connection")
@@ -387,7 +387,7 @@ func (s *Server) disconnect(ctx context.Context) error {
return nil
}
func handleRepoSync(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleRepoSync(_ context.Context, rc requestContext) (interface{}, *apiError) {
rc.srv.Refresh()
return &serverapi.Empty{}, nil


@@ -95,7 +95,7 @@ func handleRestore(ctx context.Context, rc requestContext) (interface{}, *apiErr
opt := req.Options
opt.ProgressCallback = func(ctx context.Context, s restore.Stats) {
opt.ProgressCallback = func(_ context.Context, s restore.Stats) {
ctrl.ReportCounters(restoreCounters(s))
}


@@ -15,7 +15,7 @@
"github.com/kopia/kopia/snapshot/policy"
)
func handleSourcesList(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleSourcesList(_ context.Context, rc requestContext) (interface{}, *apiError) {
_, multiUser := rc.rep.(repo.DirectRepository)
resp := &serverapi.SourcesResponse{


@@ -7,7 +7,7 @@
"github.com/kopia/kopia/internal/uitask"
)
func handleTaskList(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleTaskList(_ context.Context, rc requestContext) (interface{}, *apiError) {
tasks := rc.srv.taskManager().ListTasks()
if tasks == nil {
tasks = []uitask.Info{}
@@ -18,7 +18,7 @@ func handleTaskList(ctx context.Context, rc requestContext) (interface{}, *apiEr
}, nil
}
func handleTaskInfo(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleTaskInfo(_ context.Context, rc requestContext) (interface{}, *apiError) {
taskID := rc.muxVar("taskID")
t, ok := rc.srv.taskManager().GetTask(taskID)
@@ -29,11 +29,11 @@ func handleTaskInfo(ctx context.Context, rc requestContext) (interface{}, *apiEr
return t, nil
}
func handleTaskSummary(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleTaskSummary(_ context.Context, rc requestContext) (interface{}, *apiError) {
return rc.srv.taskManager().TaskSummary(), nil
}
func handleTaskLogs(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleTaskLogs(_ context.Context, rc requestContext) (interface{}, *apiError) {
taskID := rc.muxVar("taskID")
return serverapi.TaskLogResponse{
@@ -41,7 +41,7 @@ func handleTaskLogs(ctx context.Context, rc requestContext) (interface{}, *apiEr
}, nil
}
func handleTaskCancel(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleTaskCancel(_ context.Context, rc requestContext) (interface{}, *apiError) {
rc.srv.taskManager().CancelTask(rc.muxVar("taskID"))
return &serverapi.Empty{}, nil


@@ -37,7 +37,7 @@ func getUIPreferencesOrEmpty(s serverInterface) (serverapi.UIPreferences, error)
return p, nil
}
func handleGetUIPreferences(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleGetUIPreferences(_ context.Context, rc requestContext) (interface{}, *apiError) {
p, err := getUIPreferencesOrEmpty(rc.srv)
if err != nil {
return nil, internalServerError(err)
@@ -46,7 +46,7 @@ func handleGetUIPreferences(ctx context.Context, rc requestContext) (interface{}
return &p, nil
}
func handleSetUIPreferences(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleSetUIPreferences(_ context.Context, rc requestContext) (interface{}, *apiError) {
var p serverapi.UIPreferences
// verify the JSON is valid by unmarshaling it


@@ -58,7 +58,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool {
return false
}
func requireUIUser(ctx context.Context, rc requestContext) bool {
func requireUIUser(_ context.Context, rc requestContext) bool {
if rc.srv.getAuthenticator() == nil {
return true
}
@@ -72,7 +72,7 @@ func requireUIUser(ctx context.Context, rc requestContext) bool {
return user == rc.srv.getOptions().UIUser
}
func requireServerControlUser(ctx context.Context, rc requestContext) bool {
func requireServerControlUser(_ context.Context, rc requestContext) bool {
if rc.srv.getAuthenticator() == nil {
return true
}
@@ -86,11 +86,11 @@ func requireServerControlUser(ctx context.Context, rc requestContext) bool {
return user == rc.srv.getOptions().ServerControlUser
}
func anyAuthenticatedUser(ctx context.Context, _ requestContext) bool {
func anyAuthenticatedUser(_ context.Context, _ requestContext) bool {
return true
}
func handlerWillCheckAuthorization(ctx context.Context, _ requestContext) bool {
func handlerWillCheckAuthorization(_ context.Context, _ requestContext) bool {
return true
}


@@ -172,15 +172,15 @@ type webdavFS struct {
dir fs.Directory
}
func (w *webdavFS) Mkdir(ctx context.Context, path string, _ os.FileMode) error {
func (w *webdavFS) Mkdir(_ context.Context, path string, _ os.FileMode) error {
return errors.Errorf("can't create %q: read-only filesystem", path)
}
func (w *webdavFS) RemoveAll(ctx context.Context, path string) error {
func (w *webdavFS) RemoveAll(_ context.Context, path string) error {
return errors.Errorf("can't remove %q: read-only filesystem", path)
}
func (w *webdavFS) Rename(ctx context.Context, oldPath, newPath string) error {
func (w *webdavFS) Rename(_ context.Context, oldPath, newPath string) error {
return errors.Errorf("can't rename %q to %q: read-only filesystem", oldPath, newPath)
}


@@ -21,7 +21,7 @@ type emailProvider struct {
opt Options
}
func (p *emailProvider) Send(ctx context.Context, msg *sender.Message) error {
func (p *emailProvider) Send(_ context.Context, msg *sender.Message) error {
var auth smtp.Auth
if p.opt.SMTPUsername != "" {


@@ -39,7 +39,7 @@ func MergeOptions(ctx context.Context, src Options, dst *Options, isUpdate bool)
}
// ApplyDefaultsAndValidate applies default values and validates the configuration.
func (o *Options) ApplyDefaultsAndValidate(ctx context.Context) error {
func (o *Options) ApplyDefaultsAndValidate(_ context.Context) error {
if o.SMTPPort == 0 {
o.SMTPPort = defaultSMTPPort
}


@@ -18,7 +18,7 @@ type jsonSender struct {
minSeverity sender.Severity
}
func (p *jsonSender) Send(ctx context.Context, msg *sender.Message) error {
func (p *jsonSender) Send(_ context.Context, msg *sender.Message) error {
if msg.Severity < p.minSeverity {
return nil
}


@@ -18,7 +18,7 @@ type Options struct {
}
// ApplyDefaultsAndValidate applies default values and validates the configuration.
func (o *Options) ApplyDefaultsAndValidate(ctx context.Context) error {
func (o *Options) ApplyDefaultsAndValidate(_ context.Context) error {
if o.AppToken == "" {
return errors.Errorf("App Token must be provided")
}


@@ -18,7 +18,7 @@ type Options struct {
}
// ApplyDefaultsAndValidate applies default values and validates the configuration.
func (o *Options) ApplyDefaultsAndValidate(ctx context.Context) error {
func (o *Options) ApplyDefaultsAndValidate(_ context.Context) error {
if o.Method == "" {
o.Method = "POST"
}


@@ -32,7 +32,7 @@ type b2Storage struct {
bucket *backblaze.Bucket
}
func (s *b2Storage) GetBlob(ctx context.Context, id blob.ID, offset, length int64, output blob.OutputBuffer) error {
func (s *b2Storage) GetBlob(_ context.Context, id blob.ID, offset, length int64, output blob.OutputBuffer) error {
fileName := s.getObjectNameString(id)
if offset < 0 {
@@ -87,7 +87,7 @@ func (s *b2Storage) resolveFileID(fileName string) (string, error) {
return "", nil
}
func (s *b2Storage) GetMetadata(ctx context.Context, id blob.ID) (blob.Metadata, error) {
func (s *b2Storage) GetMetadata(_ context.Context, id blob.ID) (blob.Metadata, error) {
fileName := s.getObjectNameString(id)
fileID, err := s.resolveFileID(fileName)
@@ -145,7 +145,7 @@ func translateError(err error) error {
return err
}
func (s *b2Storage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
func (s *b2Storage) PutBlob(_ context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
switch {
case opts.HasRetentionOptions():
return errors.Wrap(blob.ErrUnsupportedPutBlobOption, "blob-retention")
@@ -175,7 +175,7 @@ func (s *b2Storage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, op
return nil
}
func (s *b2Storage) DeleteBlob(ctx context.Context, id blob.ID) error {
func (s *b2Storage) DeleteBlob(_ context.Context, id blob.ID) error {
_, err := s.bucket.HideFile(s.getObjectNameString(id))
err = translateError(err)
@@ -191,7 +191,7 @@ func (s *b2Storage) getObjectNameString(id blob.ID) string {
return s.Prefix + string(id)
}
func (s *b2Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
func (s *b2Storage) ListBlobs(_ context.Context, prefix blob.ID, callback func(blob.Metadata) error) error {
const maxFileQuery = 1000
fullPrefix := s.getObjectNameString(prefix)
@@ -247,7 +247,7 @@ func (s *b2Storage) String() string {
}
// New creates new B2-backed storage with specified options.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
func New(_ context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
_ = isCreate
if opt.BucketName == "" {


@@ -234,7 +234,7 @@ func (gcs *gcsStorage) DisplayName() string {
return fmt.Sprintf("GCS: %v", gcs.BucketName)
}
func (gcs *gcsStorage) Close(ctx context.Context) error {
func (gcs *gcsStorage) Close(_ context.Context) error {
return errors.Wrap(gcs.storageClient.Close(), "error closing GCS storage")
}


@@ -334,7 +334,7 @@ func (gdrive *gdriveStorage) DisplayName() string {
return fmt.Sprintf("Google Drive: %v", gdrive.folderID)
}
func (gdrive *gdriveStorage) FlushCaches(ctx context.Context) error {
func (gdrive *gdriveStorage) FlushCaches(_ context.Context) error {
gdrive.fileIDCache.Clear()
return nil
}


@@ -52,7 +52,7 @@ func (t *tokenBucketBasedThrottler) BeforeOperation(ctx context.Context, op stri
}
}
func (t *tokenBucketBasedThrottler) AfterOperation(ctx context.Context, op string) {
func (t *tokenBucketBasedThrottler) AfterOperation(_ context.Context, op string) {
switch op {
case operationListBlobs:
case operationGetBlob, operationGetMetadata:


@@ -68,11 +68,11 @@ func (b *tokenBucket) Take(ctx context.Context, n float64) {
}
}
func (b *tokenBucket) TakeDuration(ctx context.Context, n float64) time.Duration {
func (b *tokenBucket) TakeDuration(_ context.Context, n float64) time.Duration {
return b.sleepDurationBeforeTokenAreAvailable(n, b.now())
}
func (b *tokenBucket) Return(ctx context.Context, n float64) {
func (b *tokenBucket) Return(_ context.Context, n float64) {
b.mu.Lock()
defer b.mu.Unlock()


@@ -45,7 +45,7 @@ type davStorageImpl struct {
cli *gowebdav.Client
}
func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, offset, length int64, output blob.OutputBuffer) error {
func (d *davStorageImpl) GetBlobFromPath(_ context.Context, dirPath, path string, offset, length int64, output blob.OutputBuffer) error {
_ = dirPath
output.Reset()
@@ -86,7 +86,7 @@ func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path stri
return blob.EnsureLengthExactly(output.Length(), length)
}
func (d *davStorageImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string) (blob.Metadata, error) {
func (d *davStorageImpl) GetMetadataFromPath(_ context.Context, dirPath, path string) (blob.Metadata, error) {
_ = dirPath
fi, err := d.cli.Stat(path)
@@ -129,7 +129,7 @@ func (d *davStorageImpl) translateError(err error) error {
return err
}
func (d *davStorageImpl) ReadDir(ctx context.Context, dir string) ([]os.FileInfo, error) {
func (d *davStorageImpl) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
entries, err := d.cli.ReadDir(gowebdav.FixSlash(dir))
if err == nil {
return entries, nil
@@ -258,7 +258,7 @@ func isRetriable(err error) bool {
}
// New creates new WebDAV-backed storage in a specified URL.
func New(ctx context.Context, opts *Options, isCreate bool) (blob.Storage, error) {
func New(_ context.Context, opts *Options, isCreate bool) (blob.Storage, error) {
cli := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
// Since we're handling encrypted data, there's no point compressing it server-side.


@@ -13,7 +13,7 @@
)
// GetCachingOptions reads caching configuration for a given repository.
func GetCachingOptions(ctx context.Context, configFile string) (*content.CachingOptions, error) {
func GetCachingOptions(_ context.Context, configFile string) (*content.CachingOptions, error) {
lc, err := LoadConfigFromFile(configFile)
if err != nil {
return nil, err


@@ -107,7 +107,7 @@ func Disconnect(ctx context.Context, configFile string) error {
}
// SetClientOptions updates client options stored in the provided configuration file.
func SetClientOptions(ctx context.Context, configFile string, cliOpt ClientOptions) error {
func SetClientOptions(_ context.Context, configFile string, cliOpt ClientOptions) error {
lc, err := LoadConfigFromFile(configFile)
if err != nil {
return err


@@ -33,7 +33,7 @@ func (c *diskCommittedContentIndexCache) indexBlobPath(indexBlobID blob.ID) stri
return filepath.Join(c.dirname, string(indexBlobID)+simpleIndexSuffix)
}
func (c *diskCommittedContentIndexCache) openIndex(ctx context.Context, indexBlobID blob.ID) (index.Index, error) {
func (c *diskCommittedContentIndexCache) openIndex(_ context.Context, indexBlobID blob.ID) (index.Index, error) {
fullpath := c.indexBlobPath(indexBlobID)
f, closeMmap, err := c.mmapOpenWithRetry(fullpath)
@@ -96,7 +96,7 @@ func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MM
}, nil
}
func (c *diskCommittedContentIndexCache) hasIndexBlobID(ctx context.Context, indexBlobID blob.ID) (bool, error) {
func (c *diskCommittedContentIndexCache) hasIndexBlobID(_ context.Context, indexBlobID blob.ID) (bool, error) {
_, err := os.Stat(c.indexBlobPath(indexBlobID))
if err == nil {
return true, nil
@@ -165,7 +165,7 @@ func writeTempFileAtomic(dirname string, data []byte) (string, error) {
return tf.Name(), nil
}
func (c *diskCommittedContentIndexCache) expireUnused(ctx context.Context, used []blob.ID) error {
func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used []blob.ID) error {
c.log.Debugw("expireUnused",
"except", used,
"minSweepAge", c.minSweepAge)


@@ -20,14 +20,14 @@ type memoryCommittedContentIndexCache struct {
v1PerContentOverhead func() int // +checklocksignore
}
func (m *memoryCommittedContentIndexCache) hasIndexBlobID(ctx context.Context, indexBlobID blob.ID) (bool, error) {
func (m *memoryCommittedContentIndexCache) hasIndexBlobID(_ context.Context, indexBlobID blob.ID) (bool, error) {
m.mu.Lock()
defer m.mu.Unlock()
return m.contents[indexBlobID] != nil, nil
}
func (m *memoryCommittedContentIndexCache) addContentToCache(ctx context.Context, indexBlobID blob.ID, data gather.Bytes) error {
func (m *memoryCommittedContentIndexCache) addContentToCache(_ context.Context, indexBlobID blob.ID, data gather.Bytes) error {
m.mu.Lock()
defer m.mu.Unlock()
@@ -41,7 +41,7 @@ func (m *memoryCommittedContentIndexCache) addContentToCache(ctx context.Context
return nil
}
func (m *memoryCommittedContentIndexCache) openIndex(ctx context.Context, indexBlobID blob.ID) (index.Index, error) {
func (m *memoryCommittedContentIndexCache) openIndex(_ context.Context, indexBlobID blob.ID) (index.Index, error) {
m.mu.Lock()
defer m.mu.Unlock()
@@ -53,7 +53,7 @@ func (m *memoryCommittedContentIndexCache) openIndex(ctx context.Context, indexB
return v, nil
}
func (m *memoryCommittedContentIndexCache) expireUnused(ctx context.Context, used []blob.ID) error {
func (m *memoryCommittedContentIndexCache) expireUnused(_ context.Context, used []blob.ID) error {
m.mu.Lock()
defer m.mu.Unlock()


@@ -987,7 +987,7 @@ type SessionOptions struct {
}
// NewWriteManager returns a session write manager.
func NewWriteManager(ctx context.Context, sm *SharedManager, options SessionOptions, writeManagerID string) *WriteManager {
func NewWriteManager(_ context.Context, sm *SharedManager, options SessionOptions, writeManagerID string) *WriteManager {
if options.OnUpload == nil {
options.OnUpload = func(int64) {}
}


@@ -46,7 +46,7 @@ func (f *ContentFormat) ResolveFormatVersion() error {
}
// GetMutableParameters implements FormattingOptionsProvider.
func (f *ContentFormat) GetMutableParameters(ctx context.Context) (MutableParameters, error) {
func (f *ContentFormat) GetMutableParameters(_ context.Context) (MutableParameters, error) {
return f.MutableParameters, nil
}


@@ -56,7 +56,7 @@ type inMemoryCache struct {
times map[blob.ID]time.Time
}
func (c *inMemoryCache) Get(ctx context.Context, blobID blob.ID) ([]byte, time.Time, bool) {
func (c *inMemoryCache) Get(_ context.Context, blobID blob.ID) ([]byte, time.Time, bool) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -68,7 +68,7 @@ func (c *inMemoryCache) Get(ctx context.Context, blobID blob.ID) ([]byte, time.T
return nil, time.Time{}, false
}
func (c *inMemoryCache) Put(ctx context.Context, blobID blob.ID, data []byte) (time.Time, error) {
func (c *inMemoryCache) Put(_ context.Context, blobID blob.ID, data []byte) (time.Time, error) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -78,7 +78,7 @@ func (c *inMemoryCache) Put(ctx context.Context, blobID blob.ID, data []byte) (t
return c.times[blobID], nil
}
func (c *inMemoryCache) Remove(ctx context.Context, ids []blob.ID) {
func (c *inMemoryCache) Remove(_ context.Context, ids []blob.ID) {
c.mu.Lock()
defer c.mu.Unlock()
@@ -92,7 +92,7 @@ type onDiskCache struct {
cacheDirectory string
}
func (c *onDiskCache) Get(ctx context.Context, blobID blob.ID) ([]byte, time.Time, bool) {
func (c *onDiskCache) Get(_ context.Context, blobID blob.ID) ([]byte, time.Time, bool) {
cachedFile := filepath.Join(c.cacheDirectory, string(blobID))
cst, err := os.Stat(cachedFile)
@@ -108,7 +108,7 @@ func (c *onDiskCache) Get(ctx context.Context, blobID blob.ID) ([]byte, time.Tim
return data, cacheMTime, err == nil
}
func (c *onDiskCache) Put(ctx context.Context, blobID blob.ID, data []byte) (time.Time, error) {
func (c *onDiskCache) Put(_ context.Context, blobID blob.ID, data []byte) (time.Time, error) {
cachedFile := filepath.Join(c.cacheDirectory, string(blobID))
// optimistically assume cache directory exist, create it if not


@@ -152,7 +152,7 @@ func (f *formattingOptionsProvider) HashFunc() hashing.HashFunc {
return f.h
}
func (f *formattingOptionsProvider) RepositoryFormatBytes(ctx context.Context) ([]byte, error) {
func (f *formattingOptionsProvider) RepositoryFormatBytes(_ context.Context) ([]byte, error) {
if f.SupportsPasswordChange() {
return nil, nil
}


@@ -504,7 +504,7 @@ func (r *grpcRepositoryClient) Time() time.Time {
return clock.Now()
}
func (r *grpcRepositoryClient) Refresh(ctx context.Context) error {
func (r *grpcRepositoryClient) Refresh(_ context.Context) error {
return nil
}
@@ -830,7 +830,7 @@ type grpcCreds struct {
password string
}
func (c grpcCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
func (c grpcCreds) GetRequestMetadata(_ context.Context, uri ...string) (map[string]string, error) {
_ = uri
return map[string]string{
@@ -879,7 +879,7 @@ func openGRPCAPIRepository(ctx context.Context, si *APIServerInfo, password stri
}
par.registerEarlyCloseFunc(
func(ctx context.Context) error {
func(_ context.Context) error {
return errors.Wrap(conn.Close(), "error closing GRPC connection")
})


@@ -68,7 +68,7 @@ type Manager struct {
}
// Put serializes the provided payload to JSON and persists it. Returns unique identifier that represents the manifest.
func (m *Manager) Put(ctx context.Context, labels map[string]string, payload interface{}) (ID, error) {
func (m *Manager) Put(_ context.Context, labels map[string]string, payload interface{}) (ID, error) {
if labels[TypeLabelKey] == "" {
return "", errors.New("'type' label is required")
}
@@ -294,7 +294,7 @@ type ManagerOptions struct {
}
// NewManager returns new manifest manager for the provided content manager.
func NewManager(ctx context.Context, b contentManager, options ManagerOptions, mr *metrics.Registry) (*Manager, error) {
func NewManager(_ context.Context, b contentManager, options ManagerOptions, mr *metrics.Registry) (*Manager, error) {
_ = mr
timeNow := options.TimeNow


@@ -214,7 +214,7 @@ func PrefetchBackingContents(ctx context.Context, contentMgr contentManager, obj
}
// NewObjectManager creates an ObjectManager with the specified content manager and format.
func NewObjectManager(ctx context.Context, bm contentManager, f format.ObjectFormat, mr *metrics.Registry) (*Manager, error) {
func NewObjectManager(_ context.Context, bm contentManager, f format.ObjectFormat, mr *metrics.Registry) (*Manager, error) {
_ = mr
om := &Manager{


@@ -403,7 +403,7 @@ func wrapLockingStorage(st blob.Storage, r format.BlobStorageConfiguration) blob
// collect prefixes that need to be locked on put
prefixes := GetLockingStoragePrefixes()
return beforeop.NewWrapper(st, nil, nil, nil, func(ctx context.Context, id blob.ID, opts *blob.PutOptions) error {
return beforeop.NewWrapper(st, nil, nil, nil, func(_ context.Context, id blob.ID, opts *blob.PutOptions) error {
for _, prefix := range prefixes {
if strings.HasPrefix(string(id), prefix) {
opts.RetentionMode = r.RetentionMode


@@ -142,7 +142,7 @@ func (o *FilesystemOutput) BeginDirectory(ctx context.Context, relativePath stri
}
// FinishDirectory implements restore.Output interface.
func (o *FilesystemOutput) FinishDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
func (o *FilesystemOutput) FinishDirectory(_ context.Context, relativePath string, e fs.Directory) error {
path := filepath.Join(o.TargetPath, filepath.FromSlash(relativePath))
if err := o.setAttributes(path, e, os.FileMode(0)); err != nil {
return errors.Wrap(err, "error setting attributes")
@@ -159,7 +159,7 @@ func (o *FilesystemOutput) WriteDirEntry(ctx context.Context, relativePath strin
}
// Close implements restore.Output interface.
func (o *FilesystemOutput) Close(ctx context.Context) error {
func (o *FilesystemOutput) Close(_ context.Context) error {
return nil
}
@@ -180,7 +180,7 @@ func (o *FilesystemOutput) WriteFile(ctx context.Context, relativePath string, f
}
// FileExists implements restore.Output interface.
func (o *FilesystemOutput) FileExists(ctx context.Context, relativePath string, e fs.File) bool {
func (o *FilesystemOutput) FileExists(_ context.Context, relativePath string, e fs.File) bool {
st, err := os.Lstat(filepath.Join(o.TargetPath, relativePath))
if err != nil {
return false


@@ -23,7 +23,7 @@ func (o *TarOutput) Parallelizable() bool {
}
// BeginDirectory implements restore.Output interface.
func (o *TarOutput) BeginDirectory(ctx context.Context, relativePath string, d fs.Directory) error {
func (o *TarOutput) BeginDirectory(_ context.Context, relativePath string, d fs.Directory) error {
if relativePath == "" {
return nil
}
@@ -59,7 +59,7 @@ func (o *TarOutput) WriteDirEntry(ctx context.Context, relativePath string, de *
}
// Close implements restore.Output interface.
func (o *TarOutput) Close(ctx context.Context) error {
func (o *TarOutput) Close(_ context.Context) error {
if err := o.tf.Close(); err != nil {
return errors.Wrap(err, "error closing tar")
}


@@ -45,7 +45,7 @@ func (o *ZipOutput) WriteDirEntry(ctx context.Context, relativePath string, de *
}
// Close implements restore.Output interface.
func (o *ZipOutput) Close(ctx context.Context) error {
func (o *ZipOutput) Close(_ context.Context) error {
if err := o.zf.Close(); err != nil {
return errors.Wrap(err, "error closing zip")
}


@@ -233,7 +233,7 @@ func (rsl *repositorySymlink) Readlink(ctx context.Context) (string, error) {
return string(b), nil
}
func (rsl *repositorySymlink) Resolve(ctx context.Context) (fs.Entry, error) {
func (rsl *repositorySymlink) Resolve(_ context.Context) (fs.Entry, error) {
return nil, errors.New("Symlink.Resolve not implemented in Repofs")
}


@@ -178,7 +178,7 @@ func (v *Verifier) InParallel(ctx context.Context, enqueue func(tw *TreeWalker)
}
// NewVerifier creates a verifier.
func NewVerifier(ctx context.Context, rep repo.Repository, opts VerifierOptions) *Verifier {
func NewVerifier(_ context.Context, rep repo.Repository, opts VerifierOptions) *Verifier {
if opts.Parallelism == 0 {
opts.Parallelism = runtime.NumCPU()
}


@@ -1359,7 +1359,7 @@ func (u *Uploader) wrapIgnorefs(logger logging.Logger, entry fs.Directory, polic
return entry
}
return ignorefs.New(entry, policyTree, ignorefs.ReportIgnoredFiles(func(ctx context.Context, fname string, md fs.Entry, policyTree *policy.Tree) {
return ignorefs.New(entry, policyTree, ignorefs.ReportIgnoredFiles(func(_ context.Context, fname string, md fs.Entry, policyTree *policy.Tree) {
if md.IsDir() {
maybeLogEntryProcessed(
logger,