mirror of https://github.com/kopia/kopia.git
synced 2025-12-23 22:57:50 -05:00

chore(ci): upgraded linter to 1.53.3 (#3079)

* chore(ci): upgraded linter to 1.53.3

  This flagged a bunch of unused parameters, so the PR is larger than usual, but 99% mechanical.

* separate lint CI task
* run Lint in separate CI
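Note: most of the changes below follow two mechanical patterns for silencing the unused-parameter warnings the upgraded linter (revive, via golangci-lint) reports: rename an unused parameter to _ (or drop its name entirely), or keep the name for documentation and discard the value explicitly with `_ = name`. A minimal sketch of both patterns; the hasher type and method names here are hypothetical, for illustration only:

package main

import "fmt"

type hasher struct{}

// Before: func (h hasher) HashingFile(fname string) { ... }
// The linter flags fname as an unused parameter.

// After: the unused parameter is renamed to _.
func (h hasher) HashingFile(_ string) {
    fmt.Println("hashing started")
}

// Alternative: keep the name so the signature stays self-documenting,
// and discard the value explicitly at the top of the function.
func (h hasher) connect(isCreate bool, formatVersion int) error {
    _ = formatVersion // accepted for interface compatibility, currently unused

    fmt.Println("connecting, isCreate =", isCreate)

    return nil
}

func main() {
    h := hasher{}
    h.HashingFile("ignored")
    _ = h.connect(false, 2)
}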
.github/workflows/lint.yml (new file, +39)
@@ -0,0 +1,39 @@
name: Lint
on:
  pull_request:
    branches: [ master ]
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
env:
  # environment variables shared between build steps
  # do not include sensitive credentials and tokens here, instead pass them
  # directly to tools that need them to limit the blast radius in case one of them
  # becomes compromised and leaks credentials to external sites.
  # required by Makefile
  UNIX_SHELL_ON_WINDOWS: true
  # set (to any value other than false) to trigger random unicode filenames testing (logs may be difficult to read)
  ENABLE_UNICODE_FILENAMES: ${{ secrets.ENABLE_UNICODE_FILENAMES }}
  # set (to any value other than false) to trigger very long filenames testing
  ENABLE_LONG_FILENAMES: ${{ secrets.ENABLE_LONG_FILENAMES }}
jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
    name: Lint
    runs-on: ${{ matrix.os }}
    steps:
      - name: Check out repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.20'
        id: go
        if: ${{ !contains(matrix.os, 'ARMHF') }}
      - name: Lint
        run: make lint
@@ -71,6 +71,7 @@ linters:
  enable-all: true
  disable:
    - deadcode
    - depguard
    - exhaustivestruct
    - exhaustruct
    - gochecknoinits
@@ -79,6 +80,7 @@ linters:
    - ifshort
    - interfacer
    - ireturn # this one may be interesting to control allocations
    - gosmopolitan
    - maligned
    - nilnil
    - nlreturn
@@ -90,6 +92,7 @@ linters:
    - scopelint
    - sqlclosecheck
    - structcheck
    - tagalign
    - tagliatelle
    - testpackage
    - tparallel
@@ -129,6 +132,10 @@ issues:
    - text: "tracer is a global variable"
      linters:
        - gochecknoglobals
    # always allow ctx even when unused
    - text: "unused-parameter: parameter 'ctx' seems to be unused"
      linters:
        - revive
    - text: "Magic number: 1e"
      linters:
        - gomnd
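Note: the exclude rule above means revive no longer complains when a conventionally named ctx parameter goes unused, so handlers can keep the standard signature without renaming it to _. A hedged sketch of what the exclusion permits; the server, request, and response types are hypothetical:

package main

import (
    "context"
    "fmt"
)

type server struct{}

type request struct{ name string }

type response struct{ status string }

// ctx is unused here, but the exclusion lets it keep its conventional name,
// which documents the signature shared by every handler.
func (s *server) handlePing(ctx context.Context, req *request) (*response, error) {
    return &response{status: "pong for " + req.name}, nil
}

func main() {
    s := &server{}
    resp, _ := s.handlePing(context.Background(), &request{name: "demo"})
    fmt.Println(resp.status)
}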
Makefile (4 lines changed)
@@ -39,7 +39,7 @@ GOTESTSUM_FORMAT=pkgname-and-test-fails
GOTESTSUM_FLAGS=--format=$(GOTESTSUM_FORMAT) --no-summary=skipped
GO_TEST?=$(gotestsum) $(GOTESTSUM_FLAGS) --

LINTER_DEADLINE=600s
LINTER_DEADLINE=1200s
UNIT_TESTS_TIMEOUT=1200s

ifeq ($(GOARCH),amd64)
@@ -213,7 +213,7 @@ download-rclone:
    go run ./tools/gettool --tool rclone:$(RCLONE_VERSION) --output-dir dist/kopia_linux_arm_6/ --goos=linux --goarch=arm

ci-tests: lint vet test
ci-tests: vet test

ci-integration-tests:
    $(MAKE) robustness-tool-tests
@@ -328,9 +328,9 @@ func NewApp() *App {
        exitWithError: func(err error) {
            if err != nil {
                os.Exit(1)
            } else {
                os.Exit(0)
            }

            os.Exit(0)
        },
        stdoutWriter: colorable.NewColorableStdout(),
        stderrWriter: colorable.NewColorableStderr(),

@@ -66,11 +66,11 @@ type cliProgress struct {
    progressFlags
}

func (p *cliProgress) HashingFile(fname string) {
func (p *cliProgress) HashingFile(_ string) {
    p.inProgressHashing.Add(1)
}

func (p *cliProgress) FinishedHashingFile(fname string, totalSize int64) {
func (p *cliProgress) FinishedHashingFile(_ string, _ int64) {
    p.hashedFiles.Add(1)
    p.inProgressHashing.Add(-1)
    p.maybeOutput()
@@ -98,7 +98,7 @@ func (p *cliProgress) Error(path string, err error, isIgnored bool) {
    }
}

func (p *cliProgress) CachedFile(fname string, numBytes int64) {
func (p *cliProgress) CachedFile(_ string, numBytes int64) {
    p.cachedBytes.Add(numBytes)
    p.cachedFiles.Add(1)
    p.maybeOutput()

@@ -51,7 +51,7 @@ func (c *commandBenchmarkCompression) readInputFile(ctx context.Context) ([]byte
        return nil, errors.Wrap(err, "error opening input file")
    }

    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    st, err := f.Stat()
    if err != nil {

@@ -178,7 +178,7 @@ func (c *commandBlobShardsModify) run(ctx context.Context) error {
    if err != nil {
        return errors.Wrap(err, "error creating .shards file")
    }
    defer of.Close() //nolint:errcheck,gosec
    defer of.Close() //nolint:errcheck

    return errors.Wrap(dstPar.Save(of), "error saving .shards file")
}

@@ -30,7 +30,7 @@ func (c *commandCacheInfo) setup(svc appServices, parent commandParent) {
    c.out.setup(svc)
}

func (c *commandCacheInfo) run(ctx context.Context, rep repo.Repository) error {
func (c *commandCacheInfo) run(ctx context.Context, _ repo.Repository) error {
    opts, err := repo.GetCachingOptions(ctx, c.svc.repositoryConfigFileName())
    if err != nil {
        return errors.Wrap(err, "error getting cache options")

@@ -42,7 +42,7 @@ func (c *commandCacheSetParams) setup(svc appServices, parent commandParent) {
    c.svc = svc
}

func (c *commandCacheSetParams) run(ctx context.Context, rep repo.RepositoryWriter) error {
func (c *commandCacheSetParams) run(ctx context.Context, _ repo.RepositoryWriter) error {
    opts, err := repo.GetCachingOptions(ctx, c.svc.repositoryConfigFileName())
    if err != nil {
        return errors.Wrap(err, "error getting caching options")

@@ -48,9 +48,5 @@ func (c *policyRetentionFlags) setRetentionPolicyFromFlags(ctx context.Context,
        }
    }

    if err := applyPolicyBoolPtr(ctx, "do not save identical snapshots", &rp.IgnoreIdenticalSnapshots, c.policySetIgnoreIdenticalSnapshots, changeCount); err != nil {
        return err
    }

    return nil
    return applyPolicyBoolPtr(ctx, "do not save identical snapshots", &rp.IgnoreIdenticalSnapshots, c.policySetIgnoreIdenticalSnapshots, changeCount)
}

@@ -29,9 +29,5 @@ func (c *policyUploadFlags) setUploadPolicyFromFlags(ctx context.Context, up *po
        return err
    }

    if err := applyOptionalInt64MiB(ctx, "parallel upload above size", &up.ParallelUploadAboveSize, c.parallelizeUploadAboveSizeMiB, changeCount); err != nil {
        return err
    }

    return nil
    return applyOptionalInt64MiB(ctx, "parallel upload above size", &up.ParallelUploadAboveSize, c.parallelizeUploadAboveSizeMiB, changeCount)
}

@@ -25,6 +25,8 @@ func (c *storageFromConfigFlags) Setup(sps StorageProviderServices, cmd *kingpin
}

func (c *storageFromConfigFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    if isCreate {
        return nil, errors.New("not supported")
    }

@@ -132,7 +132,7 @@ func updateRepositoryParameters(
    if upgradeToEpochManager {
        log(ctx).Infof("migrating current indexes to epoch format")

        if err := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx, mp.EpochParameters); err != nil {
        if err := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx); err != nil {
            return errors.Wrap(err, "error upgrading indexes")
        }
    }

@@ -82,13 +82,13 @@ func (c *commandRepositoryStatus) outputJSON(ctx context.Context, r repo.Reposit
    return nil
}

func (c *commandRepositoryStatus) dumpUpgradeStatus(ctx context.Context, dr repo.DirectRepository) error {
func (c *commandRepositoryStatus) dumpUpgradeStatus(dr repo.DirectRepository) error {
    drw, isDr := dr.(repo.DirectRepositoryWriter)
    if !isDr {
        return nil
    }

    l, err := drw.FormatManager().GetUpgradeLockIntent(ctx)
    l, err := drw.FormatManager().GetUpgradeLockIntent()
    if err != nil {
        return errors.Wrap(err, "failed to get the upgrade lock intent")
    }
@@ -218,7 +218,7 @@ func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository)

    c.dumpRetentionStatus(dr)

    if err := c.dumpUpgradeStatus(ctx, dr); err != nil {
    if err := c.dumpUpgradeStatus(dr); err != nil {
        return errors.Wrap(err, "failed to dump upgrade status")
    }

@@ -3,6 +3,8 @@
import (
    "context"

    "github.com/pkg/errors"

    "github.com/kopia/kopia/repo"
)

@@ -21,7 +23,7 @@ func (c *commandRepositoryThrottleGet) run(ctx context.Context, rep repo.DirectR
    limits := rep.Throttler().Limits()

    if err := c.ctg.output(&limits); err != nil {
        return err
        return errors.Wrap(err, "output")
    }

    return nil
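Note: the change just above from `return err` to `return errors.Wrap(err, "output")` is likely there to satisfy the wrapcheck linter, which wants errors crossing package boundaries to carry context. A small standalone sketch using github.com/pkg/errors; the loadLimits helper is hypothetical:

package main

import (
    "fmt"
    "os"

    "github.com/pkg/errors"
)

func loadLimits(path string) error {
    if _, err := os.ReadFile(path); err != nil {
        // Wrap annotates err with a message while preserving the cause,
        // so callers can still recover it with errors.Cause.
        return errors.Wrap(err, "output")
    }

    return nil
}

func main() {
    if err := loadLimits("/nonexistent"); err != nil {
        fmt.Println(err) // output: open /nonexistent: no such file or directory
    }
}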
@@ -361,7 +361,7 @@ func (c *commandRepositoryUpgrade) drainOrCommit(ctx context.Context, rep repo.D
    if mp.EpochParameters.Enabled {
        log(ctx).Infof("Repository indices have already been migrated to the epoch format, no need to drain other clients")

        l, err := rep.FormatManager().GetUpgradeLockIntent(ctx)
        l, err := rep.FormatManager().GetUpgradeLockIntent()
        if err != nil {
            return errors.Wrap(err, "failed to get upgrade lock intent")
        }
@@ -404,7 +404,7 @@ func (c *commandRepositoryUpgrade) sleepWithContext(ctx context.Context, dur tim

func (c *commandRepositoryUpgrade) drainAllClients(ctx context.Context, rep repo.DirectRepositoryWriter) error {
    for {
        l, err := rep.FormatManager().GetUpgradeLockIntent(ctx)
        l, err := rep.FormatManager().GetUpgradeLockIntent()

        upgradeTime := l.UpgradeTime()
        now := rep.Time()
@@ -454,7 +454,7 @@ func (c *commandRepositoryUpgrade) upgrade(ctx context.Context, rep repo.DirectR

    log(ctx).Infof("migrating current indices to epoch format")

    if uerr := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx, mp.EpochParameters); uerr != nil {
    if uerr := rep.ContentManager().PrepareUpgradeToIndexBlobManagerV1(ctx); uerr != nil {
        return errors.Wrap(uerr, "error upgrading indices")
    }

@@ -60,6 +60,8 @@ func (ep *estimateProgress) Error(ctx context.Context, filename string, err erro
}

func (ep *estimateProgress) Stats(ctx context.Context, st *snapshot.Stats, included, excluded snapshotfs.SampleBuckets, excludedDirs []string, final bool) {
    _ = final

    ep.stats = *st
    ep.included = included
    ep.excluded = excluded

@@ -4,6 +4,6 @@
    "github.com/alecthomas/kingpin/v2"
)

func (c *App) setupOSSpecificKeychainFlags(svc appServices, app *kingpin.Application) {
func (c *App) setupOSSpecificKeychainFlags(_ appServices, app *kingpin.Application) {
    app.Flag("use-keychain", "Use macOS Keychain for storing repository password.").Default("true").BoolVar(&c.keyRingEnabled)
}

@@ -7,5 +7,6 @@
    "github.com/alecthomas/kingpin/v2"
)

//nolint:revive
func (c *App) setupOSSpecificKeychainFlags(svc appServices, app *kingpin.Application) {
}

@@ -4,6 +4,6 @@
    "github.com/alecthomas/kingpin/v2"
)

func (c *App) setupOSSpecificKeychainFlags(svc appServices, app *kingpin.Application) {
func (c *App) setupOSSpecificKeychainFlags(_ appServices, app *kingpin.Application) {
    app.Flag("use-credential-manager", "Use Windows Credential Manager for storing repository password.").Default("true").BoolVar(&c.keyRingEnabled)
}

@@ -1,5 +1,6 @@
package cli

//nolint:revive
func onExternalConfigReloadRequest(f func()) {
    // SIGHUP not supported on Windows.
}

@@ -25,6 +25,8 @@ func (c *storageAzureFlags) Setup(svc StorageProviderServices, cmd *kingpin.CmdC
}

func (c *storageAzureFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    //nolint:wrapcheck
    return azure.New(ctx, &c.azOptions, false)
    return azure.New(ctx, &c.azOptions, isCreate)
}

@@ -22,6 +22,8 @@ func (c *storageB2Flags) Setup(svc StorageProviderServices, cmd *kingpin.CmdClau
}

func (c *storageB2Flags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    //nolint:wrapcheck
    return b2.New(ctx, &c.b2options, false)
    return b2.New(ctx, &c.b2options, isCreate)
}

@@ -29,6 +29,8 @@ func (c *storageGCSFlags) Setup(_ StorageProviderServices, cmd *kingpin.CmdClaus
}

func (c *storageGCSFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    if c.embedCredentials {
        data, err := os.ReadFile(c.options.ServiceAccountCredentialsFile)
        if err != nil {
@@ -40,5 +42,5 @@ func (c *storageGCSFlags) Connect(ctx context.Context, isCreate bool, formatVers
    }

    //nolint:wrapcheck
    return gcs.New(ctx, &c.options, false)
    return gcs.New(ctx, &c.options, isCreate)
}

@@ -28,6 +28,8 @@ func (c *storageGDriveFlags) Setup(_ StorageProviderServices, cmd *kingpin.CmdCl
}

func (c *storageGDriveFlags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    if c.embedCredentials {
        data, err := os.ReadFile(c.options.ServiceAccountCredentialsFile)
        if err != nil {
@@ -39,5 +41,5 @@ func (c *storageGDriveFlags) Connect(ctx context.Context, isCreate bool, formatV
    }

    //nolint:wrapcheck
    return gdrive.New(ctx, &c.options, false)
    return gdrive.New(ctx, &c.options, isCreate)
}

@@ -53,7 +53,7 @@ func (c *storageS3Flags) Setup(svc StorageProviderServices, cmd *kingpin.CmdClau
    cmd.Flag("root-ca-pem-path", "Certficate authority file path").PreAction(c.preActionLoadPEMPath).StringVar(&c.rootCaPemPath)
}

func (c *storageS3Flags) preActionLoadPEMPath(pc *kingpin.ParseContext) error {
func (c *storageS3Flags) preActionLoadPEMPath(_ *kingpin.ParseContext) error {
    if len(c.s3options.RootCA) > 0 {
        return errors.Errorf("root-ca-pem-base64 and root-ca-pem-path are mutually exclusive")
    }
@@ -68,7 +68,7 @@ func (c *storageS3Flags) preActionLoadPEMPath(pc *kingpin.ParseContext) error {
    return nil
}

func (c *storageS3Flags) preActionLoadPEMBase64(pc *kingpin.ParseContext) error {
func (c *storageS3Flags) preActionLoadPEMBase64(_ *kingpin.ParseContext) error {
    caContent, err := base64.StdEncoding.DecodeString(c.rootCaPemBase64)
    if err != nil {
        return errors.Wrap(err, "unable to decode CA")
@@ -80,10 +80,12 @@ func (c *storageS3Flags) preActionLoadPEMBase64(pc *kingpin.ParseContext) error
}

func (c *storageS3Flags) Connect(ctx context.Context, isCreate bool, formatVersion int) (blob.Storage, error) {
    _ = formatVersion

    if isCreate && c.s3options.PointInTime != nil && !c.s3options.PointInTime.IsZero() {
        return nil, errors.New("Cannot specify a 'point-in-time' option when creating a repository")
    }

    //nolint:wrapcheck
    return s3.New(ctx, &c.s3options, false)
    return s3.New(ctx, &c.s3options, isCreate)
}

@@ -56,11 +56,7 @@ func (c *commonThrottleSet) apply(ctx context.Context, limits *throttling.Limits
        return err
    }

    if err := c.setThrottleInt(ctx, "concurrent writes", &limits.ConcurrentWrites, c.setConcurrentWrites, changeCount); err != nil {
        return err
    }

    return nil
    return c.setThrottleInt(ctx, "concurrent writes", &limits.ConcurrentWrites, c.setConcurrentWrites, changeCount)
}

func (c *commonThrottleSet) setThrottleFloat64(ctx context.Context, desc string, bps bool, val *float64, str string, changeCount *int) error {
@@ -73,7 +73,7 @@ func (c *App) getUpdateState() (*updateState, error) {
    if err != nil {
        return nil, errors.Wrap(err, "unable to open update state file")
    }
    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    us := &updateState{}
    if err := json.NewDecoder(f).Decode(us); err != nil {

@@ -151,7 +151,7 @@ func (fsd *filesystemDirectory) IterateEntries(ctx context.Context, cb func(cont
    if direrr != nil {
        return errors.Wrap(direrr, "unable to read directory")
    }
    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    childPrefix := fullPath + string(filepath.Separator)

@@ -6,10 +6,12 @@
    "github.com/kopia/kopia/fs"
)

//nolint:revive
func platformSpecificOwnerInfo(fi os.FileInfo) fs.OwnerInfo {
    return fs.OwnerInfo{}
}

//nolint:revive
func platformSpecificDeviceInfo(fi os.FileInfo) fs.DeviceInfo {
    return fs.DeviceInfo{}
}

@@ -119,10 +119,12 @@ func (fsd *shallowFilesystemDirectory) SupportsMultipleIterations() bool {
    return false
}

//nolint:revive
func (fsd *shallowFilesystemDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
    return nil, errors.New("shallowFilesystemDirectory.Child not supported")
}

//nolint:revive
func (fsd *shallowFilesystemDirectory) IterateEntries(ctx context.Context, cb func(context.Context, fs.Entry) error) error {
    return errors.New("shallowFilesystemDirectory.IterateEntries not supported")
}

@@ -113,7 +113,7 @@ type streamingDirectory struct {

var errChildNotSupported = errors.New("streamingDirectory.Child not supported")

func (sd *streamingDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
func (sd *streamingDirectory) Child(ctx context.Context, _ string) (fs.Entry, error) {
    return nil, errChildNotSupported
}

@@ -25,7 +25,7 @@ type singleUserAuthenticator struct {
    expectedPasswordBytes []byte
}

func (a *singleUserAuthenticator) IsValid(ctx context.Context, rep repo.Repository, username, password string) bool {
func (a *singleUserAuthenticator) IsValid(ctx context.Context, _ repo.Repository, username, password string) bool {
    return subtle.ConstantTimeCompare([]byte(username), a.expectedUsernameBytes)*
        subtle.ConstantTimeCompare([]byte(password), a.expectedPasswordBytes) == 1
}
@@ -75,7 +75,7 @@ type htpasswdAuthenticator struct {
    f *htpasswd.File
}

func (a htpasswdAuthenticator) IsValid(ctx context.Context, rep repo.Repository, username, password string) bool {
func (a htpasswdAuthenticator) IsValid(ctx context.Context, _ repo.Repository, username, password string) bool {
    return a.f.Match(username, password)
}

@@ -41,6 +41,8 @@ type noAccessAuthorizationInfo struct{

func (noAccessAuthorizationInfo) ContentAccessLevel() AccessLevel { return AccessLevelNone }
func (noAccessAuthorizationInfo) ManifestAccessLevel(labels map[string]string) AccessLevel {
    _ = labels

    return AccessLevelNone
}

@@ -79,7 +81,7 @@ func (la legacyAuthorizationInfo) ManifestAccessLevel(labels map[string]string)

type legacyAuthorizer struct{}

func (legacyAuthorizer) Authorize(ctx context.Context, rep repo.Repository, username string) AuthorizationInfo {
func (legacyAuthorizer) Authorize(ctx context.Context, _ repo.Repository, username string) AuthorizationInfo {
    return legacyAuthorizationInfo{usernameAtHostname: username}
}
internal/cache/content_cache_passthrough.go (6 lines changed)
@@ -15,15 +15,21 @@ type passthroughContentCache struct {
func (c passthroughContentCache) Close(ctx context.Context) {}

func (c passthroughContentCache) GetContent(ctx context.Context, contentID string, blobID blob.ID, offset, length int64, output *gather.WriteBuffer) error {
    _ = contentID

    //nolint:wrapcheck
    return c.st.GetBlob(ctx, blobID, offset, length, output)
}

func (c passthroughContentCache) PrefetchBlob(ctx context.Context, blobID blob.ID) error {
    _ = blobID

    return nil
}

func (c passthroughContentCache) Sync(ctx context.Context, blobPrefix blob.ID) error {
    _ = blobPrefix

    return nil
}

@@ -23,12 +23,12 @@ type StorageProtection interface {

type nullStorageProtection struct{}

func (nullStorageProtection) Protect(id string, input gather.Bytes, output *gather.WriteBuffer) {
func (nullStorageProtection) Protect(_ string, input gather.Bytes, output *gather.WriteBuffer) {
    output.Reset()
    input.WriteTo(output) //nolint:errcheck
}

func (nullStorageProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
func (nullStorageProtection) Verify(_ string, input gather.Bytes, output *gather.WriteBuffer) error {
    output.Reset()
    input.WriteTo(output) //nolint:errcheck

@@ -44,12 +44,12 @@ type checksumProtection struct {
    Secret []byte
}

func (p checksumProtection) Protect(id string, input gather.Bytes, output *gather.WriteBuffer) {
func (p checksumProtection) Protect(_ string, input gather.Bytes, output *gather.WriteBuffer) {
    output.Reset()
    hmac.Append(input, p.Secret, output)
}

func (p checksumProtection) Verify(id string, input gather.Bytes, output *gather.WriteBuffer) error {
func (p checksumProtection) Verify(_ string, input gather.Bytes, output *gather.WriteBuffer) error {
    output.Reset()
    //nolint:wrapcheck
    return hmac.VerifyAndStrip(input, p.Secret, output)

@@ -292,7 +292,7 @@ func downloadFile(ctx context.Context, f fs.File, fname string) error {
        return errors.Wrap(err, "error creating file to edit")
    }

    defer dst.Close() //nolint:errcheck,gosec
    defer dst.Close() //nolint:errcheck

    return errors.Wrap(iocopy.JustCopy(dst, src), "error downloading file")
}

@@ -33,7 +33,7 @@ func (f *testFile) ModTime() time.Time { return f.modtime }
func (f *testFile) Sys() interface{} { return nil }
func (f *testFile) Owner() fs.OwnerInfo { return fs.OwnerInfo{UserID: 1000, GroupID: 1000} }
func (f *testFile) Device() fs.DeviceInfo { return fs.DeviceInfo{Dev: 1} }
func (f *testFile) Open(_ context.Context) (io.Reader, error) {
func (f *testFile) Open(ctx context.Context) (io.Reader, error) {
    return strings.NewReader(f.content), nil
}

@@ -66,7 +66,7 @@ func (d *testDirectory) ModTime() time.Time { return d.modtime }
func (d *testDirectory) Sys() interface{} { return nil }
func (d *testDirectory) Owner() fs.OwnerInfo { return fs.OwnerInfo{UserID: 1000, GroupID: 1000} }
func (d *testDirectory) Device() fs.DeviceInfo { return fs.DeviceInfo{Dev: 1} }
func (d *testDirectory) Child(_ context.Context, name string) (fs.Entry, error) {
func (d *testDirectory) Child(ctx context.Context, name string) (fs.Entry, error) {
    for _, f := range d.files {
        if f.Name() == name {
            return f, nil
@@ -75,7 +75,7 @@ func (d *testDirectory) Child(_ context.Context, name string) (fs.Entry, error)

    return nil, fs.ErrEntryNotFound
}
func (d *testDirectory) Readdir(_ context.Context) ([]fs.Entry, error) { return d.files, nil }
func (d *testDirectory) Readdir(ctx context.Context) ([]fs.Entry, error) { return d.files, nil }

func TestCompareEmptyDirectories(t *testing.T) {
    var buf bytes.Buffer

@@ -68,7 +68,7 @@ func readAndStripComments(fname string) (string, error) {
    if err != nil {
        return "", errors.Wrap(err, "error opening edited file")
    }
    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    var result []string

@@ -39,30 +39,33 @@ type ParametersProvider interface {
var ErrVerySlowIndexWrite = errors.Errorf("extremely slow index write - index write took more than two epochs")

// Parameters encapsulates all parameters that influence the behavior of epoch manager.
//
// Note as a historical mistake, JSON tags are not camelCase, but rather PascalCase. We can't change
// that since the parameters are stored in a repository.
type Parameters struct {
    // whether epoch manager is enabled, must be true.
    Enabled bool
    Enabled bool `json:"Enabled"`

    // how frequently each client will list blobs to determine the current epoch.
    EpochRefreshFrequency time.Duration
    EpochRefreshFrequency time.Duration `json:"EpochRefreshFrequency"`

    // number of epochs between full checkpoints.
    FullCheckpointFrequency int
    FullCheckpointFrequency int `json:"FullCheckpointFrequency"`

    // do not delete uncompacted blobs if the corresponding compacted blob age is less than this.
    CleanupSafetyMargin time.Duration
    CleanupSafetyMargin time.Duration `json:"CleanupSafetyMargin"`

    // minimum duration of an epoch
    MinEpochDuration time.Duration
    MinEpochDuration time.Duration `json:"MinEpochDuration"`

    // advance epoch if number of files exceeds this
    EpochAdvanceOnCountThreshold int
    EpochAdvanceOnCountThreshold int `json:"EpochAdvanceOnCountThreshold"`

    // advance epoch if total size of files exceeds this.
    EpochAdvanceOnTotalSizeBytesThreshold int64
    EpochAdvanceOnTotalSizeBytesThreshold int64 `json:"EpochAdvanceOnTotalSizeBytesThreshold"`

    // number of blobs to delete in parallel during cleanup
    DeleteParallelism int
    DeleteParallelism int `json:"DeleteParallelism"`
}

// GetEpochManagerEnabled returns whether epoch manager is enabled, must be true.
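Note: adding the explicit PascalCase tags above does not change the stored format. encoding/json already uses the exported field name verbatim when no tag is present, so Enabled was serialized as "Enabled" all along; the tags simply pin the historical, repository-persisted spelling so a future field rename cannot silently break it. A quick standalone sketch demonstrating the default (the params type here is illustrative, not kopia code):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type params struct {
    // Untagged: marshals under the field name, i.e. "Enabled".
    Enabled bool
    // Tagged: the explicit tag pins the same PascalCase key.
    MinEpochDuration time.Duration `json:"MinEpochDuration"`
}

func main() {
    b, _ := json.Marshal(params{Enabled: true, MinEpochDuration: time.Hour})
    fmt.Println(string(b)) // {"Enabled":true,"MinEpochDuration":3600000000000}
}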
@@ -589,6 +592,8 @@ func (e *Manager) maybeGenerateNextRangeCheckpointAsync(ctx context.Context, cs
}

func (e *Manager) maybeOptimizeRangeCheckpointsAsync(ctx context.Context, cs CurrentSnapshot) {
    // TODO: implement me
    _ = cs
}

func (e *Manager) maybeStartCleanupAsync(ctx context.Context, cs CurrentSnapshot, p *Parameters) {

@@ -60,7 +60,7 @@ func populateAttributes(a *fuse.Attr, e fs.Entry) {
    a.Blocks = (a.Size + fakeBlockSize - 1) / fakeBlockSize
}

func (n *fuseNode) Getattr(ctx context.Context, fh gofusefs.FileHandle, a *fuse.AttrOut) syscall.Errno {
func (n *fuseNode) Getattr(ctx context.Context, _ gofusefs.FileHandle, a *fuse.AttrOut) syscall.Errno {
    populateAttributes(&a.Attr, n.entry)

    a.Ino = n.StableAttr().Ino
@@ -72,7 +72,7 @@ type fuseFileNode struct {
    fuseNode
}

func (f *fuseFileNode) Open(ctx context.Context, flags uint32) (gofusefs.FileHandle, uint32, syscall.Errno) {
func (f *fuseFileNode) Open(ctx context.Context, _ uint32) (gofusefs.FileHandle, uint32, syscall.Errno) {
    reader, err := f.entry.(fs.File).Open(ctx)
    if err != nil {
        log(ctx).Errorf("error opening %v: %v", f.entry.Name(), err)

@@ -12,6 +12,8 @@
)

// Directory returns an error due to mounting being unsupported on current operating system.
//
//nolint:revive
func Directory(ctx context.Context, entry fs.Directory, mountPoint string, mountOptions Options) (Controller, error) {
    return nil, errors.Errorf("mounting is not supported")
}

@@ -9,15 +9,18 @@ func None() Strategy {

type noneStrategy struct{}

//nolint:revive
func (noneStrategy) GetPassword(ctx context.Context, configFile string) (string, error) {
    return "", ErrPasswordNotFound
}

//nolint:revive
func (noneStrategy) PersistPassword(ctx context.Context, configFile, password string) error {
    // silently succeed
    return nil
}

//nolint:revive
func (noneStrategy) DeletePassword(ctx context.Context, configFile string) error {
    return nil
}

@@ -371,6 +371,9 @@ func (c *concurrencyTest) getMetadataWorker(ctx context.Context, worker int) fun
}

func (c *concurrencyTest) listBlobWorker(ctx context.Context, worker int) func() error {
    // TODO: implement me
    _ = worker

    return func() error {
        return nil
    }

@@ -105,11 +105,11 @@ func WithExponentialBackoffNoValue(ctx context.Context, desc string, attempt fun
}

// Always is a retry function that retries all errors.
func Always(err error) bool {
func Always(error) bool {
    return true
}

// Never is a retry function that never retries.
func Never(err error) bool {
func Never(error) bool {
    return false
}

@@ -37,6 +37,9 @@ func (p estimateTaskProgress) Error(ctx context.Context, dirname string, err err
}

func (p estimateTaskProgress) Stats(ctx context.Context, st *snapshot.Stats, included, excluded snapshotfs.SampleBuckets, excludedDirs []string, final bool) {
    _ = excludedDirs
    _ = final

    p.ctrl.ReportCounters(map[string]uitask.CounterValue{
        "Bytes": uitask.BytesCounter(atomic.LoadInt64(&st.TotalFileSize)),
        "Excluded Bytes": uitask.BytesCounter(atomic.LoadInt64(&st.ExcludedTotalFileSize)),

@@ -269,7 +269,7 @@ func handleRepoSetDescription(ctx context.Context, rc requestContext) (interface
    return handleRepoStatus(ctx, rc)
}

func handleRepoSupportedAlgorithms(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleRepoSupportedAlgorithms(ctx context.Context, _ requestContext) (interface{}, *apiError) {
    res := &serverapi.SupportedAlgorithmsResponse{
        DefaultHashAlgorithm: hashing.DefaultAlgorithm,
        SupportedHashAlgorithms: toAlgorithmInfo(hashing.SupportedAlgorithms(), neverDeprecated),
@@ -300,7 +300,7 @@ func handleRepoSupportedAlgorithms(ctx context.Context, rc requestContext) (inte
    return res, nil
}

func neverDeprecated(n string) bool {
func neverDeprecated(string) bool {
    return false
}

@@ -28,7 +28,7 @@ func getUIPreferencesOrEmpty(s serverInterface) (serverapi.UIPreferences, error)
        return p, errors.Wrap(err, "unable to open UI preferences file")
    }

    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    if err := json.NewDecoder(f).Decode(&p); err != nil {
        return p, errors.Wrap(err, "invalid UI preferences file")

@@ -7,7 +7,7 @@
    "github.com/kopia/kopia/repo"
)

func handleCurrentUser(ctx context.Context, rc requestContext) (interface{}, *apiError) {
func handleCurrentUser(ctx context.Context, _ requestContext) (interface{}, *apiError) {
    return serverapi.CurrentUserResponse{
        Username: repo.GetDefaultUserName(ctx),
        Hostname: repo.GetDefaultHostName(ctx),

@@ -87,11 +87,11 @@ func requireServerControlUser(ctx context.Context, rc requestContext) bool {
    return user == rc.srv.getOptions().ServerControlUser
}

func anyAuthenticatedUser(ctx context.Context, rc requestContext) bool {
func anyAuthenticatedUser(ctx context.Context, _ requestContext) bool {
    return true
}

func handlerWillCheckAuthorization(ctx context.Context, rc requestContext) bool {
func handlerWillCheckAuthorization(ctx context.Context, _ requestContext) bool {
    return true
}
@@ -123,8 +123,6 @@ type CheckRepositoryExistsRequest struct {
}

// ConnectRepositoryRequest contains request to connect to a repository.
//
//nolint:musttag // false positive
type ConnectRepositoryRequest struct {
    Storage blob.ConnectionInfo `json:"storage"`
    Password string `json:"password"`

@@ -11,11 +11,15 @@

// GetFileAllocSize gets the space allocated on disk for the file
// 'fname' in bytes.
//
//nolint:revive
func GetFileAllocSize(fname string) (uint64, error) {
    return 0, errNotImplemented
}

// GetBlockSize gets the disk block size of the underlying system.
//
//nolint:revive
func GetBlockSize(path string) (uint64, error) {
    return 0, errNotImplemented
}

@@ -4,7 +4,7 @@
import (
    "encoding/json"
    "fmt"
    "log"
    "log" //nolint:depguard
    "os"
    "path/filepath"
    "reflect"

@@ -91,7 +91,7 @@ func WritePrivateKeyToFile(fname string, priv *rsa.PrivateKey) error {
    if err != nil {
        return errors.Wrap(err, "error opening private key file")
    }
    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
    if err != nil {
@@ -111,7 +111,7 @@ func WriteCertificateToFile(fname string, cert *x509.Certificate) error {
    if err != nil {
        return errors.Wrap(err, "error opening certificate file")
    }
    defer f.Close() //nolint:errcheck,gosec
    defer f.Close() //nolint:errcheck

    if err := pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
        return errors.Wrap(err, "Failed to write data")

@@ -36,7 +36,7 @@ type webdavFile struct {
    r fs.Reader
}

func (f *webdavFile) Readdir(n int) ([]os.FileInfo, error) {
func (f *webdavFile) Readdir(_ int) ([]os.FileInfo, error) {
    return nil, errors.New("not a directory")
}

@@ -80,7 +80,7 @@ func (f *webdavFile) Seek(offset int64, whence int) (int64, error) {
    return r.Seek(offset, whence)
}

func (f *webdavFile) Write(b []byte) (int, error) {
func (f *webdavFile) Write(_ []byte) (int, error) {
    return 0, errors.New("read-only filesystem")
}

@@ -145,7 +145,7 @@ func (d *webdavDir) Stat() (os.FileInfo, error) {
    return webdavFileInfo{d.entry}, nil
}

func (d *webdavDir) Write(b []byte) (int, error) {
func (d *webdavDir) Write(_ []byte) (int, error) {
    return 0, errors.New("read-only filesystem")
}

@@ -153,7 +153,7 @@ func (d *webdavDir) Close() error {
    return nil
}

func (d *webdavDir) Read(b []byte) (int, error) {
func (d *webdavDir) Read(_ []byte) (int, error) {
    return 0, errors.New("not supported")
}

@@ -169,7 +169,7 @@ type webdavFS struct {
    dir fs.Directory
}

func (w *webdavFS) Mkdir(ctx context.Context, path string, mode os.FileMode) error {
func (w *webdavFS) Mkdir(ctx context.Context, path string, _ os.FileMode) error {
    return errors.Errorf("can't create %q: read-only filesystem", path)
}

@@ -181,7 +181,7 @@ func (w *webdavFS) Rename(ctx context.Context, oldPath, newPath string) error {
    return errors.Errorf("can't rename %q to %q: read-only filesystem", oldPath, newPath)
}

func (w *webdavFS) OpenFile(ctx context.Context, path string, flags int, mode os.FileMode) (webdav.File, error) {
func (w *webdavFS) OpenFile(ctx context.Context, path string, _ int, _ os.FileMode) (webdav.File, error) {
    f, err := w.findEntry(ctx, path)
    if err != nil {
        log(ctx).Errorf("OpenFile(%q) failed with %v", path, err)

@@ -110,6 +110,7 @@ func (r *apiServerRepository) ReplaceManifests(ctx context.Context, labels map[s
}

func (r *apiServerRepository) SetFindManifestPageSizeForTesting(v int32) {
    _ = v
}

func (r *apiServerRepository) FindManifests(ctx context.Context, labels map[string]string) ([]*manifest.EntryMetadata, error) {

@@ -14,7 +14,7 @@ type Options struct {

    // Azure Storage account name and key
    StorageAccount string `json:"storageAccount"`
    StorageKey string `json:"storageKey" kopia:"sensitive"`
    StorageKey string `json:"storageKey" kopia:"sensitive"`

    // Alternatively provide SAS Token
    SASToken string `json:"sasToken" kopia:"sensitive"`

@@ -240,6 +240,8 @@ func (az *azStorage) FlushCaches(ctx context.Context) error {
//
// - the 'Container', 'StorageAccount' and 'StorageKey' fields are required and all other parameters are optional.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
    _ = isCreate

    if opt.Container == "" {
        return nil, errors.New("container name must be specified")
    }

@@ -11,7 +11,7 @@ type Options struct {
    Prefix string `json:"prefix,omitempty"`

    KeyID string `json:"keyID"`
    Key string `json:"key" kopia:"sensitive"`
    Key string `json:"key" kopia:"sensitive"`

    throttling.Limits
}

@@ -260,6 +260,8 @@ func (s *b2Storage) String() string {

// New creates new B2-backed storage with specified options.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
    _ = isCreate

    if opt.BucketName == "" {
        return nil, errors.New("bucket name must be specified")
    }
@@ -77,6 +77,8 @@ func (fs *fsImpl) isRetriable(err error) bool {
}

func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, offset, length int64, output blob.OutputBuffer) error {
    _ = dirPath

    err := retry.WithExponentialBackoffNoValue(ctx, "GetBlobFromPath:"+path, func() error {
        output.Reset()

@@ -131,6 +133,8 @@ func (fs *fsImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, off
}

func (fs *fsImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string) (blob.Metadata, error) {
    _ = dirPath

    //nolint:wrapcheck
    return retry.WithExponentialBackoff(ctx, "GetMetadataFromPath:"+path, func() (blob.Metadata, error) {
        fi, err := fs.osi.Stat(path)
@@ -152,6 +156,8 @@ func (fs *fsImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string)

//nolint:wrapcheck,gocyclo
func (fs *fsImpl) PutBlobInPath(ctx context.Context, dirPath, path string, data blob.Bytes, opts blob.PutOptions) error {
    _ = dirPath

    switch {
    case opts.HasRetentionOptions():
        return errors.Wrap(blob.ErrUnsupportedPutBlobOption, "blob-retention")
@@ -197,7 +203,7 @@ func (fs *fsImpl) PutBlobInPath(ctx context.Context, dirPath, path string, data
    }

    if t := opts.SetModTime; !t.IsZero() {
        if chtimesErr := fs.osi.Chtimes(path, t, t); err != nil {
        if chtimesErr := fs.osi.Chtimes(path, t, t); chtimesErr != nil {
            return errors.Wrapf(chtimesErr, "can't change file %q times", path)
        }
    }
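Note: the hunk just above (and the matching sftp one later in this diff) appears to fix an actual bug rather than a lint warning: the if statement assigned chtimesErr but tested the enclosing err, so a failed Chtimes was silently ignored whenever err was nil. A minimal standalone reproduction of the pitfall (sketch only, not kopia code):

package main

import (
    "fmt"
    "os"
    "time"
)

func touch(path string, t time.Time, err error) error {
    // BUG: the condition tests the outer err, not the chtimesErr that was
    // just assigned, so a Chtimes failure is swallowed whenever err is nil.
    if chtimesErr := os.Chtimes(path, t, t); err != nil {
        return chtimesErr
    }

    // Correct: test the variable the short declaration actually assigned.
    if chtimesErr := os.Chtimes(path, t, t); chtimesErr != nil {
        return fmt.Errorf("can't change file %q times: %w", path, chtimesErr)
    }

    return nil
}

func main() {
    // Chtimes on a missing file fails; the buggy branch would have ignored it.
    fmt.Println(touch("/no/such/file", time.Now(), nil))
}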
@@ -235,6 +241,8 @@ func (fs *fsImpl) createTempFileAndDir(tempFile string) (osWriteFile, error) {
}

func (fs *fsImpl) DeleteBlobInPath(ctx context.Context, dirPath, path string) error {
    _ = dirPath

    //nolint:wrapcheck
    return retry.WithExponentialBackoffNoValue(ctx, "DeleteBlobInPath:"+path, func() error {
        err := fs.osi.Remove(path)

@@ -3,6 +3,7 @@

package filesystem

//nolint:revive
func (realOS) IsESTALE(err error) bool {
    return false
}

@@ -18,7 +18,7 @@ type Options struct {
    ServiceAccountCredentialsFile string `json:"credentialsFile,omitempty"`

    // ServiceAccountCredentialJSON specifies the raw JSON credentials.
    ServiceAccountCredentialJSON json.RawMessage `kopia:"sensitive" json:"credentials,omitempty"`
    ServiceAccountCredentialJSON json.RawMessage `json:"credentials,omitempty" kopia:"sensitive"`

    // ReadOnly causes GCS connection to be opened with read-only scope to prevent accidental mutations.
    ReadOnly bool `json:"readOnly,omitempty"`

@@ -242,6 +242,8 @@ func tokenSourceFromCredentialsJSON(ctx context.Context, data json.RawMessage, s
// By default the connection reuses credentials managed by (https://cloud.google.com/sdk/),
// but this can be disabled by setting IgnoreDefaultCredentials to true.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
    _ = isCreate

    var ts oauth2.TokenSource

    var err error

@@ -15,7 +15,7 @@ type Options struct {
    ServiceAccountCredentialsFile string `json:"credentialsFile,omitempty"`

    // ServiceAccountCredentialJSON specifies the raw JSON credentials.
    ServiceAccountCredentialJSON json.RawMessage `kopia:"sensitive" json:"credentials,omitempty"`
    ServiceAccountCredentialJSON json.RawMessage `json:"credentials,omitempty" kopia:"sensitive"`

    // ReadOnly causes GCS connection to be opened with read-only scope to prevent accidental mutations.
    ReadOnly bool `json:"readOnly,omitempty"`

@@ -541,6 +541,8 @@ func CreateDriveService(ctx context.Context, opt *Options) (*drive.Service, erro
// By default the connection reuses credentials managed by (https://cloud.google.com/sdk/),
// but this can be disabled by setting IgnoreDefaultCredentials to true.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
    _ = isCreate

    if opt.FolderID == "" {
        return nil, errors.New("folder-id must be specified")
    }

@@ -32,10 +32,12 @@ func (s readonlyStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Meta
    return s.base.GetMetadata(ctx, id)
}

//nolint:revive
func (s readonlyStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Bytes, opts blob.PutOptions) error {
    return ErrReadonly
}

//nolint:revive
func (s readonlyStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
    return ErrReadonly
}

@@ -21,7 +21,7 @@ type Options struct {

    AccessKeyID string `json:"accessKeyID"`
    SecretAccessKey string `json:"secretAccessKey" kopia:"sensitive"`
    SessionToken string `json:"sessionToken" kopia:"sensitive"`
    SessionToken string `json:"sessionToken" kopia:"sensitive"`

    // Region is an optional region to pass in authorization header.
    Region string `json:"region,omitempty"`

@@ -318,6 +318,8 @@ func getCustomTransport(opt *Options) (*http.Transport, error) {
//
// - the 'BucketName' field is required and all other parameters are optional.
func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) {
    _ = isCreate

    st, err := newStorage(ctx, opt)
    if err != nil {
        return nil, err

@@ -16,9 +16,9 @@ type Options struct {
    Port int `json:"port"`
    Username string `json:"username"`
    // if password is specified Keyfile/Keydata is ignored.
    Password string `json:"password" kopia:"sensitive"`
    Password string `json:"password" kopia:"sensitive"`
    Keyfile string `json:"keyfile,omitempty"`
    KeyData string `json:"keyData,omitempty" kopia:"sensitive"`
    KeyData string `json:"keyData,omitempty" kopia:"sensitive"`
    KnownHostsFile string `json:"knownHostsFile,omitempty"`
    KnownHostsData string `json:"knownHostsData,omitempty"`
@@ -117,6 +117,8 @@ func (s *sftpStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
}

func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, fullPath string, offset, length int64, output blob.OutputBuffer) error {
    _ = dirPath

    //nolint:wrapcheck
    return s.rec.UsingConnectionNoResult(ctx, "GetBlobFromPath", func(conn connection.Connection) error {
        r, err := sftpClientFromConnection(conn).Open(fullPath)
@@ -162,6 +164,8 @@ func (s *sftpImpl) GetBlobFromPath(ctx context.Context, dirPath, fullPath string
}

func (s *sftpImpl) GetMetadataFromPath(ctx context.Context, dirPath, fullPath string) (blob.Metadata, error) {
    _ = dirPath

    return connection.UsingConnection(ctx, s.rec, "GetMetadataFromPath", func(conn connection.Connection) (blob.Metadata, error) {
        fi, err := sftpClientFromConnection(conn).Stat(fullPath)
        if isNotExist(err) {
@@ -180,6 +184,8 @@ func (s *sftpImpl) GetMetadataFromPath(ctx context.Context, dirPath, fullPath st
}

func (s *sftpImpl) PutBlobInPath(ctx context.Context, dirPath, fullPath string, data blob.Bytes, opts blob.PutOptions) error {
    _ = dirPath

    switch {
    case opts.HasRetentionOptions():
        return errors.Wrap(blob.ErrUnsupportedPutBlobOption, "blob-retention")
@@ -219,7 +225,7 @@ func (s *sftpImpl) PutBlobInPath(ctx context.Context, dirPath, fullPath string,
    }

    if t := opts.SetModTime; !t.IsZero() {
        if chtimesErr := sftpClientFromConnection(conn).Chtimes(fullPath, t, t); err != nil {
        if chtimesErr := sftpClientFromConnection(conn).Chtimes(fullPath, t, t); chtimesErr != nil {
            return errors.Wrap(chtimesErr, "can't change file times")
        }
    }
@@ -254,6 +260,8 @@ func (osInterface) IsPathSeparator(c byte) bool {
}

func (osi osInterface) Mkdir(name string, perm os.FileMode) error {
    _ = perm

    //nolint:wrapcheck
    return osi.cli.Mkdir(name)
}
@@ -288,6 +296,8 @@ func isNotExist(err error) bool {
}

func (s *sftpImpl) DeleteBlobInPath(ctx context.Context, dirPath, fullPath string) error {
    _ = dirPath

    //nolint:wrapcheck
    return s.rec.UsingConnectionNoResult(ctx, "DeleteBlobInPath", func(conn connection.Connection) error {
        err := sftpClientFromConnection(conn).Remove(fullPath)
@@ -329,7 +339,7 @@ func writeKnownHostsDataStringToTempFile(data string) (string, error) {
        return "", errors.Wrap(err, "error creating temp file")
    }

    defer tf.Close() //nolint:errcheck,gosec
    defer tf.Close() //nolint:errcheck

    if _, err := tf.WriteString(data); err != nil {
        return "", errors.Wrap(err, "error writing temporary file")

@@ -151,6 +151,7 @@ func startDockerSFTPServerOrSkip(t *testing.T, idRSA string) (host string, port

    t.Logf("SFTP server OK on host:%q port:%v. Known hosts file: %v", host, port, knownHostsFile)

    //nolint:nakedret
    return
}

@@ -9,7 +9,7 @@
type Options struct {
    URL string `json:"url"`
    Username string `json:"username,omitempty"`
    Password string `json:"password,omitempty" kopia:"sensitive"`
    Password string `json:"password,omitempty" kopia:"sensitive"`
    TrustedServerCertificateFingerprint string `json:"trustedServerCertificateFingerprint,omitempty"`
    AtomicWrites bool `json:"atomicWrites"`

@@ -49,6 +49,8 @@ func (d *davStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) {
}

func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path string, offset, length int64, output blob.OutputBuffer) error {
    _ = dirPath

    output.Reset()

    if offset < 0 {
@@ -88,6 +90,8 @@ func (d *davStorageImpl) GetBlobFromPath(ctx context.Context, dirPath, path stri
}

func (d *davStorageImpl) GetMetadataFromPath(ctx context.Context, dirPath, path string) (blob.Metadata, error) {
    _ = dirPath

    fi, err := d.cli.Stat(path)
    if err != nil {
        return blob.Metadata{}, d.translateError(err)
@@ -212,6 +216,8 @@ func (d *davStorageImpl) PutBlobInPath(ctx context.Context, dirPath, filePath st
}

func (d *davStorageImpl) DeleteBlobInPath(ctx context.Context, dirPath, filePath string) error {
    _ = dirPath

    err := d.translateError(retry.WithExponentialBackoffNoValue(ctx, "DeleteBlobInPath", func() error {
        //nolint:wrapcheck
        return d.cli.Remove(filePath)

@@ -585,9 +585,9 @@ func (sm *SharedManager) shouldRefreshIndexes() bool {
}

// PrepareUpgradeToIndexBlobManagerV1 prepares the repository for migrating to IndexBlobManagerV1.
func (sm *SharedManager) PrepareUpgradeToIndexBlobManagerV1(ctx context.Context, params epoch.Parameters) error {
func (sm *SharedManager) PrepareUpgradeToIndexBlobManagerV1(ctx context.Context) error {
    //nolint:wrapcheck
    return sm.indexBlobManagerV1.PrepareUpgradeToIndexBlobManagerV1(ctx, params, sm.indexBlobManagerV0)
    return sm.indexBlobManagerV1.PrepareUpgradeToIndexBlobManagerV1(ctx, sm.indexBlobManagerV0)
}

// NewSharedManager returns SharedManager that is used by SessionWriteManagers on top of a repository.

@@ -25,7 +25,7 @@ func (sm *SharedManager) Refresh(ctx context.Context) error {
        return err
    }

    ibm.Invalidate(ctx)
    ibm.Invalidate()

    timer := timetrack.StartTimer()

@@ -15,7 +15,7 @@ type Manager interface {
    WriteIndexBlobs(ctx context.Context, data []gather.Bytes, suffix blob.ID) ([]blob.Metadata, error)
    ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.Time, error)
    Compact(ctx context.Context, opts CompactOptions) error
    Invalidate(ctx context.Context)
    Invalidate()
}

// CompactOptions provides options for compaction.
@@ -144,7 +144,7 @@ func (m *ManagerV0) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.
|
||||
}
|
||||
|
||||
// Invalidate invalidates any caches.
|
||||
func (m *ManagerV0) Invalidate(ctx context.Context) {
|
||||
func (m *ManagerV0) Invalidate() {
|
||||
}
|
||||
|
||||
// Compact performs compaction of index blobs by merging smaller ones into larger
|
||||
|
||||
@@ -53,7 +53,7 @@ func (m *ManagerV1) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.
|
||||
}
|
||||
|
||||
// Invalidate clears any read caches.
|
||||
func (m *ManagerV1) Invalidate(ctx context.Context) {
|
||||
func (m *ManagerV1) Invalidate() {
|
||||
m.EpochMgr.Invalidate()
|
||||
}
|
||||
|
||||
@@ -147,7 +147,7 @@ func (m *ManagerV1) EpochManager() *epoch.Manager {
|
||||
}
|
||||
|
||||
// PrepareUpgradeToIndexBlobManagerV1 prepares the repository for migrating to IndexBlobManagerV1.
|
||||
func (m *ManagerV1) PrepareUpgradeToIndexBlobManagerV1(ctx context.Context, params epoch.Parameters, v0 *ManagerV0) error {
|
||||
func (m *ManagerV1) PrepareUpgradeToIndexBlobManagerV1(ctx context.Context, v0 *ManagerV0) error {
|
||||
ibl, _, err := v0.ListActiveIndexBlobs(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing active index blobs")
|
||||
|
||||
@@ -163,7 +163,7 @@ func (r *ReedSolomonCrcECC) computeSizesFromStored(length int) sizesInfo {
|
||||
// The parity data comes first so we can avoid storing the padding needed for the
|
||||
// data shards, and instead compute the padded size based on the input length.
|
||||
// All parity shards are always stored.
|
||||
func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, contentID []byte, output *gather.WriteBuffer) error {
|
||||
func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, _ []byte, output *gather.WriteBuffer) error {
|
||||
sizes := r.computeSizesFromOriginal(input.Length())
|
||||
inputPlusLengthSize := lengthSize + input.Length()
|
||||
dataSizeInBlock := sizes.DataShards * sizes.ShardSize
|
||||
@@ -247,7 +247,7 @@ func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, contentID []byte, output
|
||||
|
||||
// Decrypt corrects the data from input based on the ECC data.
|
||||
// See Encrypt comments for a description of the layout.
|
||||
func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, contentID []byte, output *gather.WriteBuffer) error {
|
||||
func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, _ []byte, output *gather.WriteBuffer) error {
|
||||
sizes := r.computeSizesFromStored(input.Length())
|
||||
dataPlusCrcSizeInBlock := sizes.DataShards * (crcSize + sizes.ShardSize)
|
||||
parityPlusCrcSizeInBlock := sizes.ParityShards * (crcSize + sizes.ShardSize)
|
||||
|
||||
@@ -32,14 +32,17 @@ type blobCache interface {
|
||||
|
||||
type nullCache struct{}
|
||||
|
||||
//nolint:revive
|
||||
func (nullCache) Get(ctx context.Context, blobID blob.ID) ([]byte, time.Time, bool) {
|
||||
return nil, time.Time{}, false
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (nullCache) Put(ctx context.Context, blobID blob.ID, data []byte) (time.Time, error) {
|
||||
return clock.Now(), nil
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
func (nullCache) Remove(ctx context.Context, ids []blob.ID) {
|
||||
}
|
||||
|
||||
|
||||
@@ -255,7 +255,7 @@ func mustGetMutableParameters(t *testing.T, mgr *format.Manager) format.MutableP
|
||||
func mustGetUpgradeLockIntent(t *testing.T, mgr *format.Manager) *format.UpgradeLockIntent {
|
||||
t.Helper()
|
||||
|
||||
uli, err := mgr.GetUpgradeLockIntent(testlogging.Context(t))
|
||||
uli, err := mgr.GetUpgradeLockIntent()
|
||||
require.NoError(t, err)
|
||||
|
||||
return uli
|
||||
|
||||
@@ -10,8 +10,6 @@
|
||||
|
||||
// RepositoryConfig describes the format of objects in a repository.
|
||||
// The contents of this object are stored encrypted since they contain sensitive key material.
|
||||
//
|
||||
//nolint:musttag // false positive
|
||||
type RepositoryConfig struct {
|
||||
ContentFormat
|
||||
ObjectFormat
|
||||
|
||||
@@ -186,7 +186,7 @@ func (m *Manager) RollbackUpgrade(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// GetUpgradeLockIntent gets the current upgrade lock intent.
|
||||
func (m *Manager) GetUpgradeLockIntent(ctx context.Context) (*UpgradeLockIntent, error) {
|
||||
func (m *Manager) GetUpgradeLockIntent() (*UpgradeLockIntent, error) {
|
||||
if err := m.maybeRefreshNotLocked(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -772,6 +772,8 @@ type grpcCreds struct {
}

func (c grpcCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
_ = uri

return map[string]string{
"kopia-hostname": c.hostname,
"kopia-username": c.username,
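When an externally defined interface fixes the signature (GetRequestMetadata here), the parameter name stays and the body discards it explicitly with _ = uri. A sketch under that assumption, with an invented fetcher interface standing in for the external one:

package main

import (
	"context"
	"fmt"
)

// fetcher mimics a hypothetical externally defined interface whose
// signature we cannot change.
type fetcher interface {
	Fetch(ctx context.Context, uri ...string) (map[string]string, error)
}

type staticFetcher struct{ host string }

func (f staticFetcher) Fetch(ctx context.Context, uri ...string) (map[string]string, error) {
	_ = uri // explicitly discarded: required by the interface, unused here

	return map[string]string{"host": f.host}, nil
}

func main() {
	var f fetcher = staticFetcher{host: "example"}
	md, _ := f.Fetch(context.Background())
	fmt.Println(md)
}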
@@ -133,7 +133,7 @@ func LoadConfigFromFile(fileName string) (*LocalConfig, error) {
if err != nil {
return nil, errors.Wrap(err, "error loading config file")
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

var lc LocalConfig
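The recurring //nolint:errcheck,gosec to //nolint:errcheck change narrows each directive to the one linter that still fires on a deferred Close; presumably gosec no longer reports it, and a directive naming a linter that reports nothing would itself be flagged as an unused suppression (this rationale is inferred from the diff, not stated in it). A sketch of the resulting shape, with an invented function:

package main

import (
	"fmt"
	"os"
)

func readFirstByte(name string) (byte, error) {
	f, err := os.Open(name)
	if err != nil {
		return 0, err
	}
	// Only errcheck still complains about the ignored error from a deferred
	// Close, so the directive names just that linter.
	defer f.Close() //nolint:errcheck

	buf := make([]byte, 1)
	if _, err := f.Read(buf); err != nil {
		return 0, err
	}

	return buf[0], nil
}

func main() {
	b, err := readFirstByte("/etc/hostname")
	fmt.Println(b, err)
}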
@@ -7,6 +7,6 @@

//nolint:gochecknoglobals
var NullLogger = zap.NewNop().Sugar()

func getNullLogger(module string) Logger {
func getNullLogger(_ string) Logger {
return NullLogger
}
@@ -268,6 +268,8 @@ type ManagerOptions struct {

// NewManager returns a new manifest manager for the provided content manager.
func NewManager(ctx context.Context, b contentManager, options ManagerOptions, mr *metrics.Registry) (*Manager, error) {
_ = mr

timeNow := options.TimeNow
if timeNow == nil {
timeNow = clock.Now
@@ -180,7 +180,7 @@ func appendIndexEntries(indexEntries []IndirectObjectEntry, startingLength int64
return indexEntries, totalLength
}

func noop(contentID content.ID) error { return nil }
func noop(content.ID) error { return nil }

// PrefetchBackingContents attempts to bring contents backing the provided object IDs into the cache.
// This may succeed only partially due to cache size limits and other constraints.
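For a stub like noop, the diff goes one step further and drops the parameter name entirely, which Go permits whenever a parameter is never used. For illustration:

package main

import "fmt"

// Go allows a parameter to be left entirely unnamed when the body never
// reads it; for a one-line stub this is cleaner than a blank identifier.
func noop(string) error { return nil }

func main() {
	fmt.Println(noop("ignored"))
}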
@@ -199,6 +199,8 @@ func PrefetchBackingContents(ctx context.Context, contentMgr contentManager, obj

// NewObjectManager creates an ObjectManager with the specified content manager and format.
func NewObjectManager(ctx context.Context, bm contentManager, f format.ObjectFormat, mr *metrics.Registry) (*Manager, error) {
_ = mr

om := &Manager{
contentMgr: bm,
Format: f,
@@ -114,7 +114,7 @@ func (o *FilesystemOutput) Parallelizable() bool {
}

// BeginDirectory implements restore.Output interface.
func (o *FilesystemOutput) BeginDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
func (o *FilesystemOutput) BeginDirectory(ctx context.Context, relativePath string, _ fs.Directory) error {
path := filepath.Join(o.TargetPath, filepath.FromSlash(relativePath))

if err := o.createDirectory(ctx, path); err != nil {
@@ -135,6 +135,8 @@ func (o *FilesystemOutput) FinishDirectory(ctx context.Context, relativePath string
}

// WriteDirEntry implements restore.Output interface.
//
//nolint:revive
func (o *FilesystemOutput) WriteDirEntry(ctx context.Context, relativePath string, de *snapshot.DirEntry, e fs.Directory) error {
return nil
}
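Note the blank // line between the doc sentence and the //nolint:revive directive, here and in the hunks below: this matches how gofmt in Go 1.19 and later formats directive comments at the end of a doc comment, keeping the directive out of the rendered documentation (an observation about the pattern, not something the diff itself states). A small illustration with invented names:

package main

import "fmt"

// Greet prints a greeting; only the text above the blank // line is treated
// as documentation, while the directive below it is not.
//
//nolint:revive // unused-parameter: count kept for signature symmetry
func Greet(name string, count int) {
	fmt.Println("hello,", name)
}

func main() {
	Greet("world", 0)
}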
@@ -230,6 +232,8 @@ func fileIsSymlink(st os.FileInfo) bool {
}

// SymlinkExists implements restore.Output interface.
//
//nolint:revive
func (o *FilesystemOutput) SymlinkExists(ctx context.Context, relativePath string, e fs.Symlink) bool {
st, err := os.Lstat(filepath.Join(o.TargetPath, relativePath))
if err != nil {
@@ -365,7 +369,7 @@ func write(targetPath string, r fs.Reader, size int64, c streamCopier) error {

// ensure we always close f. Note that this does not conflict with the
// close below, as close is idempotent.
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

name := f.Name()
@@ -416,7 +420,7 @@ func isEmptyDirectory(name string) (bool, error) {
return false, errors.Wrap(err, "error opening directory")
}

defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

if _, err = f.Readdirnames(1); errors.Is(err, io.EOF) {
return true, nil
@@ -15,6 +15,7 @@ func symlinkChown(path string, uid, gid int) error {
return unix.Lchown(path, uid, gid)
}

//nolint:revive
func symlinkChmod(path string, mode os.FileMode) error {
// linux does not support permissions on symlinks
return nil
@@ -10,10 +10,12 @@

"github.com/kopia/kopia/internal/atomicfile"
)

//nolint:revive
func symlinkChown(path string, uid, gid int) error {
return nil
}

//nolint:revive
func symlinkChmod(path string, mode os.FileMode) error {
return nil
}
@@ -93,6 +93,8 @@ type Options struct {
}

// Entry walks a snapshot root with the given root entry and restores it to the provided output.
//
//nolint:revive
func Entry(ctx context.Context, rep repo.Repository, output Output, rootEntry fs.Entry, options Options) (Stats, error) {
c := copier{
output: output,
@@ -45,11 +45,15 @@ func (o *TarOutput) BeginDirectory(ctx context.Context, relativePath string, d fs.Directory) error {
}

// FinishDirectory implements restore.Output interface.
//
//nolint:revive
func (o *TarOutput) FinishDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
return nil
}

// WriteDirEntry implements restore.Output interface.
//
//nolint:revive
func (o *TarOutput) WriteDirEntry(ctx context.Context, relativePath string, de *snapshot.DirEntry, e fs.Directory) error {
return nil
}
@@ -94,6 +98,8 @@ func (o *TarOutput) WriteFile(ctx context.Context, relativePath string, f fs.File) error {
}

// FileExists implements restore.Output interface.
//
//nolint:revive
func (o *TarOutput) FileExists(ctx context.Context, relativePath string, f fs.File) bool {
return false
}
@@ -123,6 +129,8 @@ func (o *TarOutput) CreateSymlink(ctx context.Context, relativePath string, l fs.Symlink) error {
}

// SymlinkExists implements restore.Output interface.
//
//nolint:revive
func (o *TarOutput) SymlinkExists(ctx context.Context, relativePath string, l fs.Symlink) bool {
return false
}
@@ -24,16 +24,22 @@ func (o *ZipOutput) Parallelizable() bool {
}

// BeginDirectory implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) BeginDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
return nil
}

// FinishDirectory implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) FinishDirectory(ctx context.Context, relativePath string, e fs.Directory) error {
return nil
}

// WriteDirEntry implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) WriteDirEntry(ctx context.Context, relativePath string, de *snapshot.DirEntry, e fs.Directory) error {
return nil
}
@@ -77,17 +83,23 @@ func (o *ZipOutput) WriteFile(ctx context.Context, relativePath string, f fs.File) error {
}

// FileExists implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) FileExists(ctx context.Context, relativePath string, l fs.File) bool {
return false
}

// CreateSymlink implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) CreateSymlink(ctx context.Context, relativePath string, e fs.Symlink) error {
log(ctx).Debugf("create symlink not implemented yet")
return nil
}

// SymlinkExists implements restore.Output interface.
//
//nolint:revive
func (o *ZipOutput) SymlinkExists(ctx context.Context, relativePath string, l fs.Symlink) bool {
return false
}
@@ -67,6 +67,8 @@ type dirRewriterRequest struct {
}

func (rw *DirRewriter) processRequest(pool *workshare.Pool[*dirRewriterRequest], req *dirRewriterRequest) {
_ = pool

req.result, req.err = rw.getCachedReplacement(req.ctx, req.parentPath, req.input)
}
@@ -246,6 +248,8 @@ func (rw *DirRewriter) Close(ctx context.Context) {
}

// RewriteKeep is a callback that keeps the unreadable entry.
//
//nolint:revive
func RewriteKeep(ctx context.Context, parentPath string, input *snapshot.DirEntry, err error) (*snapshot.DirEntry, error) {
return input, nil
}
@@ -293,11 +297,15 @@ func RewriteAsStub(rep repo.RepositoryWriter) RewriteFailedEntryCallback {
}

// RewriteFail is a callback that fails the entire rewrite process when a directory is unreadable.
//
//nolint:revive
func RewriteFail(ctx context.Context, parentPath string, entry *snapshot.DirEntry, err error) (*snapshot.DirEntry, error) {
return nil, err
}

// RewriteRemove is a callback that removes the entire failed entry.
//
//nolint:revive
func RewriteRemove(ctx context.Context, parentPath string, entry *snapshot.DirEntry, err error) (*snapshot.DirEntry, error) {
return nil, nil
}
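RewriteKeep, RewriteFail, and RewriteRemove all satisfy one callback type, so each must accept the full argument list even when it ignores most of it; that is why the suppression here is per function rather than a rename. A compact sketch with an invented callback type:

package main

import (
	"errors"
	"fmt"
)

// callback is a hypothetical shared function type; every implementation must
// match it, even when it ignores some arguments.
type callback func(path string, err error) (string, error)

// keep ignores the error and keeps the path.
//
//nolint:revive // unused-parameter
func keep(path string, err error) (string, error) { return path, nil }

// fail ignores the path and propagates the error.
//
//nolint:revive // unused-parameter
func fail(path string, err error) (string, error) { return "", err }

func main() {
	for _, cb := range []callback{keep, fail} {
		fmt.Println(cb("/tmp/x", errors.New("unreadable")))
	}
}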
@@ -64,42 +64,66 @@ type NullUploadProgress struct{}
func (p *NullUploadProgress) UploadStarted() {}

// EstimatedDataSize implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) EstimatedDataSize(fileCount int, totalBytes int64) {}

// UploadFinished implements UploadProgress.
func (p *NullUploadProgress) UploadFinished() {}

// HashedBytes implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) HashedBytes(numBytes int64) {}

// ExcludedFile implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) ExcludedFile(fname string, numBytes int64) {}

// ExcludedDir implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) ExcludedDir(dirname string) {}

// CachedFile implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) CachedFile(fname string, numBytes int64) {}

// UploadedBytes implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) UploadedBytes(numBytes int64) {}

// HashingFile implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) HashingFile(fname string) {}

// FinishedHashingFile implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) FinishedHashingFile(fname string, numBytes int64) {}

// FinishedFile implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) FinishedFile(fname string, err error) {}

// StartedDirectory implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) StartedDirectory(dirname string) {}

// FinishedDirectory implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) FinishedDirectory(dirname string) {}

// Error implements UploadProgress.
//
//nolint:revive
func (p *NullUploadProgress) Error(path string, err error, isIgnored bool) {}

var _ UploadProgress = (*NullUploadProgress)(nil)
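The closing var _ UploadProgress = (*NullUploadProgress)(nil) line is the standard compile-time interface check: if any of the no-op methods drifted away from the interface, the build would fail on that line rather than at a call site. A minimal version of the idiom, with invented names:

package main

import "fmt"

type progress interface {
	Done()
}

type nullProgress struct{}

func (*nullProgress) Done() {}

// Compile-time assertion: the build breaks here if *nullProgress ever stops
// satisfying progress, at no runtime cost.
var _ progress = (*nullProgress)(nil)

func main() {
	fmt.Println("ok")
}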
@@ -171,25 +195,35 @@ func (p *CountingUploadProgress) HashedBytes(numBytes int64) {
}

// CachedFile implements UploadProgress.
//
//nolint:revive
func (p *CountingUploadProgress) CachedFile(fname string, numBytes int64) {
atomic.AddInt32(&p.counters.TotalCachedFiles, 1)
atomic.AddInt64(&p.counters.TotalCachedBytes, numBytes)
}

// FinishedHashingFile implements UploadProgress.
//
//nolint:revive
func (p *CountingUploadProgress) FinishedHashingFile(fname string, numBytes int64) {
atomic.AddInt32(&p.counters.TotalHashedFiles, 1)
}

// FinishedFile implements UploadProgress.
//
//nolint:revive
func (p *CountingUploadProgress) FinishedFile(fname string, err error) {}

// ExcludedDir implements UploadProgress.
//
//nolint:revive
func (p *CountingUploadProgress) ExcludedDir(dirname string) {
atomic.AddInt32(&p.counters.TotalExcludedDirs, 1)
}

// ExcludedFile implements UploadProgress.
//
//nolint:revive
func (p *CountingUploadProgress) ExcludedFile(fname string, numBytes int64) {
atomic.AddInt32(&p.counters.TotalExcludedFiles, 1)
}
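CountingUploadProgress accumulates its counters with sync/atomic so concurrent upload goroutines can report progress without a mutex. A self-contained sketch of the same pattern (all names invented):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type counters struct {
	totalFiles int32
	totalBytes int64
}

// record mirrors the progress-callback shape above; the filename is ignored
// here, so it is declared as the blank identifier.
func (c *counters) record(_ string, numBytes int64) {
	atomic.AddInt32(&c.totalFiles, 1)
	atomic.AddInt64(&c.totalBytes, numBytes)
}

func main() {
	var c counters

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.record("f", 100) // safe from many goroutines: updates are atomic
		}()
	}
	wg.Wait()

	fmt.Println(atomic.LoadInt32(&c.totalFiles), atomic.LoadInt64(&c.totalBytes))
}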
@@ -14,10 +14,11 @@ type scanResults struct {
totalFileSize int64
}

func (e *scanResults) Error(ctx context.Context, filename string, err error, isIgnored bool) {}
func (e *scanResults) Error(context.Context, string, error, bool) {}

func (e *scanResults) Processing(ctx context.Context, pathname string) {}
func (e *scanResults) Processing(context.Context, string) {}

//nolint:revive
func (e *scanResults) Stats(ctx context.Context, s *snapshot.Stats, includedFiles, excludedFiles SampleBuckets, excludedDirs []string, final bool) {
if final {
e.numFiles = int(atomic.LoadInt32(&s.TotalFileCount))
@@ -36,7 +36,7 @@ func getProcessStats(fname string) (processStats, error) {
if err != nil {
return processStats{}, err
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

s := bufio.NewScanner(f)
@@ -101,7 +101,7 @@ func parseRepoSize(fname string) (int64, error) {
if err != nil {
return 0, err
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

s := bufio.NewScanner(f)
s.Scan()
@@ -133,7 +133,7 @@ func generateAppFlags(app *kingpin.ApplicationModel) error {
if err != nil {
return errors.Wrap(err, "unable to create common flags file")
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

title := "Flags"
fmt.Fprintf(f, `---
@@ -158,7 +158,7 @@ func generateCommands(app *kingpin.ApplicationModel, section string, weight int,
if err != nil {
return errors.Wrap(err, "unable to create common flags file")
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

title := section + " Commands"
fmt.Fprintf(f, `---
@@ -258,7 +258,7 @@ func generateSubcommandPage(fname string, cmd *kingpin.CmdModel) {
if err != nil {
log.Fatalf("unable to create page: %v", err)
}
defer f.Close() //nolint:errcheck,gosec
defer f.Close() //nolint:errcheck

title := cmd.FullCommand
fmt.Fprintf(f, `---
Some files were not shown because too many files have changed in this diff.