feat(cli): Support displaying storage values in base-2 [#2492] (#2502)

* Update display on repository summary

* Apply throughout app

* Situate units_test

* Update Command Line documentation

* Envar cleanup

* Rename to BytesString

* Restore envar string available for test

* Remove extraneous empty check and restore UIPreferences field for frontend

* PR: config bool cleanup and missed `BaseEnv`s

* Fix lint and test
This commit is contained in:
atom
2022-10-24 20:00:36 -06:00
committed by GitHub
parent 7d47658cbb
commit c5efed01f4
40 changed files with 198 additions and 149 deletions

View File

@@ -147,12 +147,12 @@ func (p *cliProgress) output(col *color.Color, msg string) {
inProgressHashing,
hashedFiles,
units.BytesStringBase10(hashedBytes),
units.BytesString(hashedBytes),
cachedFiles,
units.BytesStringBase10(cachedBytes),
units.BytesString(cachedBytes),
units.BytesStringBase10(uploadedBytes),
units.BytesString(uploadedBytes),
)
if fatalErrorCount > 0 {
@@ -177,7 +177,7 @@ func (p *cliProgress) output(col *color.Color, msg string) {
}
if est, ok := p.uploadStartTime.Estimate(float64(hashedBytes+cachedBytes), float64(p.estimatedTotalBytes)); ok {
line += fmt.Sprintf(", estimated %v", units.BytesStringBase10(p.estimatedTotalBytes))
line += fmt.Sprintf(", estimated %v", units.BytesString(p.estimatedTotalBytes))
line += fmt.Sprintf(" (%.1f%%)", est.PercentComplete)
line += fmt.Sprintf(" %v left", est.Remaining)
} else {

View File

@@ -62,7 +62,7 @@ func (c *commandBenchmarkCompression) readInputFile(ctx context.Context) ([]byte
if dataLength > defaultCompressedDataByMethod {
dataLength = defaultCompressedDataByMethod
log(ctx).Infof("NOTICE: The provided input file is too big, using first %v.", units.BytesStringBase2(dataLength))
log(ctx).Infof("NOTICE: The provided input file is too big, using first %v.", units.BytesString(dataLength))
}
data := make([]byte, dataLength)
@@ -94,7 +94,7 @@ func (c *commandBenchmarkCompression) run(ctx context.Context) error {
return errors.Errorf("empty data file")
}
log(ctx).Infof("Compressing input file %q (%v) using all compression methods.", c.dataFile, units.BytesStringBase2(int64(len(data))))
log(ctx).Infof("Compressing input file %q (%v) using all compression methods.", c.dataFile, units.BytesString(int64(len(data))))
repeatCount := c.repeat
@@ -106,7 +106,7 @@ func (c *commandBenchmarkCompression) run(ctx context.Context) error {
}
}
log(ctx).Infof("Repeating %v times per compression method (total %v). Override with --repeat=N.", repeatCount, units.BytesStringBase2(int64(repeatCount*len(data))))
log(ctx).Infof("Repeating %v times per compression method (total %v). Override with --repeat=N.", repeatCount, units.BytesString(int64(repeatCount*len(data))))
for name, comp := range compression.ByName {
if compression.IsDeprecated[name] && !c.deprecated {
@@ -211,10 +211,10 @@ func (c *commandBenchmarkCompression) printResults(results []compressionBechmark
c.out.printStdout("%3d. %-26v %-12v %-12v %-8v %v%v",
ndx,
r.compression,
units.BytesStringBase2(r.compressedSize),
units.BytesStringBase2(int64(r.throughput))+"/s",
units.BytesString(r.compressedSize),
units.BytesString(int64(r.throughput))+"/s",
r.allocations,
units.BytesStringBase2(r.allocBytes),
units.BytesString(r.allocBytes),
maybeDeprecated,
)

View File

@@ -45,7 +45,7 @@ func (c *commandBenchmarkCrypto) run(ctx context.Context) error {
c.out.printStdout("-----------------------------------------------------------------\n")
for ndx, r := range results {
c.out.printStdout("%3d. %-20v %-30v %v / second", ndx, r.hash, r.encryption, units.BytesStringBase2(int64(r.throughput)))
c.out.printStdout("%3d. %-20v %-30v %v / second", ndx, r.hash, r.encryption, units.BytesString(int64(r.throughput)))
if c.optionPrint {
c.out.printStdout(", --block-hash=%s --encryption=%s", r.hash, r.encryption)

View File

@@ -47,10 +47,10 @@ func (c *commandBenchmarkEcc) run(ctx context.Context) error {
for ndx, r := range results {
c.out.printStdout("%3d. %-30v %12v/s %12v/s %6v%% [%v]", ndx, r.ecc,
units.BytesStringBase2(int64(r.throughputEncoding)),
units.BytesStringBase2(int64(r.throughputDecoding)),
units.BytesString(int64(r.throughputEncoding)),
units.BytesString(int64(r.throughputDecoding)),
int(math.Round(r.growth*100)), //nolint:gomnd
units.BytesStringBase2(int64(r.size)),
units.BytesString(int64(r.size)),
)
if c.optionPrint {

View File

@@ -45,7 +45,7 @@ func (c *commandBenchmarkEncryption) run(ctx context.Context) error {
c.out.printStdout("-----------------------------------------------------------------\n")
for ndx, r := range results {
c.out.printStdout("%3d. %-30v %v / second", ndx, r.encryption, units.BytesStringBase2(int64(r.throughput)))
c.out.printStdout("%3d. %-30v %v / second", ndx, r.encryption, units.BytesString(int64(r.throughput)))
if c.optionPrint {
c.out.printStdout(", --encryption=%s", r.encryption)

View File

@@ -42,7 +42,7 @@ func (c *commandBenchmarkHashing) run(ctx context.Context) error {
c.out.printStdout("-----------------------------------------------------------------\n")
for ndx, r := range results {
c.out.printStdout("%3d. %-20v %v / second", ndx, r.hash, units.BytesStringBase2(int64(r.throughput)))
c.out.printStdout("%3d. %-20v %v / second", ndx, r.hash, units.BytesString(int64(r.throughput)))
if c.optionPrint {
c.out.printStdout(", --block-hash=%s", r.hash)

View File

@@ -126,7 +126,7 @@ type benchResult struct {
c.out.printStdout("%-25v %12v count:%v min:%v 10th:%v 25th:%v 50th:%v 75th:%v 90th:%v max:%v\n",
r.splitter,
units.BytesStringBase10(r.bytesPerSecond)+"/s",
units.BytesString(r.bytesPerSecond)+"/s",
r.segmentCount,
r.min, r.p10, r.p25, r.p50, r.p75, r.p90, r.max,
)
@@ -143,7 +143,7 @@ type benchResult struct {
c.out.printStdout("%3v. %-25v %-12v count:%v min:%v 10th:%v 25th:%v 50th:%v 75th:%v 90th:%v max:%v\n",
ndx,
r.splitter,
units.BytesStringBase10(r.bytesPerSecond)+"/s",
units.BytesString(r.bytesPerSecond)+"/s",
r.segmentCount,
r.min, r.p10, r.p25, r.p50, r.p75, r.p90, r.max)

View File

@@ -62,7 +62,7 @@ func(b blob.Metadata) error {
return errors.Wrap(err, "error listing blobs")
}
sizeToString := units.BytesStringBase10
sizeToString := units.BytesString
if c.raw {
sizeToString = func(l int64) string {
//nolint:gomnd

View File

@@ -73,14 +73,14 @@ func (c *commandCacheInfo) run(ctx context.Context, rep repo.Repository) error {
maybeLimit := ""
if l, ok := path2Limit[ent.Name()]; ok {
maybeLimit = fmt.Sprintf(" (limit %v, min sweep age %v)", units.BytesStringBase10(l), path2SweepAgeSeconds[ent.Name()])
maybeLimit = fmt.Sprintf(" (limit %v, min sweep age %v)", units.BytesString(l), path2SweepAgeSeconds[ent.Name()])
}
if ent.Name() == "blob-list" {
maybeLimit = fmt.Sprintf(" (duration %v)", opts.MaxListCacheDuration.DurationOrDefault(0))
}
c.out.printStdout("%v: %v files %v%v\n", subdir, fileCount, units.BytesStringBase10(totalFileSize), maybeLimit)
c.out.printStdout("%v: %v files %v%v\n", subdir, fileCount, units.BytesString(totalFileSize), maybeLimit)
}
c.out.printStderr("To adjust cache sizes use 'kopia cache set'.\n")

View File

@@ -58,14 +58,14 @@ func (c *commandCacheSetParams) run(ctx context.Context, rep repo.RepositoryWrit
if v := c.contentCacheSizeMB; v != -1 {
v *= 1e6 // convert MB to bytes
log(ctx).Infof("changing content cache size to %v", units.BytesStringBase10(v))
log(ctx).Infof("changing content cache size to %v", units.BytesString(v))
opts.MaxCacheSizeBytes = v
changed++
}
if v := c.maxMetadataCacheSizeMB; v != -1 {
v *= 1e6 // convert MB to bytes
log(ctx).Infof("changing metadata cache size to %v", units.BytesStringBase10(v))
log(ctx).Infof("changing metadata cache size to %v", units.BytesString(v))
opts.MaxMetadataCacheSizeBytes = v
changed++
}

View File

@@ -48,7 +48,7 @@ func (c *commandContentStats) run(ctx context.Context, rep repo.DirectRepository
return errors.Wrap(err, "error calculating totals")
}
sizeToString := units.BytesStringBase10
sizeToString := units.BytesString
if c.raw {
sizeToString = func(l int64) string {
return strconv.FormatInt(l, 10) //nolint:gomnd

View File

@@ -58,7 +58,7 @@ func (c *commandIndexEpochList) run(ctx context.Context, rep repo.DirectReposito
formatTimestamp(min),
formatTimestamp(max),
len(uces),
units.BytesStringBase2(blob.TotalLength(uces)),
units.BytesString(blob.TotalLength(uces)),
max.Sub(min).Round(time.Second),
)
}
@@ -68,7 +68,7 @@ func (c *commandIndexEpochList) run(ctx context.Context, rep repo.DirectReposito
e,
formatTimestamp(secs[0].Timestamp),
len(secs),
units.BytesStringBase2(blob.TotalLength(secs)),
units.BytesString(blob.TotalLength(secs)),
)
}
}
@@ -78,7 +78,7 @@ func (c *commandIndexEpochList) run(ctx context.Context, rep repo.DirectReposito
cs.MinEpoch,
cs.MaxEpoch,
len(cs.Blobs),
units.BytesStringBase2(blob.TotalLength(cs.Blobs)),
units.BytesString(blob.TotalLength(cs.Blobs)),
)
}

View File

@@ -42,7 +42,7 @@ func (c *commandLogsList) run(ctx context.Context, rep repo.DirectRepository) er
"%v %v %v %v %v\n", s.id,
formatTimestamp(s.startTime),
s.endTime.Sub(s.startTime),
units.BytesStringBase2(s.totalSize),
units.BytesString(s.totalSize),
len(s.segments),
)
}

View File

@@ -107,6 +107,6 @@ func TestLogsMaintenance(t *testing.T) {
infoLines := e.RunAndExpectSuccess(t, "maintenance", "info")
require.Contains(t, infoLines, " max age of logs: 22h0m0s")
require.Contains(t, infoLines, " max total size: 33 MiB")
require.Contains(t, infoLines, " max total size: 34.6 MB")
require.Contains(t, infoLines, " max count: 44")
}

View File

@@ -64,7 +64,7 @@ func (c *commandMaintenanceInfo) run(ctx context.Context, rep repo.DirectReposit
c.out.printStdout("Log Retention:\n")
c.out.printStdout(" max count: %v\n", cl.MaxCount)
c.out.printStdout(" max age of logs: %v\n", cl.MaxAge)
c.out.printStdout(" max total size: %v\n", units.BytesStringBase2(cl.MaxTotalSize))
c.out.printStdout(" max total size: %v\n", units.BytesString(cl.MaxTotalSize))
c.out.printStdout("Recent Maintenance Runs:\n")

View File

@@ -80,7 +80,7 @@ func (c *commandMaintenanceSet) setLogCleanupParametersFromFlags(ctx context.Con
p.LogRetention = cl
*changed = true
log(ctx).Infof("Setting total retained log size to %v.", units.BytesStringBase2(cl.MaxTotalSize))
log(ctx).Infof("Setting total retained log size to %v.", units.BytesString(cl.MaxTotalSize))
}
}

View File

@@ -230,7 +230,7 @@ func applyOptionalInt64MiB(ctx context.Context, desc string, val **policy.Option
i := policy.OptionalInt64(v)
*changeCount++
log(ctx).Infof(" - setting %q to %v.", desc, units.BytesStringBase2(v))
log(ctx).Infof(" - setting %q to %v.", desc, units.BytesString(v))
*val = &i

View File

@@ -19,7 +19,7 @@ func TestSetUploadPolicy(t *testing.T) {
lines = compressSpaces(lines)
require.Contains(t, lines, " Max parallel snapshots (server/UI): 1 (defined for this target)")
require.Contains(t, lines, " Max parallel file reads: - (defined for this target)")
require.Contains(t, lines, " Parallel upload above size: 2 GiB (defined for this target)")
require.Contains(t, lines, " Parallel upload above size: 2.1 GB (defined for this target)")
// make some directory we'll be setting policy on
td := testutil.TempDirectory(t)
@@ -28,7 +28,7 @@ func TestSetUploadPolicy(t *testing.T) {
lines = compressSpaces(lines)
require.Contains(t, lines, " Max parallel snapshots (server/UI): 1 inherited from (global)")
require.Contains(t, lines, " Max parallel file reads: - inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 2 GiB inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 2.1 GB inherited from (global)")
e.RunAndExpectSuccess(t, "policy", "set", "--global", "--max-parallel-snapshots=7", "--max-parallel-file-reads=33", "--parallel-upload-above-size-mib=4096")
@@ -37,7 +37,7 @@ func TestSetUploadPolicy(t *testing.T) {
require.Contains(t, lines, " Max parallel snapshots (server/UI): 7 inherited from (global)")
require.Contains(t, lines, " Max parallel file reads: 33 inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 4 GiB inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 4.3 GB inherited from (global)")
e.RunAndExpectSuccess(t, "policy", "set", "--global", "--max-parallel-snapshots=default", "--max-parallel-file-reads=default", "--parallel-upload-above-size-mib=default")
@@ -46,5 +46,5 @@ func TestSetUploadPolicy(t *testing.T) {
require.Contains(t, lines, " Max parallel snapshots (server/UI): 1 inherited from (global)")
require.Contains(t, lines, " Max parallel file reads: - inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 2 GiB inherited from (global)")
require.Contains(t, lines, " Parallel upload above size: 2.1 GB inherited from (global)")
}

View File

@@ -192,7 +192,7 @@ func appendFilesPolicyValue(items []policyTableRow, p *policy.Policy, def *polic
if maxSize := p.FilesPolicy.MaxFileSize; maxSize > 0 {
items = append(items, policyTableRow{
" Ignore files above:",
units.BytesStringBase2(maxSize),
units.BytesString(maxSize),
definitionPointToString(p.Target(), def.FilesPolicy.MaxFileSize),
})
}
@@ -349,13 +349,13 @@ func appendCompressionPolicyRows(rows []policyTableRow, p *policy.Policy, def *p
case p.CompressionPolicy.MaxSize > 0:
rows = append(rows, policyTableRow{fmt.Sprintf(
" Only compress files between %v and %v.",
units.BytesStringBase10(p.CompressionPolicy.MinSize),
units.BytesStringBase10(p.CompressionPolicy.MaxSize)), "", ""})
units.BytesString(p.CompressionPolicy.MinSize),
units.BytesString(p.CompressionPolicy.MaxSize)), "", ""})
case p.CompressionPolicy.MinSize > 0:
rows = append(rows, policyTableRow{fmt.Sprintf(
" Only compress files bigger than %v.",
units.BytesStringBase10(p.CompressionPolicy.MinSize)), "", ""})
units.BytesString(p.CompressionPolicy.MinSize)), "", ""})
default:
rows = append(rows, policyTableRow{" Compress files of all sizes.", "", ""})
@@ -442,5 +442,5 @@ func valueOrNotSetOptionalInt64Bytes(p *policy.OptionalInt64) string {
return "-"
}
return units.BytesStringBase2(int64(*p))
return units.BytesString(int64(*p))
}

View File

@@ -75,7 +75,7 @@ func (c *commandRepositorySetParameters) setSizeMBParameter(ctx context.Context,
*dst = v << 20 //nolint:gomnd
*anyChange = true
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(int64(v)<<20)) //nolint:gomnd
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesString(int64(v)<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Context, v int64, desc string, dst *int64, anyChange *bool) {
@@ -86,7 +86,7 @@ func (c *commandRepositorySetParameters) setInt64SizeMBParameter(ctx context.Con
*dst = v << 20 //nolint:gomnd
*anyChange = true
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesStringBase2(v<<20)) //nolint:gomnd
log(ctx).Infof(" - setting %v to %v.\n", desc, units.BytesString(v<<20)) //nolint:gomnd
}
func (c *commandRepositorySetParameters) setIntParameter(ctx context.Context, v int, desc string, dst *int, anyChange *bool) {

View File

@@ -32,7 +32,7 @@ func (s *formatSpecificTestSuite) TestRepositorySetParameters(t *testing.T) {
out := env.RunAndExpectSuccess(t, "repository", "status")
// default values
require.Contains(t, out, "Max pack length: 20 MiB")
require.Contains(t, out, "Max pack length: 21 MB")
require.Contains(t, out, fmt.Sprintf("Format version: %d", s.formatVersion))
// failure cases
@@ -43,11 +43,11 @@ func (s *formatSpecificTestSuite) TestRepositorySetParameters(t *testing.T) {
env.RunAndExpectSuccess(t, "repository", "set-parameters", "--index-version=2", "--max-pack-size-mb=33")
out = env.RunAndExpectSuccess(t, "repository", "status")
require.Contains(t, out, "Max pack length: 33 MiB")
require.Contains(t, out, "Max pack length: 34.6 MB")
env.RunAndExpectSuccess(t, "repository", "set-parameters", "--max-pack-size-mb=44")
out = env.RunAndExpectSuccess(t, "repository", "status")
require.Contains(t, out, "Max pack length: 44 MiB")
require.Contains(t, out, "Max pack length: 46.1 MB")
}
func (s *formatSpecificTestSuite) TestRepositorySetParametersRetention(t *testing.T) {
@@ -122,7 +122,7 @@ func (s *formatSpecificTestSuite) TestRepositorySetParametersUpgrade(t *testing.
out := env.RunAndExpectSuccess(t, "repository", "status")
// default values
require.Contains(t, out, "Max pack length: 20 MiB")
require.Contains(t, out, "Max pack length: 21 MB")
switch s.formatVersion {
case format.FormatVersion1:
@@ -176,7 +176,7 @@ func (s *formatSpecificTestSuite) TestRepositorySetParametersUpgrade(t *testing.
require.Contains(t, out, "Index Format: v2")
require.Contains(t, out, "Format version: 3")
require.Contains(t, out, "Epoch cleanup margin: 23h0m0s")
require.Contains(t, out, "Epoch advance on: 22 blobs or 77 MiB, minimum 3h0m0s")
require.Contains(t, out, "Epoch advance on: 22 blobs or 80.7 MB, minimum 3h0m0s")
require.Contains(t, out, "Epoch checkpoint every: 9 epochs")
env.RunAndExpectSuccess(t, "index", "epoch", "list")

View File

@@ -159,8 +159,8 @@ func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository)
switch cp, err := dr.BlobVolume().GetCapacity(ctx); {
case err == nil:
c.out.printStdout("Storage capacity: %v\n", units.BytesStringBase10(int64(cp.SizeB)))
c.out.printStdout("Storage available: %v\n", units.BytesStringBase10(int64(cp.FreeB)))
c.out.printStdout("Storage capacity: %v\n", units.BytesString(int64(cp.SizeB)))
c.out.printStdout("Storage available: %v\n", units.BytesString(int64(cp.FreeB)))
case errors.Is(err, blob.ErrNotAVolume):
c.out.printStdout("Storage capacity: unbounded\n")
default:
@@ -189,7 +189,7 @@ func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository)
c.outputRequiredFeatures(dr)
c.out.printStdout("Max pack length: %v\n", units.BytesStringBase2(int64(mp.MaxPackSize)))
c.out.printStdout("Max pack length: %v\n", units.BytesString(int64(mp.MaxPackSize)))
c.out.printStdout("Index Format: v%v\n", mp.IndexVersion)
emgr, epochMgrEnabled, emerr := dr.ContentReader().EpochManager()
@@ -208,7 +208,7 @@ func (c *commandRepositoryStatus) run(ctx context.Context, rep repo.Repository)
c.out.printStdout("\n")
c.out.printStdout("Epoch refresh frequency: %v\n", mp.EpochParameters.EpochRefreshFrequency)
c.out.printStdout("Epoch advance on: %v blobs or %v, minimum %v\n", mp.EpochParameters.EpochAdvanceOnCountThreshold, units.BytesStringBase2(mp.EpochParameters.EpochAdvanceOnTotalSizeBytesThreshold), mp.EpochParameters.MinEpochDuration)
c.out.printStdout("Epoch advance on: %v blobs or %v, minimum %v\n", mp.EpochParameters.EpochAdvanceOnCountThreshold, units.BytesString(mp.EpochParameters.EpochAdvanceOnTotalSizeBytesThreshold), mp.EpochParameters.MinEpochDuration)
c.out.printStdout("Epoch cleanup margin: %v\n", mp.EpochParameters.CleanupSafetyMargin)
c.out.printStdout("Epoch checkpoint every: %v epochs\n", mp.EpochParameters.FullCheckpointFrequency)
} else {

View File

@@ -139,7 +139,7 @@ func (c *commandRepositorySyncTo) runSyncWithStorage(ctx context.Context, src bl
}
srcBlobs++
c.outputSyncProgress(fmt.Sprintf(" Found %v BLOBs (%v) in the source repository, %v (%v) to copy", srcBlobs, units.BytesStringBase10(totalSrcSize), len(blobsToCopy), units.BytesStringBase10(totalCopyBytes)))
c.outputSyncProgress(fmt.Sprintf(" Found %v BLOBs (%v) in the source repository, %v (%v) to copy", srcBlobs, units.BytesString(totalSrcSize), len(blobsToCopy), units.BytesString(totalCopyBytes)))
return nil
}); err != nil {
@@ -158,8 +158,8 @@ func (c *commandRepositorySyncTo) runSyncWithStorage(ctx context.Context, src bl
log(ctx).Infof(
" Found %v BLOBs to delete (%v), %v in sync (%v)",
len(blobsToDelete), units.BytesStringBase10(totalDeleteBytes),
inSyncBlobs, units.BytesStringBase10(inSyncBytes),
len(blobsToDelete), units.BytesString(totalDeleteBytes),
inSyncBlobs, units.BytesString(inSyncBytes),
)
if c.repositorySyncDryRun {
@@ -186,7 +186,7 @@ func (c *commandRepositorySyncTo) listDestinationBlobs(ctx context.Context, dst
if err := dst.ListBlobs(ctx, "", func(bm blob.Metadata) error {
dstMetadata[bm.BlobID] = bm
dstTotalBytes += bm.Length
c.outputSyncProgress(fmt.Sprintf(" Found %v BLOBs in the destination repository (%v)", len(dstMetadata), units.BytesStringBase10(dstTotalBytes)))
c.outputSyncProgress(fmt.Sprintf(" Found %v BLOBs in the destination repository (%v)", len(dstMetadata), units.BytesString(dstTotalBytes)))
return nil
}); err != nil {
return nil, errors.Wrap(err, "error listing BLOBs in destination repository")
@@ -255,7 +255,7 @@ func (c *commandRepositorySyncTo) runSyncBlobs(ctx context.Context, src blob.Rea
c.outputSyncProgress(
fmt.Sprintf(" Copied %v blobs (%v), Speed: %v, ETA: %v",
numBlobs, units.BytesStringBase10(bytesCopied), speed, eta))
numBlobs, units.BytesString(bytesCopied), speed, eta))
progressMutex.Unlock()
}

View File

@@ -329,7 +329,7 @@ func printRestoreStats(ctx context.Context, st restore.Stats) {
var maybeSkipped, maybeErrors string
if st.SkippedCount > 0 {
maybeSkipped = fmt.Sprintf(", skipped %v (%v)", st.SkippedCount, units.BytesStringBase10(st.SkippedTotalFileSize))
maybeSkipped = fmt.Sprintf(", skipped %v (%v)", st.SkippedCount, units.BytesString(st.SkippedTotalFileSize))
}
if st.IgnoredErrorCount > 0 {
@@ -340,7 +340,7 @@ func printRestoreStats(ctx context.Context, st restore.Stats) {
st.RestoredFileCount,
st.RestoredDirCount,
st.RestoredSymlinkCount,
units.BytesStringBase10(st.RestoredTotalFileSize),
units.BytesString(st.RestoredTotalFileSize),
maybeSkipped, maybeErrors)
}
@@ -421,7 +421,7 @@ func (c *commandRestore) run(ctx context.Context, rep repo.Repository) error {
}
if stats.SkippedCount > 0 {
maybeSkipped = fmt.Sprintf(", skipped %v (%v)", stats.SkippedCount, units.BytesStringBase10(stats.SkippedTotalFileSize))
maybeSkipped = fmt.Sprintf(", skipped %v (%v)", stats.SkippedCount, units.BytesString(stats.SkippedTotalFileSize))
}
if stats.IgnoredErrorCount > 0 {
@@ -429,8 +429,8 @@ func (c *commandRestore) run(ctx context.Context, rep repo.Repository) error {
}
log(ctx).Infof("Processed %v (%v) of %v (%v)%v%v%v.",
restoredCount, units.BytesStringBase10(stats.RestoredTotalFileSize),
enqueuedCount, units.BytesStringBase10(stats.EnqueuedTotalFileSize),
restoredCount, units.BytesString(stats.RestoredTotalFileSize),
enqueuedCount, units.BytesString(stats.EnqueuedTotalFileSize),
maybeSkipped,
maybeErrors,
maybeRemaining)

View File

@@ -101,12 +101,12 @@ func (c *commandSnapshotEstimate) run(ctx context.Context, rep repo.Repository)
return errors.Wrap(err, "error estimating")
}
c.out.printStdout("Snapshot includes %v file(s), total size %v\n", ep.stats.TotalFileCount, units.BytesStringBase10(ep.stats.TotalFileSize))
c.out.printStdout("Snapshot includes %v file(s), total size %v\n", ep.stats.TotalFileCount, units.BytesString(ep.stats.TotalFileSize))
c.showBuckets(ep.included, c.snapshotEstimateShowFiles)
c.out.printStdout("\n")
if ep.stats.ExcludedFileCount > 0 {
c.out.printStdout("Snapshot excludes %v file(s), total size %v\n", ep.stats.ExcludedFileCount, units.BytesStringBase10(ep.stats.ExcludedTotalFileSize))
c.out.printStdout("Snapshot excludes %v file(s), total size %v\n", ep.stats.ExcludedFileCount, units.BytesString(ep.stats.ExcludedTotalFileSize))
c.showBuckets(ep.excluded, true)
} else {
c.out.printStdout("Snapshot excludes no files.\n")
@@ -145,16 +145,16 @@ func (c *commandSnapshotEstimate) showBuckets(buckets snapshotfs.SampleBuckets,
if i == 0 {
sizeRange = fmt.Sprintf("< %-6v",
units.BytesStringBase10(bucket.MinSize))
units.BytesString(bucket.MinSize))
} else {
sizeRange = fmt.Sprintf("%-6v...%6v",
units.BytesStringBase10(bucket.MinSize),
units.BytesStringBase10(buckets[i-1].MinSize))
units.BytesString(bucket.MinSize),
units.BytesString(buckets[i-1].MinSize))
}
c.out.printStdout("%18v: %7v files, total size %v\n",
sizeRange,
bucket.Count, units.BytesStringBase10(bucket.TotalSize))
bucket.Count, units.BytesString(bucket.TotalSize))
if showFiles {
for _, sample := range bucket.Examples {

View File

@@ -149,11 +149,11 @@ func snapshotSizeDelta(m1, m2 *snapshot.Manifest) string {
deltaBytes := m2.RootEntry.DirSummary.TotalFileSize - m1.RootEntry.DirSummary.TotalFileSize
if deltaBytes < 0 {
return "-" + units.BytesStringBase10(-deltaBytes)
return "-" + units.BytesString(-deltaBytes)
}
if deltaBytes > 0 {
return "+" + units.BytesStringBase10(deltaBytes)
return "+" + units.BytesString(deltaBytes)
}
return ""

View File

@@ -440,7 +440,7 @@ func (c *commandSnapshotList) entryBits(ctx context.Context, m *snapshot.Manifes
if u := m.StorageStats; u != nil {
bits = append(bits,
fmt.Sprintf("new-data:%v", units.BytesStringBase10(atomic.LoadInt64(&u.NewData.PackedContentBytes))),
fmt.Sprintf("new-data:%v", units.BytesString(atomic.LoadInt64(&u.NewData.PackedContentBytes))),
fmt.Sprintf("new-files:%v", atomic.LoadInt32(&u.NewData.FileObjectCount)),
fmt.Sprintf("new-dirs:%v", atomic.LoadInt32(&u.NewData.DirObjectCount)),
fmt.Sprintf("compression:%v", formatCompressionPercentage(atomic.LoadInt64(&u.NewData.OriginalContentBytes), atomic.LoadInt64(&u.NewData.PackedContentBytes))),
@@ -452,7 +452,7 @@ func (c *commandSnapshotList) entryBits(ctx context.Context, m *snapshot.Manifes
func deltaBytes(b int64) string {
if b > 0 {
return "(+" + units.BytesStringBase10(b) + ")"
return "(+" + units.BytesString(b) + ")"
}
return ""

View File

@@ -54,7 +54,7 @@ func showContentWithFlags(w io.Writer, rd io.Reader, unzip, indentJSON bool) err
func maybeHumanReadableBytes(enable bool, value int64) string {
if enable {
return units.BytesStringBase10(value)
return units.BytesString(value)
}
return fmt.Sprintf("%v", value)

View File

@@ -66,17 +66,17 @@ func logBucketSamples(ctx context.Context, buckets snapshotfs.SampleBuckets, pre
if i == 0 {
sizeRange = fmt.Sprintf("< %-6v",
units.BytesStringBase10(bucket.MinSize))
units.BytesString(bucket.MinSize))
} else {
sizeRange = fmt.Sprintf("%-6v...%6v",
units.BytesStringBase10(bucket.MinSize),
units.BytesStringBase10(buckets[i-1].MinSize))
units.BytesString(bucket.MinSize),
units.BytesString(buckets[i-1].MinSize))
}
log(ctx).Infof("%v files %v: %7v files, total size %v\n",
prefix,
sizeRange,
bucket.Count, units.BytesStringBase10(bucket.TotalSize))
bucket.Count, units.BytesString(bucket.TotalSize))
hasAny = true

View File

@@ -288,6 +288,7 @@ type CLIInfo struct {
// UIPreferences represents JSON object storing UI preferences.
type UIPreferences struct {
Theme string `json:"theme"` // 'dark', 'light' or ''
PageSize int `json:"pageSize"` // A page size; the actual possible values will only be provided by the frontend
BytesStringBase2 bool `json:"bytesStringBase2"` // If `true`, display storage values in base-2 (default is base-10)
Theme string `json:"theme"` // 'dark', 'light' or ''
PageSize int `json:"pageSize"` // A page size; the actual possible values will only be provided by the frontend
}

View File

@@ -1,8 +1,10 @@
// Package units contains helpers to convert sizes to humand-readable strings.
// Package units contains helpers to convert sizes to human-readable strings.
package units
import (
"fmt"
"os"
"strconv"
"strings"
)
@@ -12,6 +14,10 @@
base2UnitPrefixes = []string{"", "Ki", "Mi", "Gi", "Ti"}
)
const (
bytesStringBase2Envar = "KOPIA_BYTES_STRING_BASE_2"
)
func niceNumber(f float64) string {
return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", f), "0"), ".")
}
@@ -40,6 +46,15 @@ func BytesStringBase2(b int64) string {
return toDecimalUnitString(float64(b), 1024.0, base2UnitPrefixes, "B")
}
// BytesString formats the given value as bytes with the unit provided from the environment.
func BytesString(b int64) string {
if v, _ := strconv.ParseBool(os.Getenv(bytesStringBase2Envar)); v {
return BytesStringBase2(b)
}
return BytesStringBase10(b)
}
// BytesPerSecondsString formats the given value bytes per second with the appropriate base-10 suffix (KB/s, MB/s, GB/s, ...)
func BytesPerSecondsString(bps float64) string {
//nolint:gomnd

View File

@@ -1,34 +1,62 @@
package units
import "testing"
import (
"os"
"testing"
)
var base10Cases = []struct {
value int64
expected string
}{
{0, "0 B"},
{1, "1 B"},
{2, "2 B"},
{899, "899 B"},
{900, "0.9 KB"},
{999, "1 KB"},
{1000, "1 KB"},
{1200, "1.2 KB"},
{899999, "900 KB"},
{900000, "0.9 MB"},
{999000, "1 MB"},
{999999, "1 MB"},
{1000000, "1 MB"},
{99000000, "99 MB"},
{990000000, "1 GB"},
{9990000000, "10 GB"},
{99900000000, "99.9 GB"},
{1000000000000, "1 TB"},
{99000000000000, "99 TB"},
}
var base2Cases = []struct {
value int64
expected string
}{
{0, "0 B"},
{1, "1 B"},
{2, "2 B"},
{899, "899 B"},
{900, "900 B"},
{999, "1 KiB"},
{1024, "1 KiB"},
{1400, "1.4 KiB"},
{900<<10 - 1, "900 KiB"},
{900 << 10, "900 KiB"},
{999000, "1 MiB"},
{999999, "1 MiB"},
{1000000, "1 MiB"},
{99 << 20, "99 MiB"},
{1 << 30, "1 GiB"},
{10 << 30, "10 GiB"},
{99900000000, "93 GiB"},
{1000000000000, "0.9 TiB"},
{99000000000000, "90 TiB"},
}
func TestBytesStringBase10(t *testing.T) {
cases := []struct {
value int64
expected string
}{
{0, "0 B"},
{1, "1 B"},
{2, "2 B"},
{899, "899 B"},
{900, "0.9 KB"},
{999, "1 KB"},
{1000, "1 KB"},
{1200, "1.2 KB"},
{899999, "900 KB"},
{900000, "0.9 MB"},
{999000, "1 MB"},
{999999, "1 MB"},
{1000000, "1 MB"},
{99000000, "99 MB"},
{990000000, "1 GB"},
{9990000000, "10 GB"},
{99900000000, "99.9 GB"},
{1000000000000, "1 TB"},
{99000000000000, "99 TB"},
}
for i, c := range cases {
for i, c := range base10Cases {
actual := BytesStringBase10(c.value)
if actual != c.expected {
t.Errorf("case #%v failed for %v, expected: '%v', got '%v'", i, c.value, c.expected, actual)
@@ -37,35 +65,32 @@ func TestBytesStringBase10(t *testing.T) {
}
func TestBytesStringBase2(t *testing.T) {
cases := []struct {
value int64
expected string
}{
{0, "0 B"},
{1, "1 B"},
{2, "2 B"},
{899, "899 B"},
{900, "900 B"},
{999, "1 KiB"},
{1024, "1 KiB"},
{1400, "1.4 KiB"},
{900<<10 - 1, "900 KiB"},
{900 << 10, "900 KiB"},
{999000, "1 MiB"},
{999999, "1 MiB"},
{1000000, "1 MiB"},
{99 << 20, "99 MiB"},
{1 << 30, "1 GiB"},
{10 << 30, "10 GiB"},
{99900000000, "93 GiB"},
{1000000000000, "0.9 TiB"},
{99000000000000, "90 TiB"},
}
for i, c := range cases {
for i, c := range base2Cases {
actual := BytesStringBase2(c.value)
if actual != c.expected {
t.Errorf("case #%v failed for %v, expected: '%v', got '%v'", i, c.value, c.expected, actual)
}
}
}
func TestBytesString(t *testing.T) {
defer os.Unsetenv(bytesStringBase2Envar)
t.Setenv(bytesStringBase2Envar, "false")
for i, c := range base10Cases {
actual := BytesString(c.value)
if actual != c.expected {
t.Errorf("case #%v failed for %v, expected: '%v', got '%v'", i, c.value, c.expected, actual)
}
}
t.Setenv(bytesStringBase2Envar, "true")
for i, c := range base2Cases {
actual := BytesString(c.value)
if actual != c.expected {
t.Errorf("case #%v failed for %v, expected: '%v', got '%v'", i, c.value, c.expected, actual)
}
}
}

View File

@@ -65,11 +65,11 @@ type MutableParameters struct {
// Validate validates the parameters.
func (v *MutableParameters) Validate() error {
if v.MaxPackSize < minValidPackSize {
return errors.Errorf("max pack size too small, must be >= %v", units.BytesStringBase2(minValidPackSize))
return errors.Errorf("max pack size too small, must be >= %v", units.BytesString(minValidPackSize))
}
if v.MaxPackSize > maxValidPackSize {
return errors.Errorf("max pack size too big, must be <= %v", units.BytesStringBase2(maxValidPackSize))
return errors.Errorf("max pack size too big, must be <= %v", units.BytesString(maxValidPackSize))
}
if v.IndexVersion < 0 || v.IndexVersion > index.Version2 {

View File

@@ -48,7 +48,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
}
cnt, del := deleted.Add(bm.Length)
if cnt%100 == 0 {
log(ctx).Infof(" deleted %v unreferenced blobs (%v)", cnt, units.BytesStringBase10(del))
log(ctx).Infof(" deleted %v unreferenced blobs (%v)", cnt, units.BytesString(del))
}
}
@@ -119,7 +119,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
close(unused)
unreferencedCount, unreferencedSize := unreferenced.Approximate()
log(ctx).Debugf("Found %v blobs to delete (%v)", unreferencedCount, units.BytesStringBase10(unreferencedSize))
log(ctx).Debugf("Found %v blobs to delete (%v)", unreferencedCount, units.BytesString(unreferencedSize))
// wait for all delete workers to finish.
if err := eg.Wait(); err != nil {
@@ -132,7 +132,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
del, cnt := deleted.Approximate()
log(ctx).Infof("Deleted total %v unreferenced blobs (%v)", del, units.BytesStringBase10(cnt))
log(ctx).Infof("Deleted total %v unreferenced blobs (%v)", del, units.BytesString(cnt))
return int(del), nil
}

View File

@@ -82,7 +82,7 @@ func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRe
toDelete := allLogBlobs[deletePosition:]
log(ctx).Debugf("Keeping %v logs of total size %v", deletePosition, units.BytesStringBase2(totalSize))
log(ctx).Debugf("Keeping %v logs of total size %v", deletePosition, units.BytesString(totalSize))
if !opt.DryRun {
for _, bm := range toDelete {

View File

@@ -115,7 +115,7 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *
wg.Wait()
log(ctx).Debugf("Total bytes rewritten %v", units.BytesStringBase10(totalBytes))
log(ctx).Debugf("Total bytes rewritten %v", units.BytesString(totalBytes))
if failedCount == 0 {
//nolint:wrapcheck

View File

@@ -8,6 +8,14 @@ Kopia provides a command-line interface (CLI) for accessing all its functions. A
Kopia functionality is organized into [Common Commands](common/) for typical use or [Advanced Commands](advanced/) for low-level data manipulation or recovery. Click on the above links for more details.
### Environment Variables
The following environment variables can be used to configure how Kopia runs:
| Variable Name | Default | Description |
| --------------------------- | ------- | -------------------------------------------------------------------------------------------------------- |
| `KOPIA_BYTES_STRING_BASE_2` | `false` | If set to `true`, Kopia will output storage values in binary (base-2). The default is decimal (base-10). |
### Connecting to Repository
Most commands require a [Repository](../../advanced/architecture/) to be connected first. The first time you use Kopia, a repository must be created; later on, it can be connected to from one or more machines.

View File

@@ -29,7 +29,7 @@ func (b *SampleBucket) add(fname string, size int64, maxExamplesPerBucket int) {
b.TotalSize += size
if len(b.Examples) < maxExamplesPerBucket {
b.Examples = append(b.Examples, fmt.Sprintf("%v - %v", fname, units.BytesStringBase10(size)))
b.Examples = append(b.Examples, fmt.Sprintf("%v - %v", fname, units.BytesString(size)))
}
}
@@ -94,7 +94,7 @@ func Estimate(ctx context.Context, entry fs.Directory, policyTree *policy.Tree,
estimateLog(ctx).Debugf("excluded dir %v", relativePath)
} else {
estimateLog(ctx).Debugf("excluded file %v (%v)", relativePath, units.BytesStringBase10(e.Size()))
estimateLog(ctx).Debugf("excluded file %v (%v)", relativePath, units.BytesString(e.Size()))
atomic.AddInt32(&stats.ExcludedFileCount, 1)
atomic.AddInt64(&stats.ExcludedTotalFileSize, e.Size())
eb.add(relativePath, e.Size(), maxExamplesPerBucket)

View File

@@ -83,10 +83,10 @@ func Run(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, sa
l := log(ctx)
l.Infof("GC found %v unused contents (%v)", st.UnusedCount, units.BytesStringBase2(st.UnusedBytes))
l.Infof("GC found %v unused contents that are too recent to delete (%v)", st.TooRecentCount, units.BytesStringBase2(st.TooRecentBytes))
l.Infof("GC found %v in-use contents (%v)", st.InUseCount, units.BytesStringBase2(st.InUseBytes))
l.Infof("GC found %v in-use system-contents (%v)", st.SystemCount, units.BytesStringBase2(st.SystemBytes))
l.Infof("GC found %v unused contents (%v)", st.UnusedCount, units.BytesString(st.UnusedBytes))
l.Infof("GC found %v unused contents that are too recent to delete (%v)", st.TooRecentCount, units.BytesString(st.TooRecentBytes))
l.Infof("GC found %v in-use contents (%v)", st.InUseCount, units.BytesString(st.InUseBytes))
l.Infof("GC found %v in-use system-contents (%v)", st.SystemCount, units.BytesString(st.SystemBytes))
if st.UnusedCount > 0 && !gcDelete {
return errors.Errorf("Not deleting because 'gcDelete' was not set")
@@ -153,7 +153,7 @@ func runInternal(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete
}
if cnt%100000 == 0 {
log(ctx).Infof("... found %v unused contents so far (%v bytes)", cnt, units.BytesStringBase2(totalSize))
log(ctx).Infof("... found %v unused contents so far (%v bytes)", cnt, units.BytesString(totalSize))
if gcDelete {
if err := rep.Flush(ctx); err != nil {
return errors.Wrap(err, "flush error")

View File

@@ -100,7 +100,7 @@ func (kc *KopiaClient) SnapshotCreate(ctx context.Context, key string, val []byt
return errors.Wrap(err, "cannot get manifest")
}
log.Printf("snapshotting %v", units.BytesStringBase10(atomic.LoadInt64(&man.Stats.TotalFileSize)))
log.Printf("snapshotting %v", units.BytesString(atomic.LoadInt64(&man.Stats.TotalFileSize)))
if _, err := snapshot.SaveSnapshot(ctx, rw, man); err != nil {
return errors.Wrap(err, "cannot save snapshot")
@@ -143,7 +143,7 @@ func (kc *KopiaClient) SnapshotRestore(ctx context.Context, key string) ([]byte,
return nil, err
}
log.Printf("restored %v", units.BytesStringBase10(int64(len(val))))
log.Printf("restored %v", units.BytesString(int64(len(val))))
if err := r.Close(ctx); err != nil {
return nil, err