Files
kopia/cli/command_benchmark_compression.go
Jarek Kowalski 1a8fcb086c Added endurance test which tests kopia over long time scale (#558)
Globally replaced all use of time with internal 'clock' package
which provides indirection to time.Now()

Added support for faking clock in Kopia via KOPIA_FAKE_CLOCK_ENDPOINT

logfile: squelch annoying log message

testenv: added faketimeserver which serves time over HTTP

testing: added endurance test which tests kopia over long time scale

This creates kopia repository and simulates usage of Kopia over multiple
months (using accelerated fake time) to trigger effects that are only
visible after long time passage (maintenance, compactions, expirations).

The test is not yet part of any test suite; it will run in
post-submit mode only, preferably 24/7.

testing: refactored internal/clock to only support injection when
'testing' build tag is present
2020-08-26 23:03:46 -07:00

116 lines
3.1 KiB
Go

package cli
import (
"bytes"
"hash/fnv"
"io/ioutil"
"sort"
kingpin "gopkg.in/alecthomas/kingpin.v2"
"github.com/kopia/kopia/internal/clock"
"github.com/kopia/kopia/internal/units"
"github.com/kopia/kopia/repo/compression"
)
// Flags for the "benchmark compression" subcommand. Parsed values are
// read in runBenchmarkCompressionAction.
var (
	benchmarkCompressionCommand = benchmarkCommands.Command("compression", "Run compression benchmarks")
	// Size of the synthetic input buffer when --data-file is not given.
	benchmarkCompressionBlockSize = benchmarkCompressionCommand.Flag("block-size", "Size of a block to encrypt").Default("1MB").Bytes()
	// How many times each compressor is invoked when measuring throughput.
	benchmarkCompressionRepeat = benchmarkCompressionCommand.Flag("repeat", "Number of repetitions").Default("100").Int()
	// Optional file whose contents replace the zero-filled input buffer.
	benchmarkCompressionDataFile = benchmarkCompressionCommand.Flag("data-file", "Use data from the given file instead of empty").ExistingFile()
	// When set, results are sorted by compressed size instead of throughput.
	benchmarkCompressionBySize = benchmarkCompressionCommand.Flag("by-size", "Sort results by size").Bool()
	// When set, verifies each compressor produces identical output on every repetition.
	benchmarkCompressionVerifyStable = benchmarkCompressionCommand.Flag("verify-stable", "Verify that compression is stable").Bool()
)
// runBenchmarkCompressionAction benchmarks every registered compressor
// against the same input buffer, then prints a table of compressed size
// and throughput, sorted by size or throughput per the --by-size flag.
//
// Fixes over the previous version:
//   - a compressor that fails or is unstable is skipped entirely instead
//     of being reported with a bogus throughput and compressed size, and
//     its error is printed once instead of once per remaining repetition;
//   - --repeat=0 no longer produces a NaN/Inf throughput.
func runBenchmarkCompressionAction(ctx *kingpin.ParseContext) error {
	type benchResult struct {
		compression    compression.Name
		throughput     float64
		compressedSize int64
	}

	var results []benchResult

	// Input buffer: zero-filled by default, replaced by file contents when
	// --data-file is given (in which case its length overrides --block-size).
	data := make([]byte, *benchmarkCompressionBlockSize)

	if *benchmarkCompressionDataFile != "" {
		d, err := ioutil.ReadFile(*benchmarkCompressionDataFile)
		if err != nil {
			return err
		}

		data = d
	}

	cnt := *benchmarkCompressionRepeat

	for name, comp := range compression.ByName {
		printStderr("Benchmarking compressor '%v' (%v x %v bytes)\n", name, cnt, len(data))

		if cnt <= 0 {
			// Nothing to measure; avoid division by zero below.
			continue
		}

		t0 := clock.Now()

		var (
			compressedSize int64
			lastHash       uint64
			compressed     bytes.Buffer
			failed         bool
		)

		for i := 0; i < cnt; i++ {
			compressed.Reset()

			if err := comp.Compress(&compressed, data); err != nil {
				printStderr("compression %q failed: %v\n", name, err)

				failed = true

				break
			}

			compressedSize = int64(compressed.Len())

			if *benchmarkCompressionVerifyStable {
				h := hashOf(compressed.Bytes())

				if i == 0 {
					lastHash = h
				} else if h != lastHash {
					printStderr("compression %q is not stable\n", name)

					failed = true

					break
				}
			}
		}

		if failed {
			// Do not report a result for a compressor that errored or
			// produced unstable output.
			continue
		}

		elapsed := clock.Since(t0)
		bytesPerSecond := float64(len(data)) * float64(cnt) / elapsed.Seconds()

		results = append(results, benchResult{compression: name, throughput: bytesPerSecond, compressedSize: compressedSize})
	}

	// Sort by the metric the user asked for.
	if *benchmarkCompressionBySize {
		sort.Slice(results, func(i, j int) bool {
			return results[i].compressedSize < results[j].compressedSize
		})
	} else {
		sort.Slice(results, func(i, j int) bool {
			return results[i].throughput > results[j].throughput
		})
	}

	printStdout("     %-30v %-15v %v\n", "Compression", "Compressed Size", "Throughput")
	printStdout("-----------------------------------------------------------------\n")

	for ndx, r := range results {
		printStdout("%3d. %-30v %-15v %v / second\n", ndx, r.compression, r.compressedSize, units.BytesStringBase2(int64(r.throughput)))
	}

	return nil
}
// init registers the benchmark action with the CLI command so that
// "kopia benchmark compression" invokes runBenchmarkCompressionAction.
func init() {
	benchmarkCompressionCommand.Action(runBenchmarkCompressionAction)
}
// hashOf returns the 64-bit FNV-1a digest of b, used to verify that a
// compressor produces identical output across repetitions.
func hashOf(b []byte) uint64 {
	hasher := fnv.New64a()
	_, _ = hasher.Write(b) // fnv hashers never return an error

	return hasher.Sum64()
}