kopia/internal/gather/gather_write_buffer_chunk_test.go
Jarek Kowalski 34da3a2415 feat(general): implemented custom log encoder for ZAP (#2116)
Turns out the standard ConsoleEncoder is not optimized at all and we're
emitting up to 8 log entries per file. This change avoids a massive
amount of allocations and brings some latency reduction.

Backing up 100k files in a flat directory:

duration: current:7.8 baseline:7.9 change:-1.0 %
repo_size: current:1019954521.4 baseline:1019976318.9 change:-0.0 %
num_files: current:58.0 baseline:58.0 change:0%
avg_heap_objects: current:6682141.0 baseline:7508631.3 change:-11.0 %
avg_heap_bytes: current:845404672.0 baseline:867325413.9 change:-2.5 %
avg_ram: current:156.5 baseline:159.1 change:-1.7 %
max_ram: current:278.3 baseline:287.2 change:-3.1 %
avg_cpu: current:153.8 baseline:156.4 change:-1.7 %
max_cpu: current:298.4 baseline:297.1 change:+0.4 %

Backing up Linux 5.18.4:

duration: current:3.6 baseline:4.2 change:-14.2 %
repo_size: current:1081624213.7 baseline:1081635886.8 change:-0.0 %
num_files: current:60.0 baseline:60.0 change:0%
avg_heap_objects: current:5180192.3 baseline:5831270.7 change:-11.2 %
avg_heap_bytes: current:783468754.2 baseline:804350042.1 change:-2.6 %
avg_ram: current:239.0 baseline:240.6 change:-0.6 %
max_ram: current:384.8 baseline:368.0 change:+4.6 %
avg_cpu: current:259.8 baseline:230.8 change:+12.6 %
max_cpu: current:321.6 baseline:303.9 change:+5.9 %
2022-07-02 13:55:01 +00:00
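
The custom encoder itself is not part of this file, but the allocation claim above is easy to probe directly against zap's stock ConsoleEncoder. The following is a minimal, hypothetical Go benchmark (BenchmarkConsoleEncoderAllocs, not from the kopia tree) that measures per-entry allocations of zapcore.NewConsoleEncoder when run with `go test -bench . -benchmem`:

package logbench

import (
	"testing"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// BenchmarkConsoleEncoderAllocs measures how many allocations zap's stock
// ConsoleEncoder performs for a single, fairly typical log entry. It is
// illustrative only and is not the custom encoder introduced by this change.
func BenchmarkConsoleEncoderAllocs(b *testing.B) {
	enc := zapcore.NewConsoleEncoder(zapcore.EncoderConfig{
		TimeKey:     "ts",
		LevelKey:    "level",
		MessageKey:  "msg",
		EncodeTime:  zapcore.ISO8601TimeEncoder,
		EncodeLevel: zapcore.CapitalLevelEncoder,
	})

	entry := zapcore.Entry{Time: time.Now(), Level: zapcore.InfoLevel, Message: "snapshotted file"}
	fields := []zapcore.Field{zap.String("path", "some/dir/file.txt"), zap.Int64("length", 12345)}

	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		buf, err := enc.EncodeEntry(entry, fields)
		if err != nil {
			b.Fatal(err)
		}

		// return the encoded buffer to zap's internal pool
		buf.Free()
	}
}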

package gather

import (
	"bytes"
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/repo/logging"
	"github.com/kopia/kopia/repo/splitter"
)

func TestWriteBufferChunk(t *testing.T) {
	// reset for testing
	all := &chunkAllocator{
		chunkSize:       100,
		maxFreeListSize: 10,
	}

	chunk1 := all.allocChunk()
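	// the append result is intentionally discarded: the bytes are written into the
	// chunk's backing array without growing the slice header, so len(chunk1) stays 0
	// and the data later reveals which underlying array a re-allocated chunk reuses.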
	_ = append(chunk1, []byte("chunk1")...)

	if got, want := len(chunk1), 0; got != want {
		t.Errorf("invalid chunk len: %v, want %v", got, want)
	}

	if got, want := cap(chunk1), all.chunkSize; got != want {
		t.Errorf("invalid chunk cap: %v, want %v", got, want)
	}

	if got, want := all.freeListHighWaterMark, 0; got != want {
		t.Errorf("unexpected high water mark %v, want %v", got, want)
	}
	chunk2 := all.allocChunk()
	_ = append(chunk2, []byte("chunk2")...)

	if got, want := all.freeListHighWaterMark, 0; got != want {
		t.Errorf("unexpected high water mark %v, want %v", got, want)
	}
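	// releasing returns a chunk to the allocator's free list (reused LIFO below);
	// the high-water mark records the largest free-list size observed so far.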
	all.releaseChunk(chunk2)

	if got, want := all.freeListHighWaterMark, 1; got != want {
		t.Errorf("unexpected high water mark %v, want %v", got, want)
	}

	all.releaseChunk(chunk1)

	if got, want := all.freeListHighWaterMark, 2; got != want {
		t.Errorf("unexpected high water mark %v, want %v", got, want)
	}

	// allocate chunk3 - make sure we got the same slice as chunk1 (LIFO)
	chunk3 := all.allocChunk()
	if got, want := chunk3[0:6], []byte("chunk1"); !bytes.Equal(got, want) {
		t.Errorf("got wrong chunk data %q, want %q", string(got), string(want))
	}
	// allocate chunk4 - make sure we got the same slice as chunk2 (LIFO)
	chunk4 := all.allocChunk()
	if got, want := chunk4[0:6], []byte("chunk2"); !bytes.Equal(got, want) {
		t.Errorf("got wrong chunk data %q, want %q", string(got), string(want))
	}
}

func TestContigAllocatorChunkSize(t *testing.T) {
	// verify that contiguous allocator has chunk size big enough for all splitter results
	// + some minimal overhead.
	const maxOverhead = 128

	for _, s := range splitter.SupportedAlgorithms() {
		mss := splitter.GetFactory(s)().MaxSegmentSize()

		if got, want := maxContiguousAllocator.chunkSize, mss+maxOverhead; got < want {
			t.Errorf("contiguous allocator chunk size too small: %v, want %v", got, want)
		}
	}
}

func TestTrackAllocation(t *testing.T) {
	old := trackChunkAllocations
	trackChunkAllocations = true

	defer func() {
		trackChunkAllocations = old
	}()

	var tmp WriteBuffer
	defer tmp.Close()

	var log bytes.Buffer

	ctx := logging.WithLogger(context.Background(), logging.ToWriter(&log))

	DumpStats(ctx)

	require.Contains(t, log.String(), `"chunksAlive":0`)
	require.NotContains(t, log.String(), "leaked chunk")
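	// appending data forces the WriteBuffer to allocate a chunk; with tracking
	// enabled, DumpStats should now report it as alive and flag it as a potential leak
	// until the buffer is closed.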
	tmp.Append([]byte{1, 2, 3})

	log.Reset()
	DumpStats(ctx)

	require.Contains(t, log.String(), `"chunksAlive":1`)
	require.Contains(t, log.String(), "leaked chunk")

	log.Reset()
	tmp.Close()
	DumpStats(ctx)

	require.Contains(t, log.String(), `"chunksAlive":0`)
	require.NotContains(t, log.String(), "leaked chunk")
}