kopia/tests/stress_test/stress_test.go
Jarek Kowalski 792cc874dc repo: allow reusing of object writer buffers (#1315)
This reduces memory consumption and speeds up backups.

1. Backing up a kopia repository (3.5 GB, 133,102 files, 20,074 dirs):

before: 25s, 490 MB
after: 21s, 445 MB

2. Large files (14.8 GB, 76 files):

before: 30s, 597 MB
after: 28s, 495 MB

All tests were repeated 5 times against a clean local filesystem repository.
2021-09-25 14:54:31 -07:00
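
The commit message describes the effect but not the mechanism. As a rough sketch of the buffer-reuse idea only (not kopia's actual implementation; bufPool and writeObject below are hypothetical names), an object writer can draw its scratch buffers from a sync.Pool instead of allocating a fresh slice per object:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool is a hypothetical free list of scratch buffers. Returning buffers
// here after each object write lets later writes reuse the allocation
// instead of growing a new slice every time.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// writeObject is a hypothetical object-writer step: it borrows a buffer,
// stages the payload in it, and returns the buffer to the pool when done.
func writeObject(payload []byte) int {
	buf := bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset() // drop contents but keep capacity for the next writer
		bufPool.Put(buf)
	}()

	buf.Write(payload) // hashing/encryption/packing would happen here
	return buf.Len()
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(writeObject(make([]byte, 1024)))
	}
}

Fewer transient allocations per written object means less garbage-collector pressure, which is consistent with the time and memory improvements reported above.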


package stress_test

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/kopia/kopia/internal/blobtesting"
	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/gather"
	"github.com/kopia/kopia/internal/testlogging"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/content"
	"github.com/kopia/kopia/repo/encryption"
)
// goroutineCount is the number of concurrent stress workers.
const goroutineCount = 16
func TestStressBlockManager(t *testing.T) {
	if os.Getenv("KOPIA_STRESS_TEST") == "" {
		t.Skip("skipping stress test")
	}

	// in-memory, map-backed blob storage.
	data := blobtesting.DataMap{}
	keyTimes := map[blob.ID]time.Time{}
	memst := blobtesting.NewMapStorage(data, keyTimes, clock.Now)

	// run longer on CI.
	duration := 3 * time.Second
	if os.Getenv("CI") != "" {
		duration = 30 * time.Second
	}

	stressTestWithStorage(t, memst, duration)
}
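
// stressTestWithStorage runs goroutineCount parallel workers against content
// managers backed by the given storage until the given duration elapses.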
// nolint:thelper
func stressTestWithStorage(t *testing.T, st blob.Storage, duration time.Duration) {
	ctx := testlogging.Context(t)

	openMgr := func() (*content.WriteManager, error) {
		return content.NewManagerForTesting(ctx, st, &content.FormattingOptions{
			Hash:       "HMAC-SHA256-128",
			Encryption: encryption.DefaultAlgorithm,
			MutableParameters: content.MutableParameters{
				Version:     1,
				MaxPackSize: 20000000,
			},
			MasterKey: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
		}, nil, nil)
	}

	// log the base seed so failures are reproducible.
	seed0 := clock.Now().Nanosecond()
	t.Logf("running with seed %v", seed0)

	deadline := clock.Now().Add(duration)

	t.Run("workers", func(t *testing.T) {
		for i := 0; i < goroutineCount; i++ {
			i := i // capture loop variable for the parallel subtest

			t.Run(fmt.Sprintf("worker-%v", i), func(t *testing.T) {
				t.Parallel()

				// each worker gets a distinct, deterministic seed.
				stressWorker(ctx, t, deadline, openMgr, int64(seed0+i))
			})
		}
	})
}
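
// stressWorker repeatedly writes random contents, occasionally flushing or
// reopening the manager, and verifies that previously written contents can
// be read back intact.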
// nolint:thelper
func stressWorker(ctx context.Context, t *testing.T, deadline time.Time, openMgr func() (*content.WriteManager, error), seed int64) {
	src := rand.NewSource(seed)
	rnd := rand.New(src)

	bm, err := openMgr()
	if err != nil {
		t.Fatalf("error opening manager: %v", err)
	}

	type writtenBlock struct {
		contentID content.ID
		data      []byte
	}

	var workerBlocks []writtenBlock

	for clock.Now().Before(deadline) {
		// generate a blob of random length (shorter than 30,000 bytes)
		// filled with random bytes.
		l := rnd.Intn(30000)
		data := make([]byte, l)
		if _, err := rnd.Read(data); err != nil {
			t.Errorf("err: %v", err)
			return
		}

		// keep an independent copy of the bytes for later read-back
		// verification.
		dataCopy := append([]byte{}, data...)

		contentID, err := bm.WriteContent(ctx, gather.FromSlice(data), "", content.NoCompression)
		if err != nil {
			t.Errorf("err: %v", err)
			return
		}

		// occasionally flush, or flush and reopen the manager, to exercise
		// pack writing and index reloading.
		switch rnd.Intn(20) {
		case 0:
			if ferr := bm.Flush(ctx); ferr != nil {
				t.Errorf("flush error: %v", ferr)
				return
			}
		case 1:
			if ferr := bm.Flush(ctx); ferr != nil {
				t.Errorf("flush error: %v", ferr)
				return
			}

			if cerr := bm.Close(ctx); cerr != nil {
				t.Errorf("close error: %v", cerr)
				return
			}

			bm, err = openMgr()
			if err != nil {
				t.Errorf("error opening: %v", err)
				return
			}
		}

		workerBlocks = append(workerBlocks, writtenBlock{contentID, dataCopy})

		// once enough blocks have accumulated, verify a randomly chosen
		// earlier write and drop it from the list.
		if len(workerBlocks) > 5 {
			pos := rnd.Intn(len(workerBlocks))
			previous := workerBlocks[pos]

			d2, err := bm.GetContent(ctx, previous.contentID)
			if err != nil {
				t.Errorf("error verifying content %q: %v", previous.contentID, err)
				return
			}

			if !bytes.Equal(previous.data, d2) {
				t.Errorf("invalid previous data for %q %x %x", previous.contentID, d2, previous.data)
				return
			}

			workerBlocks = append(workerBlocks[0:pos], workerBlocks[pos+1:]...)
		}
	}
}
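
A usage note, inferred from the KOPIA_STRESS_TEST guard above and the file's path in the repository: the test is skipped unless that environment variable is set, and runs for 30 seconds instead of 3 when CI is also set. From the repository root, an invocation would look like:

KOPIA_STRESS_TEST=1 go test -v ./tests/stress_test/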