Files
kopia/repo/content/committed_content_index_cache_test.go
Jarek Kowalski 0f7253eb66 feat(general): rewrote content logs to always be JSON-based and reorganized log structure (#4822)
This is a breaking change to users who might be using Kopia as a library.

### Log Format

```json
{"t":"<timestamp-rfc-3339-microseconds>", "span:T1":"V1", "span:T2":"V2", "n":"<source>", "m":"<message>", /*parameters*/}
```

Where each record is associated with one or more spans that describe its scope:

* `"span:client": "<hash-of-username@hostname>"`
* `"span:repo": "<random>"` - random identifier of a repository connection (from `repo.Open`)
* `"span:maintenance": "<random>"` - random identifier of a maintenance session
* `"span:upload": "<hash-of-username@host:/path>"` - uniquely identifies upload session of a given directory
* `"span:checkpoint": "<random>"` - encapsulates each checkpoint operation during Upload
* `"span:server-session": "<random>"` - single client connection to the server
* `"span:flush": "<random>"` - encapsulates each Flush session
* `"span:maintenance": "<random>"` - encapsulates each maintenance operation
* `"span:loadIndex" : "<random>"` - encapsulates index loading operation
* `"span:emr" : "<random>"` - encapsulates epoch manager refresh
* `"span:writePack": "<pack-blob-ID>"` - encapsulates pack blob preparation and writing

(plus additional minor spans for various phases of the maintenance).

Notable points:

- Used internal zero allocation JSON writer for reduced memory usage.
- renamed `--disable-internal-log` to `--disable-repository-log` (controls saving blobs to repository)
- added `--disable-content-log` (controls writing of `content-log` files)
- all storage operations are also logged in a structural way and associated with the corresponding spans.
- all content IDs are logged in a truncated format (keeping only the first N bytes, which are usually enough to be unique) to improve compressibility of logs (blob IDs are frequently repeated but content IDs usually appear just once).

This format should make it possible to recreate the journey of any single content throughout pack blobs, indexes and compaction events.
2025-09-27 17:11:13 -07:00

145 lines
3.7 KiB
Go

package content
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/kopia/kopia/internal/faketime"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/testlogging"
"github.com/kopia/kopia/internal/testutil"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/content/index"
)
// TestCommittedContentIndexCache_Disk runs the shared cache test suite
// against the disk-backed cache implementation, using a fake clock so the
// test can deterministically advance time past the sweep age (disk-cache
// expiration relies on the passage of time — see testCache).
func TestCommittedContentIndexCache_Disk(t *testing.T) {
	t.Parallel()

	ta := faketime.NewClockTimeWithOffset(0)

	testCache(t, &diskCommittedContentIndexCache{
		testutil.TempDirectory(t), // cache directory, cleaned up with the test
		ta.NowFunc(),              // time source controlled by the fake clock above
		func() int { return 3 },   // presumably per-content overhead for v1 indexes — confirm against struct definition
		nil, DefaultIndexCacheSweepAge,
	}, ta)
}
func TestCommittedContentIndexCache_Memory(t *testing.T) {
t.Parallel()
testCache(t, &memoryCommittedContentIndexCache{
contents: map[blob.ID]index.Index{},
v1PerContentOverhead: func() int { return 3 },
}, nil)
}
//nolint:thelper
// testCache exercises the committedContentIndexCache contract shared by the
// disk and memory implementations: presence checks, add/open round trips,
// idempotent adds, and expiration. fakeTime may be nil (memory cache); when
// non-nil it is advanced past the sweep age so the disk cache actually
// expires entries (the first expireUnused is a no-op for the disk cache,
// which relies on the passage of time for safety).
func testCache(t *testing.T, cache committedContentIndexCache, fakeTime *faketime.ClockTimeWithOffset) {
	ctx := testlogging.Context(t)

	// The cache starts out empty: "ndx1" must neither be reported present
	// nor be openable.
	has, err := cache.hasIndexBlobID(ctx, "ndx1")
	require.NoError(t, err)
	require.False(t, has, "hasIndexBlobID invalid response, expected false")

	_, err = cache.openIndex(ctx, "ndx1")
	require.Error(t, err, "openIndex unexpectedly succeeded")

	require.NoError(t, cache.addContentToCache(ctx, "ndx1", mustBuildIndex(t, index.Builder{
		mustParseID(t, "c1"): Info{PackBlobID: "p1234", ContentID: mustParseID(t, "c1")},
		mustParseID(t, "c2"): Info{PackBlobID: "p1234", ContentID: mustParseID(t, "c2")},
	})))

	has, err = cache.hasIndexBlobID(ctx, "ndx1")
	require.NoError(t, err)
	require.True(t, has, "hasIndexBlobID invalid response, expected true")

	// Adding the same blob twice must be idempotent.
	require.NoError(t, cache.addContentToCache(ctx, "ndx2", mustBuildIndex(t, index.Builder{
		mustParseID(t, "c3"): Info{PackBlobID: "p2345", ContentID: mustParseID(t, "c3")},
		mustParseID(t, "c4"): Info{PackBlobID: "p2345", ContentID: mustParseID(t, "c4")},
	})))

	require.NoError(t, cache.addContentToCache(ctx, "ndx2", mustBuildIndex(t, index.Builder{
		mustParseID(t, "c3"): Info{PackBlobID: "p2345", ContentID: mustParseID(t, "c3")},
		mustParseID(t, "c4"): Info{PackBlobID: "p2345", ContentID: mustParseID(t, "c4")},
	})))

	ndx1, err := cache.openIndex(ctx, "ndx1")
	require.NoError(t, err)

	ndx2, err := cache.openIndex(ctx, "ndx2")
	require.NoError(t, err)

	var i Info

	ok, err := ndx1.GetInfo(mustParseID(t, "c1"), &i)
	require.True(t, ok)
	require.NoError(t, err)
	require.Equal(t, blob.ID("p1234"), i.PackBlobID, "unexpected pack blob ID")

	// Close ndx1 before expiration so the disk implementation can delete
	// its file (open files cannot be removed on some platforms).
	require.NoError(t, ndx1.Close())

	ok, err = ndx2.GetInfo(mustParseID(t, "c3"), &i)
	require.True(t, ok)
	require.NoError(t, err)
	require.Equal(t, blob.ID("p2345"), i.PackBlobID, "unexpected pack blob ID")

	// expire leaving only ndx2, this will actually not do anything for disk cache
	// because it relies on passage of time for safety, we'll re-do it in a sec.
	require.NoError(t, cache.expireUnused(ctx, []blob.ID{"ndx2"}))

	if fakeTime != nil {
		fakeTime.Advance(2 * time.Hour)
	}

	// expire leaving only ndx2
	require.NoError(t, cache.expireUnused(ctx, []blob.ID{"ndx2"}))

	has, err = cache.hasIndexBlobID(ctx, "ndx1")
	require.NoError(t, err)
	require.False(t, has, "hasIndexBlobID invalid response, expected false")

	_, err = cache.openIndex(ctx, "ndx1")
	require.Error(t, err, "openIndex unexpectedly succeeded")

	// Close ndx2 as well (after expiration, preserving the original
	// conditions during expireUnused) so the test does not leak an open
	// index handle.
	require.NoError(t, ndx2.Close())
}
// mustBuildIndex serializes the given builder into a Version2 index and
// returns its bytes, failing the test immediately on build error.
func mustBuildIndex(t *testing.T, b index.Builder) gather.Bytes {
	t.Helper()

	var buf bytes.Buffer

	// Consistent with the rest of the file, use require rather than a
	// manual if/t.Fatal check.
	require.NoError(t, b.Build(&buf, index.Version2))

	return gather.FromSlice(buf.Bytes())
}
// mustParseID parses s as a content ID, failing the test on parse error.
func mustParseID(t *testing.T, s string) ID {
	t.Helper()

	parsed, err := index.ParseID(s)
	require.NoError(t, err)

	return parsed
}