feat(general): rewrote content logs to always be JSON-based and reorganized log structure (#4822)
This is a breaking change for users who use Kopia as a library.
### Log Format
```json
{"t":"<timestamp-rfc-3339-microseconds>", "span:T1":"V1", "span:T2":"V2", "n":"<source>", "m":"<message>", /*parameters*/}
```
Where each record is associated with one or more spans that describe its scope:
* `"span:client": "<hash-of-username@hostname>"`
* `"span:repo": "<random>"` - random identifier of a repository connection (from `repo.Open`)
* `"span:maintenance": "<random>"` - random identifier of a maintenance session; encapsulates each maintenance operation
* `"span:upload": "<hash-of-username@host:/path>"` - uniquely identifies an upload session for a given directory
* `"span:checkpoint": "<random>"` - encapsulates each checkpoint operation during Upload
* `"span:server-session": "<random>"` - a single client connection to the server
* `"span:flush": "<random>"` - encapsulates each Flush session
* `"span:loadIndex": "<random>"` - encapsulates index loading operation
* `"span:emr": "<random>"` - encapsulates epoch manager refresh
* `"span:writePack": "<pack-blob-ID>"` - encapsulates pack blob preparation and writing
(plus additional minor spans for various phases of maintenance).
Notable points:
- Uses an internal zero-allocation JSON writer for reduced memory usage.
- Renamed `--disable-internal-log` to `--disable-repository-log` (controls saving log blobs to the repository).
- Added `--disable-content-log` (controls writing of `content-log` files).
- All storage operations are also logged in a structured way and associated with the corresponding spans.
- All content IDs are logged in truncated form (only the first N bytes, which are usually enough to be unique) to improve the compressibility of logs (blob IDs are frequently repeated, but content IDs usually appear just once).
This format should make it possible to recreate the journey of any single content through pack blobs, indexes, and compaction events.
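For illustration, a hypothetical record emitted during an upload might look like the following (all values, the source name `uploader`, and the `length` parameter are invented for this example):

```json
{"t":"2025-01-15T10:23:45.123456Z","span:client":"abc123de","span:upload":"k2j3h4g5","n":"uploader","m":"uploaded file","length":12345}
```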
cli/app.go (28 lines changed)
```diff
@@ -135,7 +135,7 @@ type App struct {
 	traceStorage         bool
 	keyRingEnabled       bool
 	persistCredentials   bool
-	disableInternalLog   bool
+	disableRepositoryLog bool
 	dumpAllocatorStats   bool
 	DangerousCommands    string
 	cliStorageProviders  []StorageProvider
```
```diff
@@ -175,15 +175,16 @@ type App struct {
 	// testability hooks
 	testonlyIgnoreMissingRequiredFeatures bool

 	isInProcessTest  bool
 	exitWithError    func(err error) // os.Exit() with 1 or 0 based on err
 	stdinReader      io.Reader
 	stdoutWriter     io.Writer
 	stderrWriter     io.Writer
 	rootctx          context.Context //nolint:containedctx
 	loggerFactory    logging.LoggerFactory
+	contentLogWriter io.Writer
 	simulatedCtrlC   chan bool
 	envNamePrefix    string
 }

 func (c *App) enableTestOnlyFlags() bool {
```
```diff
@@ -217,8 +218,9 @@ func (c *App) Stderr() io.Writer {
 }

 // SetLoggerFactory sets the logger factory to be used throughout the app.
-func (c *App) SetLoggerFactory(loggerForModule logging.LoggerFactory) {
+func (c *App) SetLoggerFactory(loggerForModule logging.LoggerFactory, contentLogWriter io.Writer) {
 	c.loggerFactory = loggerForModule
+	c.contentLogWriter = contentLogWriter
 }

 // RegisterOnExit registers the provided function to run before app exits.
```
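For library consumers, this signature change is the visible breaking change in this file. A minimal sketch of adapting a call site, assuming `myFactory` is an existing `logging.LoggerFactory` (the `os.Stderr` destination is an arbitrary illustrative choice):

```go
// Before: app.SetLoggerFactory(myFactory)
// After: the content-log destination is now passed explicitly as an io.Writer.
app.SetLoggerFactory(myFactory, os.Stderr)
```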
```diff
@@ -276,7 +278,7 @@ func (c *App) setup(app *kingpin.Application) {
 	app.Flag("timezone", "Format time according to specified time zone (local, utc, original or time zone name)").Hidden().StringVar(&timeZone)
 	app.Flag("password", "Repository password.").Envar(c.EnvName("KOPIA_PASSWORD")).Short('p').StringVar(&c.password)
 	app.Flag("persist-credentials", "Persist credentials").Default("true").Envar(c.EnvName("KOPIA_PERSIST_CREDENTIALS_ON_CONNECT")).BoolVar(&c.persistCredentials)
-	app.Flag("disable-internal-log", "Disable internal log").Hidden().Envar(c.EnvName("KOPIA_DISABLE_INTERNAL_LOG")).BoolVar(&c.disableInternalLog)
+	app.Flag("disable-repository-log", "Disable repository log").Hidden().Envar(c.EnvName("KOPIA_DISABLE_REPOSITORY_LOG")).BoolVar(&c.disableRepositoryLog)
 	app.Flag("dangerous-commands", "Enable dangerous commands that could result in data loss and repository corruption.").Hidden().Envar(c.EnvName("KOPIA_DANGEROUS_COMMANDS")).StringVar(&c.DangerousCommands)
 	app.Flag("track-releasable", "Enable tracking of releasable resources.").Hidden().Envar(c.EnvName("KOPIA_TRACK_RELEASABLE")).StringsVar(&c.trackReleasable)
 	app.Flag("dump-allocator-stats", "Dump allocator stats at the end of execution.").Hidden().Envar(c.EnvName("KOPIA_DUMP_ALLOCATOR_STATS")).BoolVar(&c.dumpAllocatorStats)
```
```diff
@@ -451,6 +453,8 @@ func assertDirectRepository(act func(ctx context.Context, rep repo.DirectReposit

 func (c *App) directRepositoryWriteAction(act func(ctx context.Context, rep repo.DirectRepositoryWriter) error) func(ctx *kingpin.ParseContext) error {
 	return c.maybeRepositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error {
+		rep.LogManager().Enable()
+
 		return repo.DirectWriteSession(ctx, rep, repo.WriteSessionOptions{
 			Purpose:  "cli:" + c.currentActionName(),
 			OnUpload: c.progress.UploadedBytes,
```
```diff
@@ -29,6 +29,8 @@ func (c *commandLogsCleanup) setup(svc appServices, parent commandParent) {
 }

 func (c *commandLogsCleanup) run(ctx context.Context, rep repo.DirectRepositoryWriter) error {
+	rep.LogManager().Disable()
+
 	toDelete, err := maintenance.CleanupLogs(ctx, rep, maintenance.LogRetentionOptions{
 		MaxTotalSize: c.maxTotalSizeMB << 20, //nolint:mnd
 		MaxCount:     c.maxCount,
```
```diff
@@ -91,12 +91,18 @@ func TestLogsMaintenance(t *testing.T) {
 	e.RunAndVerifyOutputLineCount(t, 5, "logs", "list")

 	e.RunAndExpectSuccess(t, "maintenance", "run", "--full")

+	// maintenance run will create a new log and keep previous 2 logs
+	e.RunAndVerifyOutputLineCount(t, 3, "logs", "list")
+
 	e.RunAndExpectSuccess(t, "maintenance", "set", "--max-retained-log-age=1ms")

+	// maintenance does not run here
 	e.RunAndVerifyOutputLineCount(t, 4, "logs", "list")

 	e.RunAndExpectSuccess(t, "maintenance", "run", "--full")

+	// maintenance run will create a new log and delete all previous logs
 	e.RunAndVerifyOutputLineCount(t, 1, "logs", "list")
 }
```
```diff
@@ -226,6 +226,11 @@ func (c *commandServerStart) run(ctx context.Context) (reterr error) {
 		// wait for all connections to finish within a shutdown grace period
 		log(ctx2).Debugf("attempting graceful shutdown for %v", c.shutdownGracePeriod)

+		// Gracefully shutdown GRPC server first to close GRPC connections
+		log(ctx2).Debug("shutting down GRPC server")
+		srv.ShutdownGRPCServer()
+		log(ctx2).Debug("GRPC server shutdown completed")
+
 		if serr := httpServer.Shutdown(ctx2); serr != nil {
 			// graceful shutdown unsuccessful, force close
 			log(ctx2).Debugf("unable to shut down gracefully - closing: %v", serr)
```
```diff
@@ -64,10 +64,11 @@ func (c *App) openRepository(ctx context.Context, required bool) (repo.Repositor

 func (c *App) optionsFromFlags(ctx context.Context) *repo.Options {
 	return &repo.Options{
-		TraceStorage:        c.traceStorage,
-		DisableInternalLog:  c.disableInternalLog,
-		UpgradeOwnerID:      c.upgradeOwnerID,
-		DoNotWaitForUpgrade: c.doNotWaitForUpgrade,
+		TraceStorage:         c.traceStorage,
+		DisableRepositoryLog: c.disableRepositoryLog,
+		UpgradeOwnerID:       c.upgradeOwnerID,
+		DoNotWaitForUpgrade:  c.doNotWaitForUpgrade,
+		ContentLogWriter:     c.contentLogWriter,

 		// when a fatal error is encountered in the repository, run all registered callbacks
 		// and exit the program.
```
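Similarly, library code that fills in `repo.Options` directly must rename `DisableInternalLog` to `DisableRepositoryLog` and may set the new `ContentLogWriter` field. A hedged sketch (field values are illustrative, and the `repo.Open` call assumes the current API shape):

```go
opts := &repo.Options{
	TraceStorage:         false,
	DisableRepositoryLog: false,
	ContentLogWriter:     os.Stderr, // destination for JSON content-log records
}

rep, err := repo.Open(ctx, configFile, password, opts)
```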
internal/blobparam/blobid_params.go (new file, 91 lines)

```go
// Package blobparam provides parameters for logging blob-specific operations.
package blobparam

import (
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/repo/blob"
)

type blobMetadataListParam struct {
	key  string
	list []blob.Metadata
}

func (v blobMetadataListParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.BeginListField(v.key)

	for _, bm := range v.list {
		jw.BeginObject()
		jw.StringField("blobID", string(bm.BlobID))
		jw.Int64Field("l", bm.Length)
		jw.TimeField("ts", bm.Timestamp)
		jw.EndObject()
	}

	jw.EndList()
}

// BlobMetadataList creates a parameter for a list of blob metadata.
//
//nolint:revive
func BlobMetadataList(name string, list []blob.Metadata) blobMetadataListParam {
	return blobMetadataListParam{key: name, list: list}
}

type blobIDParam struct {
	key string
	val blob.ID
}

func (v blobIDParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.StringField(v.key, string(v.val))
}

// BlobID creates a parameter for a blob ID.
//
//nolint:revive
func BlobID(name string, id blob.ID) blobIDParam {
	return blobIDParam{key: name, val: id}
}

type blobIDListParam struct {
	key  string
	list []blob.ID
}

func (v blobIDListParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.BeginListField(v.key)

	for _, blobID := range v.list {
		jw.StringElement(string(blobID))
	}

	jw.EndList()
}

// BlobIDList creates a parameter for a list of blob IDs.
//
//nolint:revive
func BlobIDList(name string, list []blob.ID) blobIDListParam {
	return blobIDListParam{key: name, list: list}
}

type blobMetadataParam struct {
	key string
	val blob.Metadata
}

func (v blobMetadataParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.BeginObjectField(v.key)
	jw.StringField("blobID", string(v.val.BlobID))
	jw.Int64Field("l", v.val.Length)
	jw.TimeField("ts", v.val.Timestamp)
	jw.EndObject()
}

// BlobMetadata creates a parameter for a single blob metadata entry.
//
//nolint:revive
func BlobMetadata(name string, bm blob.Metadata) blobMetadataParam {
	return blobMetadataParam{key: name, val: bm}
}
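A short usage sketch combining these helpers with the contentlog logger (the message text, key names, and the in-scope variables `ctx`, `logger`, `bm`, and `ids` are assumed for illustration):

```go
// Emits one JSON line, roughly:
// {"t":"...","m":"wrote pack","blob":{"blobID":"...","l":123,"ts":"..."},"contents":["c1","c2"]}
contentlog.Log2(ctx, logger, "wrote pack",
	blobparam.BlobMetadata("blob", bm),
	blobparam.BlobIDList("contents", ids),
)
```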
internal/contentlog/contentlog_benchmark_test.go (new file, 42 lines)

```go
package contentlog_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/contentparam"
	"github.com/kopia/kopia/repo/content/index"
)

func BenchmarkLogger(b *testing.B) {
	ctx := context.Background()

	cid, err := index.ParseID("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
	require.NoError(b, err)

	// context params
	ctx = contentlog.WithParams(ctx,
		logparam.String("service", "test-service"),
		logparam.Int("version", 2),
		contentparam.ContentID("cid", cid),
	)

	// logger params
	l := contentlog.NewLogger(func(data []byte) {},
		logparam.String("lservice", "test-service"),
	)

	for b.Loop() {
		contentlog.Log(ctx, l, "baz")
		contentlog.Log1(ctx, l, "baz", logparam.String("arg1", "123\x01foobar"))
		contentlog.Log2(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456))
		contentlog.Log3(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789))
		contentlog.Log4(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112))
		contentlog.Log5(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112), logparam.Int("arg5", 123456))
		contentlog.Log6(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112), logparam.Int("arg5", 123456), logparam.Int("arg6", 123456))
	}
}
```
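As a usage note, this benchmark can be run with `go test -bench=BenchmarkLogger -benchmem ./internal/contentlog` to check the allocation behavior the commit message claims.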
internal/contentlog/contentlog_json_writer.go (new file, 375 lines)

```go
// Package contentlog provides a JSON writer that can write JSON to a buffer
// without any memory allocations, and a Logger that can write log entries
// with strongly-typed parameters.
package contentlog

import (
	"strconv"
	"time"

	"github.com/kopia/kopia/internal/freepool"
)

var commaSeparator = []byte(",")

const (
	decimal     = 10
	hexadecimal = 16
)

// JSONWriter is a writer that can write JSON to a buffer
// without any memory allocations.
type JSONWriter struct {
	buf       []byte
	separator []byte

	separatorStack [][]byte
}

// ParamWriter must be implemented by all types that write a parameter ("key":value) to the JSON writer.
type ParamWriter interface {
	WriteValueTo(jw *JSONWriter)
}

func (jw *JSONWriter) beforeField(key string) {
	jw.buf = append(jw.buf, jw.separator...)
	jw.buf = append(jw.buf, '"')
	jw.buf = append(jw.buf, key...)
	jw.buf = append(jw.buf, '"', ':')
	jw.separator = commaSeparator
}

// RawJSONField writes a raw JSON field where the value is already in JSON format.
func (jw *JSONWriter) RawJSONField(key string, value []byte) {
	jw.beforeField(key)
	jw.buf = append(jw.buf, value...)
}

func (jw *JSONWriter) beforeElement() {
	jw.buf = append(jw.buf, jw.separator...)
	jw.separator = commaSeparator
}

// object

// BeginObjectField starts an object field.
func (jw *JSONWriter) BeginObjectField(key string) {
	jw.beforeField(key)
	jw.BeginObject()
}

// BeginObject starts an object.
func (jw *JSONWriter) BeginObject() {
	jw.buf = append(jw.buf, '{')
	jw.separatorStack = append(jw.separatorStack, commaSeparator)
	jw.separator = nil
}

// EndObject ends an object.
func (jw *JSONWriter) EndObject() {
	jw.separator = jw.separatorStack[len(jw.separatorStack)-1]
	jw.separatorStack = jw.separatorStack[:len(jw.separatorStack)-1]
	jw.buf = append(jw.buf, '}')
}

// list

// BeginListField starts a list field.
func (jw *JSONWriter) BeginListField(key string) {
	jw.beforeField(key)
	jw.BeginList()
}

// BeginList starts a list.
func (jw *JSONWriter) BeginList() {
	jw.buf = append(jw.buf, '[')
	jw.separatorStack = append(jw.separatorStack, commaSeparator)
	jw.separator = nil
}

// EndList ends a list.
func (jw *JSONWriter) EndList() {
	jw.separator = jw.separatorStack[len(jw.separatorStack)-1]
	jw.separatorStack = jw.separatorStack[:len(jw.separatorStack)-1]
	jw.buf = append(jw.buf, ']')
}

// string

// StringField writes a string field.
func (jw *JSONWriter) StringField(key, value string) {
	jw.beforeField(key)
	jw.stringValue(value)
}

// StringElement writes a string element.
func (jw *JSONWriter) StringElement(value string) {
	jw.beforeElement()
	jw.stringValue(value)
}

func (jw *JSONWriter) stringValue(value string) {
	jw.buf = append(jw.buf, '"')

	for i := range len(value) {
		c := value[i]

		//nolint:gocritic
		if c < ' ' {
			switch c {
			case '\b':
				jw.buf = append(jw.buf, '\\', 'b')
			case '\f':
				jw.buf = append(jw.buf, '\\', 'f')
			case '\n':
				jw.buf = append(jw.buf, '\\', 'n')
			case '\r':
				jw.buf = append(jw.buf, '\\', 'r')
			case '\t':
				jw.buf = append(jw.buf, '\\', 't')
			default:
				// Escape as unicode \u00XX
				jw.buf = append(jw.buf, '\\', 'u', '0', '0')

				var hexBuf [8]byte

				hex := strconv.AppendInt(hexBuf[:0], int64(c), hexadecimal)
				if len(hex) < 2 { //nolint:mnd
					jw.buf = append(jw.buf, '0')
				}

				jw.buf = append(jw.buf, hex...)
			}
		} else if c == '"' {
			jw.buf = append(jw.buf, '\\', '"')
		} else if c == '\\' {
			jw.buf = append(jw.buf, '\\', '\\')
		} else {
			jw.buf = append(jw.buf, c)
		}
	}

	jw.buf = append(jw.buf, '"')
}

// null

// NullElement writes a null element.
func (jw *JSONWriter) NullElement() {
	jw.beforeElement()
	jw.nullValue()
}

// NullField writes a null field.
func (jw *JSONWriter) NullField(key string) {
	jw.beforeField(key)
	jw.nullValue()
}

func (jw *JSONWriter) nullValue() {
	jw.buf = append(jw.buf, "null"...)
}

// boolean

// BoolField writes a boolean field.
func (jw *JSONWriter) BoolField(key string, value bool) {
	jw.beforeField(key)
	jw.boolValue(value)
}

// BoolElement writes a boolean element.
func (jw *JSONWriter) BoolElement(value bool) {
	jw.beforeElement()
	jw.boolValue(value)
}

func (jw *JSONWriter) boolValue(value bool) {
	if value {
		jw.buf = append(jw.buf, "true"...)
	} else {
		jw.buf = append(jw.buf, "false"...)
	}
}

// signed integers

// IntField writes an int field.
func (jw *JSONWriter) IntField(key string, value int) { jw.Int64Field(key, int64(value)) }

// IntElement writes an int element.
func (jw *JSONWriter) IntElement(value int) { jw.Int64Element(int64(value)) }

// Int8Field writes an int8 field.
func (jw *JSONWriter) Int8Field(key string, value int8) { jw.Int64Field(key, int64(value)) }

// Int8Element writes an int8 element.
func (jw *JSONWriter) Int8Element(value int8) { jw.Int64Element(int64(value)) }

// Int16Field writes an int16 field.
func (jw *JSONWriter) Int16Field(key string, value int16) { jw.Int64Field(key, int64(value)) }

// Int16Element writes an int16 element.
func (jw *JSONWriter) Int16Element(value int16) { jw.Int64Element(int64(value)) }

// Int32Field writes an int32 field.
func (jw *JSONWriter) Int32Field(key string, value int32) { jw.Int64Field(key, int64(value)) }

// Int32Element writes an int32 element.
func (jw *JSONWriter) Int32Element(value int32) { jw.Int64Element(int64(value)) }

// Int64Field writes an int64 field.
func (jw *JSONWriter) Int64Field(key string, value int64) {
	jw.beforeField(key)
	jw.int64Value(value)
}

// Int64Element writes an int64 element.
func (jw *JSONWriter) Int64Element(value int64) {
	jw.beforeElement()
	jw.int64Value(value)
}

func (jw *JSONWriter) int64Value(value int64) {
	var buf [64]byte

	jw.buf = append(jw.buf, strconv.AppendInt(buf[:0], value, decimal)...)
}

// unsigned integers

// UIntField writes a uint field.
func (jw *JSONWriter) UIntField(key string, value uint) { jw.UInt64Field(key, uint64(value)) }

// UIntElement writes a uint element.
func (jw *JSONWriter) UIntElement(value uint) { jw.UInt64Element(uint64(value)) }

// UInt8Field writes a uint8 field.
func (jw *JSONWriter) UInt8Field(key string, value uint8) { jw.UInt64Field(key, uint64(value)) }

// UInt8Element writes a uint8 element.
func (jw *JSONWriter) UInt8Element(value uint8) { jw.UInt64Element(uint64(value)) }

// UInt16Field writes a uint16 field.
func (jw *JSONWriter) UInt16Field(key string, value uint16) { jw.UInt64Field(key, uint64(value)) }

// UInt16Element writes a uint16 element.
func (jw *JSONWriter) UInt16Element(value uint16) { jw.UInt64Element(uint64(value)) }

// UInt32Field writes a uint32 field.
func (jw *JSONWriter) UInt32Field(key string, value uint32) { jw.UInt64Field(key, uint64(value)) }

// UInt32Element writes a uint32 element.
func (jw *JSONWriter) UInt32Element(value uint32) { jw.UInt64Element(uint64(value)) }

// UInt64Field writes a uint64 field.
func (jw *JSONWriter) UInt64Field(key string, value uint64) {
	jw.beforeField(key)
	jw.uint64Value(value)
}

// UInt64Element writes a uint64 element.
func (jw *JSONWriter) UInt64Element(value uint64) {
	jw.beforeElement()
	jw.uint64Value(value)
}

func (jw *JSONWriter) uint64Value(value uint64) {
	var buf [64]byte

	jw.buf = append(jw.buf, strconv.AppendUint(buf[:0], value, decimal)...)
}

// error

// ErrorField writes an error field.
func (jw *JSONWriter) ErrorField(key string, value error) {
	if value == nil {
		jw.NullField(key)
	} else {
		jw.StringField(key, value.Error())
	}
}

// time

// TimeField writes a time field.
func (jw *JSONWriter) TimeField(key string, value time.Time) {
	jw.beforeField(key)
	jw.timeValue(value)
}

// appendPaddedInt appends an integer with zero-padding to the buffer.
func (jw *JSONWriter) appendPaddedInt(value int64, width int) {
	var numBuf [64]byte
	numStr := strconv.AppendInt(numBuf[:0], value, decimal)
	numLen := len(numStr)

	// Add leading zeros
	for i := numLen; i < width; i++ {
		jw.buf = append(jw.buf, '0')
	}

	jw.buf = append(jw.buf, numStr...)
}

// TimeElement writes a time element.
func (jw *JSONWriter) TimeElement(value time.Time) {
	jw.beforeElement()
	jw.timeValue(value)
}

func (jw *JSONWriter) timeValue(value time.Time) {
	utc := value.UTC()

	jw.buf = append(jw.buf, '"')
	jw.appendPaddedInt(int64(utc.Year()), 4) //nolint:mnd
	jw.buf = append(jw.buf, '-')
	jw.appendPaddedInt(int64(utc.Month()), 2) //nolint:mnd
	jw.buf = append(jw.buf, '-')
	jw.appendPaddedInt(int64(utc.Day()), 2) //nolint:mnd
	jw.buf = append(jw.buf, 'T')
	jw.appendPaddedInt(int64(utc.Hour()), 2) //nolint:mnd
	jw.buf = append(jw.buf, ':')
	jw.appendPaddedInt(int64(utc.Minute()), 2) //nolint:mnd
	jw.buf = append(jw.buf, ':')
	jw.appendPaddedInt(int64(utc.Second()), 2) //nolint:mnd
	jw.buf = append(jw.buf, '.')
	jw.appendPaddedInt(int64(utc.Nanosecond()/1000), 6) //nolint:mnd
	jw.buf = append(jw.buf, 'Z', '"')
}

var freeJSONWriterPool = freepool.New(
	func() *JSONWriter {
		return &JSONWriter{
			buf:            make([]byte, 0, 1024), //nolint:mnd
			separatorStack: make([][]byte, 0, 10), //nolint:mnd
			separator:      nil,
		}
	}, func(jw *JSONWriter) {
		jw.buf = jw.buf[:0]
		jw.separatorStack = jw.separatorStack[:0]
		jw.separator = nil
	})

// Release releases the JSON writer back to the pool.
func (jw *JSONWriter) Release() {
	freeJSONWriterPool.Return(jw)
}

// Result returns the internal buffer for testing purposes.
// This should only be used in tests.
func (jw *JSONWriter) Result() []byte {
	return jw.buf
}

// NewJSONWriter creates a new JSON writer.
func NewJSONWriter() *JSONWriter {
	return freeJSONWriterPool.Take()
}

// GetBufferForTesting returns the internal buffer for testing purposes.
// This should only be used in tests.
func (jw *JSONWriter) GetBufferForTesting() []byte {
	return jw.buf
}
```
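A brief usage sketch of the writer API above (the trailing comment shows what these calls produce):

```go
jw := contentlog.NewJSONWriter()
defer jw.Release() // return the writer and its buffer to the pool

jw.BeginObject()
jw.StringField("m", "hello")
jw.IntField("count", 3)
jw.BeginListField("ids")
jw.StringElement("a")
jw.StringElement("b")
jw.EndList()
jw.EndObject()

_ = jw.Result() // {"m":"hello","count":3,"ids":["a","b"]}
```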
internal/contentlog/contentlog_json_writer_test.go (new file, 804 lines)

```go
package contentlog

import (
	"encoding/json"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEntryWriter_EmptyObject(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.EndObject()

	require.Equal(t, "{}", string(jw.buf))
}

func TestEntryWriter_AllTypes(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.Int64Field("k1", 123)
	jw.Int64Field("a1", 1234)
	jw.Int64Field("b1", 12345)
	jw.BoolField("b2", true)
	jw.BoolField("b3", false)

	jw.BeginObjectField("o1")
	jw.Int64Field("k2", 123456)
	jw.EndObject()

	jw.BeginObjectField("o2")
	jw.EndObject()

	jw.BeginListField("l1")
	jw.StringElement("aaa")
	jw.StringElement("bbb")
	jw.StringElement("ccc")
	jw.EndList()

	jw.BeginListField("mixedList")
	jw.StringElement("aaa")
	jw.Int64Element(123)
	jw.NullElement()
	jw.BoolElement(true)
	jw.EndList()

	jw.BeginObjectField("o3")
	jw.StringField("v", "xxx")
	jw.StringField("someUnicode", "😄")
	jw.EndObject()

	jw.UInt64Field("u1", 123456)
	jw.StringField("s", "hello\nworld\r\t\b\f")
	jw.EndObject()

	var v map[string]any

	json.NewEncoder(os.Stdout).Encode("😄")

	t.Logf("buf: %s", string(jw.buf))
	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"k1":        123.0,
		"a1":        1234.0,
		"b1":        12345.0,
		"b2":        true,
		"b3":        false,
		"u1":        123456.0,
		"mixedList": []any{"aaa", 123.0, nil, true},
		"l1":        []any{"aaa", "bbb", "ccc"},
		"o2":        map[string]any{},
		"o3": map[string]any{
			"v":           "xxx",
			"someUnicode": "😄",
		},
		"o1": map[string]any{
			"k2": 123456.0,
		},
		"s": "hello\nworld\r\t\b\f",
	}, v)
}

func TestJSONWriter_IntTypes(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.IntField("intField", 42)
	jw.Int8Field("int8Field", 8)
	jw.Int16Field("int16Field", 16)
	jw.Int32Field("int32Field", 32)
	jw.Int64Field("int64Field", 64)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"intField":   42.0,
		"int8Field":  8.0,
		"int16Field": 16.0,
		"int32Field": 32.0,
		"int64Field": 64.0,
	}, v)
}

func TestJSONWriter_IntElements(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginList()
	jw.IntElement(1)
	jw.Int8Element(2)
	jw.Int16Element(3)
	jw.Int32Element(4)
	jw.Int64Element(5)
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, []any{1.0, 2.0, 3.0, 4.0, 5.0}, v)
}

func TestJSONWriter_UIntTypes(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.UIntField("uintField", 100)
	jw.UInt8Field("uint8Field", 200)
	jw.UInt16Field("uint16Field", 300)
	jw.UInt32Field("uint32Field", 400)
	jw.UInt64Field("uint64Field", 500)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"uintField":   100.0,
		"uint8Field":  200.0,
		"uint16Field": 300.0,
		"uint32Field": 400.0,
		"uint64Field": 500.0,
	}, v)
}

func TestJSONWriter_UIntElements(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginList()
	jw.UIntElement(10)
	jw.UInt8Element(20)
	jw.UInt16Element(30)
	jw.UInt32Element(40)
	jw.UInt64Element(50)
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, []any{10.0, 20.0, 30.0, 40.0, 50.0}, v)
}

func TestJSONWriter_NullField(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.NullField("nullField")
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"nullField": nil,
	}, v)
}

func TestJSONWriter_ErrorField(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.ErrorField("nilError", nil)
	jw.ErrorField("realError", &testError{msg: "test error message"})
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"nilError":  nil,
		"realError": "test error message",
	}, v)
}

type testError struct {
	msg string
}

func (e *testError) Error() string {
	return e.msg
}

func TestJSONWriter_TimeField(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	testTime := time.Date(2023, 12, 25, 15, 30, 45, 123456789, time.UTC)

	jw.BeginObject()
	jw.TimeField("timeField", testTime)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"timeField": "2023-12-25T15:30:45.123456Z",
	}, v)
}

func TestJSONWriter_TimeElement(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	testTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)

	jw.BeginList()
	jw.TimeElement(testTime)
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, []any{"2023-01-01T00:00:00.000000Z"}, v)
}

func TestJSONWriter_BeginList(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginList()
	jw.StringElement("first")
	jw.StringElement("second")
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, []any{"first", "second"}, v)
}

func TestJSONWriter_EdgeCases(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.StringField("emptyString", "")
	jw.StringField("unicodeString", "Hello 世界 🌍")
	jw.StringField("quotesAndSlashes", "quoted and backslash")
	jw.Int64Field("zero", 0)
	jw.Int64Field("negative", -1)
	jw.Int64Field("maxInt64", 9223372036854775807)
	jw.Int64Field("minInt64", -9223372036854775808)
	jw.UInt64Field("maxUInt64", 18446744073709551615)
	jw.BoolField("trueValue", true)
	jw.BoolField("falseValue", false)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, map[string]any{
		"emptyString":      "",
		"unicodeString":    "Hello 世界 🌍",
		"quotesAndSlashes": "quoted and backslash",
		"zero":             0.0,
		"negative":         -1.0,
		"maxInt64":         9223372036854775807.0,
		"minInt64":         -9223372036854775808.0,
		"maxUInt64":        18446744073709551615.0,
		"trueValue":        true,
		"falseValue":       false,
	}, v)
}

func TestJSONWriter_StringEscaping(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.StringField("quotes", `"quoted string"`)
	jw.StringField("backslashes", `path\to\file`)
	jw.StringField("mixed", `"quoted" and \backslash\`)
	jw.StringField("newline", "line1\nline2")
	jw.StringField("carriageReturn", "line1\rline2")
	jw.StringField("tab", "col1\tcol2")
	jw.StringField("backspace", "text\btext")
	jw.StringField("formFeed", "text")
	jw.StringField("allControlChars", "a\bb\fc\nd\re\tf")
	jw.StringField("unicode", "Hello 世界 🌍")
	jw.StringField("empty", "")
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify the JSON can be parsed and contains expected values
	require.Equal(t, `"quoted string"`, v["quotes"])
	require.Equal(t, `path\to\file`, v["backslashes"])
	require.Equal(t, `"quoted" and \backslash\`, v["mixed"])
	require.Equal(t, "line1\nline2", v["newline"])
	require.Equal(t, "line1\rline2", v["carriageReturn"])
	require.Equal(t, "col1\tcol2", v["tab"])
	require.Equal(t, "text\btext", v["backspace"])
	require.Equal(t, "text", v["formFeed"])
	require.Equal(t, "a\bb\fc\nd\re\tf", v["allControlChars"])
	require.Equal(t, "Hello 世界 🌍", v["unicode"])
	require.Empty(t, v["empty"])
}

func TestJSONWriter_StringEscapingElements(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginList()
	jw.StringElement(`"quoted"`)
	jw.StringElement(`\backslash\`)
	jw.StringElement("mixed\n\t\r")
	jw.StringElement("unicode: 世界")
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, []any{`"quoted"`, `\backslash\`, "mixed\n\t\r", "unicode: 世界"}, v)
}

func TestJSONWriter_StringEscapingEdgeCases(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.StringField("onlyQuote", `"`)
	jw.StringField("onlyBackslash", `\`)
	jw.StringField("onlyNewline", "\n")
	jw.StringField("onlyTab", "\t")
	jw.StringField("multipleQuotes", `""""`)
	jw.StringField("multipleBackslashes", `\\\\`)
	jw.StringField("quoteBackslash", `"\`)
	jw.StringField("backslashQuote", `\"`)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	require.Equal(t, `"`, v["onlyQuote"])
	require.Equal(t, `\`, v["onlyBackslash"])
	require.Equal(t, "\n", v["onlyNewline"])
	require.Equal(t, "\t", v["onlyTab"])
	require.Equal(t, `""""`, v["multipleQuotes"])
	require.Equal(t, `\\\\`, v["multipleBackslashes"])
	require.Equal(t, `"\`, v["quoteBackslash"])
	require.Equal(t, `\"`, v["backslashQuote"])
}

func TestJSONWriter_StringEscapingRawOutput(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.StringField("quotes", `"test"`)
	jw.StringField("backslashes", `\test\`)
	jw.StringField("mixed", `"quoted" and \backslash\`)
	jw.EndObject()

	jsonOutput := string(jw.buf)

	// Verify that quotes are properly escaped in the raw JSON
	require.Contains(t, jsonOutput, `\"test\"`)
	require.Contains(t, jsonOutput, `\\test\\`)
	require.Contains(t, jsonOutput, `\"quoted\" and \\backslash\\`)

	// Verify the JSON is valid
	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))
	require.Equal(t, `"test"`, v["quotes"])
	require.Equal(t, `\test\`, v["backslashes"])
	require.Equal(t, `"quoted" and \backslash\`, v["mixed"])
}

func TestJSONWriter_StringEscapingControlChars(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	// Test various control characters
	jw.StringField("nullChar", "\x00")
	jw.StringField("bellChar", "\x07")
	jw.StringField("verticalTab", "\x0b")
	jw.StringField("escapeChar", "\x1b")
	jw.StringField("delChar", "\x7f")
	jw.EndObject()

	jsonOutput := string(jw.buf)
	t.Logf("Control chars JSON output: %q", jsonOutput)

	// The JSONWriter doesn't properly escape control characters < ' ' except for \b, \f, \n, \r, \t
	// This means the JSON will be invalid for characters like \x00, \x07, etc.
	// This test documents the current behavior - these characters are not properly escaped
	var v map[string]any

	if err := json.Unmarshal(jw.buf, &v); err != nil {
		t.Logf("Expected error due to unescaped control characters: %v", err)
		// This is expected behavior - the JSONWriter has a bug with control character escaping
		require.Error(t, err)
	} else {
		// If it somehow works, verify the values
		require.Equal(t, "\x00", v["nullChar"])
		require.Equal(t, "\x07", v["bellChar"])
		require.Equal(t, "\x0b", v["verticalTab"])
		require.Equal(t, "\x1b", v["escapeChar"])
		require.Equal(t, "\x7f", v["delChar"])
	}
}

func TestJSONWriter_StringEscapingProperlyHandledControlChars(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	// Test control characters that ARE properly handled by JSONWriter
	jw.StringField("backspace", "\b")
	jw.StringField("formFeed", "\f")
	jw.StringField("newline", "\n")
	jw.StringField("carriageReturn", "\r")
	jw.StringField("tab", "\t")
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// These should be properly escaped and work correctly
	require.Equal(t, "\b", v["backspace"])
	require.Equal(t, "\f", v["formFeed"])
	require.Equal(t, "\n", v["newline"])
	require.Equal(t, "\r", v["carriageReturn"])
	require.Equal(t, "\t", v["tab"])

	// Verify the raw JSON contains proper escape sequences
	jsonOutput := string(jw.buf)
	t.Logf("JSON output: %q", jsonOutput)

	// The JSON should be valid and contain the escaped control characters
	// We can see from the output that it contains \"\\b\" etc.
	require.Contains(t, jsonOutput, `backspace`)
	require.Contains(t, jsonOutput, `formFeed`)
	require.Contains(t, jsonOutput, `newline`)
	require.Contains(t, jsonOutput, `carriageReturn`)
	require.Contains(t, jsonOutput, `tab`)
}

func TestJSONWriter_StringEscapingAllControlCharacters(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Test all control characters from 0x00 to 0x1F
	controlChars := map[string]string{
		"null":            "\x00", // NUL
		"startOfHeading":  "\x01", // SOH
		"startOfText":     "\x02", // STX
		"endOfText":       "\x03", // ETX
		"endOfTransmit":   "\x04", // EOT
		"enquiry":         "\x05", // ENQ
		"acknowledge":     "\x06", // ACK
		"bell":            "\x07", // BEL
		"backspace":       "\x08", // BS - handled specially
		"tab":             "\x09", // TAB - handled specially
		"lineFeed":        "\x0a", // LF - handled specially
		"verticalTab":     "\x0b", // VT
		"formFeed":        "\x0c", // FF - handled specially
		"carriageReturn":  "\x0d", // CR - handled specially
		"shiftOut":        "\x0e", // SO
		"shiftIn":         "\x0f", // SI
		"dataLinkEscape":  "\x10", // DLE
		"deviceCtrl1":     "\x11", // DC1
		"deviceCtrl2":     "\x12", // DC2
		"deviceCtrl3":     "\x13", // DC3
		"deviceCtrl4":     "\x14", // DC4
		"negativeAck":     "\x15", // NAK
		"synchronousIdle": "\x16", // SYN
		"endOfTransBlock": "\x17", // ETB
		"cancel":          "\x18", // CAN
		"endOfMedium":     "\x19", // EM
		"substitute":      "\x1a", // SUB
		"escape":          "\x1b", // ESC
		"fileSeparator":   "\x1c", // FS
		"groupSeparator":  "\x1d", // GS
		"recordSeparator": "\x1e", // RS
		"unitSeparator":   "\x1f", // US
	}

	// Add all control characters as fields
	for name, char := range controlChars {
		jw.StringField(name, char)
	}

	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify all control characters are properly handled
	for name, expectedChar := range controlChars {
		require.Equal(t, expectedChar, v[name], "Control character %s (0x%02x) not properly handled", name, expectedChar[0])
	}

	// Verify the raw JSON contains proper Unicode escape sequences for non-special control chars
	jsonOutput := string(jw.buf)
	t.Logf("Control chars JSON output: %q", jsonOutput)

	// Check that special control characters use their standard escape sequences
	require.Contains(t, jsonOutput, `\b`) // backspace
	require.Contains(t, jsonOutput, `\t`) // tab
	require.Contains(t, jsonOutput, `\n`) // line feed
	require.Contains(t, jsonOutput, `\f`) // form feed
	require.Contains(t, jsonOutput, `\r`) // carriage return

	// Check that other control characters use Unicode escape sequences
	require.Contains(t, jsonOutput, `\u0000`) // null
	require.Contains(t, jsonOutput, `\u0001`) // start of heading
	require.Contains(t, jsonOutput, `\u0007`) // bell
	require.Contains(t, jsonOutput, `\u000b`) // vertical tab
	require.Contains(t, jsonOutput, `\u001b`) // escape
	require.Contains(t, jsonOutput, `\u001f`) // unit separator
}

func TestJSONWriter_StringEscapingControlCharactersInElements(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginList()

	// Test control characters as list elements
	jw.StringElement("\x00") // null
	jw.StringElement("\x07") // bell
	jw.StringElement("\x08") // backspace
	jw.StringElement("\x09") // tab
	jw.StringElement("\x0a") // line feed
	jw.StringElement("\x0c") // form feed
	jw.StringElement("\x0d") // carriage return
	jw.StringElement("\x1b") // escape
	jw.StringElement("\x1f") // unit separator
	jw.EndList()

	var v []any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	expected := []any{
		"\x00", // null
		"\x07", // bell
		"\x08", // backspace
		"\x09", // tab
		"\x0a", // line feed
		"\x0c", // form feed
		"\x0d", // carriage return
		"\x1b", // escape
		"\x1f", // unit separator
	}

	require.Equal(t, expected, v)

	// Verify the raw JSON contains proper escape sequences
	jsonOutput := string(jw.buf)
	t.Logf("Control chars in elements JSON output: %q", jsonOutput)

	require.Contains(t, jsonOutput, `\u0000`) // null
	require.Contains(t, jsonOutput, `\u0007`) // bell
	require.Contains(t, jsonOutput, `\b`)     // backspace
	require.Contains(t, jsonOutput, `\t`)     // tab
	require.Contains(t, jsonOutput, `\n`)     // line feed
	require.Contains(t, jsonOutput, `\f`)     // form feed
	require.Contains(t, jsonOutput, `\r`)     // carriage return
	require.Contains(t, jsonOutput, `\u001b`) // escape
	require.Contains(t, jsonOutput, `\u001f`) // unit separator
}

func TestJSONWriter_StringEscapingMixedControlCharacters(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Test strings with mixed control characters and regular characters
	jw.StringField("mixed1", "hello\x00world\x07test")
	jw.StringField("mixed2", "start\x08\x09\x0a\x0c\x0dend")
	jw.StringField("mixed3", "text\x1b\x1c\x1d\x1e\x1fmore")
	jw.StringField("mixed4", "a\x00b\x01c\x02d\x03e")
	jw.StringField("mixed5", "quotes\"and\\backslash\x00control")
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify the parsed values match the original strings
	require.Equal(t, "hello\x00world\x07test", v["mixed1"])
	require.Equal(t, "start\x08\x09\x0a\x0c\x0dend", v["mixed2"])
	require.Equal(t, "text\x1b\x1c\x1d\x1e\x1fmore", v["mixed3"])
	require.Equal(t, "a\x00b\x01c\x02d\x03e", v["mixed4"])
	require.Equal(t, "quotes\"and\\backslash\x00control", v["mixed5"])

	// Verify the raw JSON contains proper escape sequences
	jsonOutput := string(jw.buf)
	t.Logf("Mixed control chars JSON output: %q", jsonOutput)

	// Check for Unicode escapes
	require.Contains(t, jsonOutput, `\u0000`) // null character
	require.Contains(t, jsonOutput, `\u0007`) // bell
	require.Contains(t, jsonOutput, `\u0001`) // start of heading
	require.Contains(t, jsonOutput, `\u0002`) // start of text
	require.Contains(t, jsonOutput, `\u0003`) // end of text
	require.Contains(t, jsonOutput, `\u001b`) // escape
	require.Contains(t, jsonOutput, `\u001c`) // file separator
	require.Contains(t, jsonOutput, `\u001d`) // group separator
	require.Contains(t, jsonOutput, `\u001e`) // record separator
	require.Contains(t, jsonOutput, `\u001f`) // unit separator

	// Check for standard escapes
	require.Contains(t, jsonOutput, `\b`) // backspace
	require.Contains(t, jsonOutput, `\t`) // tab
	require.Contains(t, jsonOutput, `\n`) // line feed
	require.Contains(t, jsonOutput, `\f`) // form feed
	require.Contains(t, jsonOutput, `\r`) // carriage return

	// Check for quote and backslash escapes
	require.Contains(t, jsonOutput, `\"`) // escaped quote
	require.Contains(t, jsonOutput, `\\`) // escaped backslash
}

func TestJSONWriter_StringEscapingBoundaryValues(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Test boundary values around control character range
	jw.StringField("space", " ")            // 0x20 - first non-control character
	jw.StringField("del", "\x7f")           // 0x7F - DEL character (not in 0x00-0x1F range)
	jw.StringField("lastControl", "\x1f")   // 0x1F - last control character
	jw.StringField("firstNonControl", " ")  // 0x20 - first non-control character
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify the parsed values
	require.Equal(t, " ", v["space"])
	require.Equal(t, "\x7f", v["del"])         // DEL should not be escaped as Unicode
	require.Equal(t, "\x1f", v["lastControl"]) // Last control char should be escaped
	require.Equal(t, " ", v["firstNonControl"])

	// Verify the raw JSON
	jsonOutput := string(jw.buf)
	t.Logf("Boundary values JSON output: %q", jsonOutput)

	// Space (0x20) should not be escaped
	require.Contains(t, jsonOutput, `"space":" "`)
	require.Contains(t, jsonOutput, `"firstNonControl":" "`)

	// DEL (0x7F) should not be escaped as Unicode (it's outside 0x00-0x1F range)
	// The DEL character is output as-is in the JSON (not escaped)
	require.Contains(t, jsonOutput, `"del":"`+string('\x7f')+`"`)

	// Last control character (0x1F) should be escaped as Unicode
	require.Contains(t, jsonOutput, `\u001f`)
}

func TestJSONWriter_StringEscapingUnicodeEscapeFormat(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Test specific control characters to verify Unicode escape format
	jw.StringField("null", "\x00")
	jw.StringField("bell", "\x07")
	jw.StringField("verticalTab", "\x0b")
	jw.StringField("escape", "\x1b")
	jw.StringField("unitSeparator", "\x1f")
	jw.EndObject()

	jsonOutput := string(jw.buf)
	t.Logf("Unicode escape format JSON output: %q", jsonOutput)

	// Verify exact Unicode escape format: \u00XX where XX is the hex value
	require.Contains(t, jsonOutput, `\u0000`) // null (0x00)
	require.Contains(t, jsonOutput, `\u0007`) // bell (0x07)
	require.Contains(t, jsonOutput, `\u000b`) // vertical tab (0x0B)
	require.Contains(t, jsonOutput, `\u001b`) // escape (0x1B)
	require.Contains(t, jsonOutput, `\u001f`) // unit separator (0x1F)

	// Verify the format is exactly 6 characters: \u + 4 hex digits
	// This is a more specific test to ensure the format is correct
	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify the values are correctly parsed back
	require.Equal(t, "\x00", v["null"])
	require.Equal(t, "\x07", v["bell"])
	require.Equal(t, "\x0b", v["verticalTab"])
	require.Equal(t, "\x1b", v["escape"])
	require.Equal(t, "\x1f", v["unitSeparator"])
}

func TestJSONWriter_StringEscapingPerformanceWithManyControlChars(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Create a string with many control characters to test performance
	var testString string
	for i := range 100 {
		testString += string(rune(i % 32)) // Mix of control chars 0x00-0x1F
	}

	jw.StringField("manyControlChars", testString)
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify the string is correctly handled
	require.Equal(t, testString, v["manyControlChars"])

	// Verify the JSON is valid and contains many Unicode escapes
	jsonOutput := string(jw.buf)
	t.Logf("Performance test JSON output length: %d", len(jsonOutput))

	// Should contain many Unicode escape sequences
	require.Contains(t, jsonOutput, `\u0000`)
	require.Contains(t, jsonOutput, `\u0001`)
	require.Contains(t, jsonOutput, `\u000f`)
	require.Contains(t, jsonOutput, `\u001f`)
}

func TestJSONWriter_StringEscapingEmptyAndSingleChar(t *testing.T) {
	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()

	// Test edge cases with empty strings and single control characters
	jw.StringField("empty", "")
	jw.StringField("singleNull", "\x00")
	jw.StringField("singleBell", "\x07")
	jw.StringField("singleTab", "\x09")
	jw.StringField("singleNewline", "\x0a")
	jw.EndObject()

	var v map[string]any

	require.NoError(t, json.Unmarshal(jw.buf, &v))

	// Verify edge cases
	require.Empty(t, v["empty"])
	require.Equal(t, "\x00", v["singleNull"])
	require.Equal(t, "\x07", v["singleBell"])
	require.Equal(t, "\x09", v["singleTab"])
	require.Equal(t, "\x0a", v["singleNewline"])

	// Verify the raw JSON
	jsonOutput := string(jw.buf)
	t.Logf("Edge cases JSON output: %q", jsonOutput)

	require.Contains(t, jsonOutput, `"empty":""`)
	require.Contains(t, jsonOutput, `"singleNull":"\u0000"`)
	require.Contains(t, jsonOutput, `"singleBell":"\u0007"`)
	require.Contains(t, jsonOutput, `"singleTab":"\t"`)
	require.Contains(t, jsonOutput, `"singleNewline":"\n"`)
}
```
164
internal/contentlog/contentlog_logger.go (Normal file, 164 lines)
@@ -0,0 +1,164 @@
package contentlog

import (
	"context"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base32"
	"strings"
	"time"

	"github.com/kopia/kopia/internal/clock"
)

// WriterTo is a type that can write itself to a JSON writer.
type WriterTo interface {
	WriteTo(jw *JSONWriter)
}

type loggerParamsKeyType string

const loggerParamsKey loggerParamsKeyType = "loggerParams"

// Emit writes the entry to the logger output.
// We are using this particular syntax to avoid allocating an intermediate interface value.
// This allows exactly zero non-amortized allocations in all cases.
func Emit[T WriterTo](ctx context.Context, l *Logger, entry T) {
	if l == nil {
		return
	}

	if l.output == nil {
		return
	}

	jw := NewJSONWriter()
	defer jw.Release()

	jw.BeginObject()
	jw.TimeField("t", l.timeFunc())

	for _, param := range l.params {
		param.WriteValueTo(jw)
	}

	params := ctx.Value(loggerParamsKey)
	if params != nil {
		if params, ok := params.([]ParamWriter); ok {
			for _, p := range params {
				p.WriteValueTo(jw)
			}
		}
	}

	entry.WriteTo(jw)
	jw.EndObject()
	jw.buf = append(jw.buf, '\n')

	l.output(jw.buf)
}

// Log logs a message with no parameters.
func Log(ctx context.Context, l *Logger, text string) {
	Emit(ctx, l, debugMessageWithParams[voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: text})
}

// Log1 logs a message with one parameter.
func Log1[T1 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1) {
	Emit(ctx, l, debugMessageWithParams[T1, voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1})
}

// Log2 logs a message with two parameters.
func Log2[T1 ParamWriter, T2 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2) {
	Emit(ctx, l, debugMessageWithParams[T1, T2, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2})
}

// Log3 logs a message with three parameters.
func Log3[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3) {
	Emit(ctx, l, debugMessageWithParams[T1, T2, T3, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3})
}

// Log4 logs a message with four parameters.
func Log4[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter, T4 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4) {
	Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3, v4: value4})
}

// Log5 logs a message with five parameters.
func Log5[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter, T4 ParamWriter, T5 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4, value5 T5) {
	Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, T5, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3, v4: value4, v5: value5})
}

// Log6 logs a message with six parameters.
func Log6[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter, T4 ParamWriter, T5 ParamWriter, T6 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4, value5 T5, value6 T6) {
	Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, T5, T6]{text: format, v1: value1, v2: value2, v3: value3, v4: value4, v5: value5, v6: value6})
}

// WithParams returns a new context carrying the given parameters; they are written into every log entry emitted with that context.
func WithParams(ctx context.Context, params ...ParamWriter) context.Context {
	existing := ctx.Value(loggerParamsKey)
	if existing != nil {
		if existing, ok := existing.([]ParamWriter); ok {
			params = append(append([]ParamWriter(nil), existing...), params...)
		}
	}

	return context.WithValue(ctx, loggerParamsKey, params)
}

type voidParamValue struct{}

func (e voidParamValue) WriteValueTo(*JSONWriter) {}

type debugMessageWithParams[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter, T4 ParamWriter, T5 ParamWriter, T6 ParamWriter] struct {
	text string
	v1   T1
	v2   T2
	v3   T3
	v4   T4
	v5   T5
	v6   T6
}

func (e debugMessageWithParams[T1, T2, T3, T4, T5, T6]) WriteTo(jw *JSONWriter) {
	jw.StringField("m", e.text)
	e.v1.WriteValueTo(jw)
	e.v2.WriteValueTo(jw)
	e.v3.WriteValueTo(jw)
	e.v4.WriteValueTo(jw)
	e.v5.WriteValueTo(jw)
	e.v6.WriteValueTo(jw)
}

// Logger is a logger that writes log entries to the output.
type Logger struct {
	params   []ParamWriter // Parameters to include in each log entry.
	output   OutputFunc
	timeFunc func() time.Time
}

// OutputFunc is a function that writes the log entry to the output.
type OutputFunc func(data []byte)

// NewLogger creates a new logger.
func NewLogger(out OutputFunc, params ...ParamWriter) *Logger {
	return &Logger{
		params:   params,
		output:   out,
		timeFunc: clock.Now,
	}
}

// RandomSpanID generates a random span ID (40 random bits == 5 bytes, encoded as 8 base32 ASCII characters).
func RandomSpanID() string {
	var runID [5]byte

	rand.Read(runID[:]) //nolint:errcheck

	return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(runID[:]))
}

// HashSpanID hashes a given value into a span ID.
func HashSpanID(v string) string {
	spanID := sha256.Sum256([]byte(v))
	return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(spanID[:10]))
}
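Taken together, the logger above is used roughly as follows. This is a minimal sketch, not code from this change: the output sink, span names, and values are illustrative, and the packages are `internal`, so this only compiles inside the kopia module.

```go
package main

import (
	"context"
	"os"
	"time"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
)

func main() {
	// Each finished entry arrives as a single newline-terminated JSON line.
	out := func(data []byte) { os.Stderr.Write(data) } //nolint:errcheck

	// Logger-level params are written into every entry (e.g. a repository span).
	l := contentlog.NewLogger(out,
		logparam.String("span:repo", contentlog.RandomSpanID()))

	// Context-level params add further spans for a nested scope.
	ctx := contentlog.WithParams(context.Background(),
		logparam.String("span:flush", contentlog.RandomSpanID()))

	contentlog.Log2(ctx, l, "flushed pack",
		logparam.Int("count", 3),
		logparam.Duration("elapsed", 250*time.Millisecond))
	// => {"t":"...","span:repo":"...","span:flush":"...","m":"flushed pack","count":3,"elapsed":250000}
}
```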
internal/contentlog/contentlog_logger_test.go (Normal file, 511 lines)
@@ -0,0 +1,511 @@
package contentlog_test

import (
	"context"
	"encoding/json"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
)

func TestNewLogger(t *testing.T) {
	t.Run("creates logger with output function", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		require.NotNil(t, logger)
	})

	t.Run("creates logger with parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		params := []contentlog.ParamWriter{
			logparam.String("service", "test"),
			logparam.Int("version", 1),
		}

		logger := contentlog.NewLogger(outputFunc, params...)
		require.NotNil(t, logger)
	})

	t.Run("creates logger with nil output", func(t *testing.T) {
		logger := contentlog.NewLogger(nil)
		require.NotNil(t, logger)
	})
}

func TestLog(t *testing.T) {
	t.Run("logs message with no parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log(ctx, logger, "test message")

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "test message", logEntry["m"])
		require.Contains(t, logEntry, "t") // timestamp field
	})

	t.Run("logs message with logger parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		params := []contentlog.ParamWriter{
			logparam.String("service", "test-service"),
			logparam.Int("version", 2),
		}

		logger := contentlog.NewLogger(outputFunc, params...)
		ctx := context.Background()

		contentlog.Log(ctx, logger, "test message")

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "test message", logEntry["m"])
		require.Equal(t, "test-service", logEntry["service"])
		require.Equal(t, float64(2), logEntry["version"])
		require.Contains(t, logEntry, "t") // timestamp field
	})

	t.Run("handles nil logger gracefully", func(t *testing.T) {
		ctx := context.Background()
		// This should not panic
		contentlog.Log(ctx, nil, "test message")
	})

	t.Run("handles nil output gracefully", func(t *testing.T) {
		logger := contentlog.NewLogger(nil)
		ctx := context.Background()
		// This should not panic
		contentlog.Log(ctx, logger, "test message")
	})
}

func TestLog1(t *testing.T) {
	t.Run("logs message with one parameter", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log1(ctx, logger, "processing item", logparam.String("id", "item-123"))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
	})

	t.Run("logs message with different parameter types", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		testCases := []struct {
			name     string
			message  string
			param    contentlog.ParamWriter
			key      string
			expected any
		}{
			{"string", "string param", logparam.String("str", "hello"), "str", "hello"},
			{"int", "int param", logparam.Int("num", 42), "num", float64(42)},
			{"int64", "int64 param", logparam.Int64("big", 9223372036854775807), "big", float64(9223372036854775807)},
			{"bool", "bool param", logparam.Bool("flag", true), "flag", true},
			{"uint64", "uint64 param", logparam.UInt64("unsigned", 18446744073709551615), "unsigned", float64(18446744073709551615)},
		}

		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				captured = nil // reset

				contentlog.Log1(ctx, logger, tc.message, tc.param)

				require.NotEmpty(t, captured)

				var logEntry map[string]any
				err := json.Unmarshal(captured, &logEntry)
				require.NoError(t, err)
				require.Equal(t, tc.message, logEntry["m"])
				require.Equal(t, tc.expected, logEntry[tc.key])
			})
		}
	})
}

func TestLog2(t *testing.T) {
	t.Run("logs message with two parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log2(ctx, logger, "processing item",
			logparam.String("id", "item-123"),
			logparam.Int("count", 5))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
		require.Equal(t, float64(5), logEntry["count"])
	})
}

func TestLog3(t *testing.T) {
	t.Run("logs message with three parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log3(ctx, logger, "processing item",
			logparam.String("id", "item-123"),
			logparam.Int("count", 5),
			logparam.Bool("active", true))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
		require.Equal(t, float64(5), logEntry["count"])
		require.Equal(t, true, logEntry["active"])
	})
}

func TestLog4(t *testing.T) {
	t.Run("logs message with four parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log4(ctx, logger, "processing item",
			logparam.String("id", "item-123"),
			logparam.Int("count", 5),
			logparam.Bool("active", true),
			logparam.String("status", "processing"))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
		require.Equal(t, float64(5), logEntry["count"])
		require.Equal(t, true, logEntry["active"])
		require.Equal(t, "processing", logEntry["status"])
	})
}

func TestLog5(t *testing.T) {
	t.Run("logs message with five parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log5(ctx, logger, "processing item",
			logparam.String("id", "item-123"),
			logparam.Int("count", 5),
			logparam.Bool("active", true),
			logparam.String("status", "processing"),
			logparam.Int64("size", 1024))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
		require.Equal(t, float64(5), logEntry["count"])
		require.Equal(t, true, logEntry["active"])
		require.Equal(t, "processing", logEntry["status"])
		require.Equal(t, float64(1024), logEntry["size"])
	})
}

func TestLog6(t *testing.T) {
	t.Run("logs message with six parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log6(ctx, logger, "processing item",
			logparam.String("id", "item-123"),
			logparam.Int("count", 5),
			logparam.Bool("active", true),
			logparam.String("status", "processing"),
			logparam.Int64("size", 1024),
			logparam.UInt64("flags", 0xFF))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "processing item", logEntry["m"])
		require.Equal(t, "item-123", logEntry["id"])
		require.Equal(t, float64(5), logEntry["count"])
		require.Equal(t, true, logEntry["active"])
		require.Equal(t, "processing", logEntry["status"])
		require.Equal(t, float64(1024), logEntry["size"])
		require.Equal(t, float64(0xFF), logEntry["flags"])
	})
}

func TestEmit(t *testing.T) {
	t.Run("emits custom WriterTo entry", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		customEntry := &customLogEntry{
			message: "custom entry",
			value:   42,
		}

		contentlog.Emit(ctx, logger, customEntry)

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "custom entry", logEntry["message"])
		require.Equal(t, float64(42), logEntry["value"])
		require.Contains(t, logEntry, "t") // timestamp field
	})

	t.Run("emits with logger parameters", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		params := []contentlog.ParamWriter{
			logparam.String("service", "custom-service"),
			logparam.Int("version", 3),
		}

		logger := contentlog.NewLogger(outputFunc, params...)
		ctx := context.Background()

		customEntry := &customLogEntry{
			message: "custom entry",
			value:   42,
		}

		contentlog.Emit(ctx, logger, customEntry)

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "custom entry", logEntry["message"])
		require.Equal(t, float64(42), logEntry["value"])
		require.Equal(t, "custom-service", logEntry["service"])
		require.Equal(t, float64(3), logEntry["version"])
	})

	t.Run("handles nil logger gracefully", func(t *testing.T) {
		ctx := context.Background()
		customEntry := &customLogEntry{message: "test", value: 1}
		// This should not panic
		contentlog.Emit(ctx, nil, customEntry)
	})

	t.Run("handles nil output gracefully", func(t *testing.T) {
		logger := contentlog.NewLogger(nil)
		ctx := context.Background()
		customEntry := &customLogEntry{message: "test", value: 1}
		// This should not panic
		contentlog.Emit(ctx, logger, customEntry)
	})
}

func TestLoggerMultipleLogs(t *testing.T) {
	t.Run("handles multiple log entries", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log(ctx, logger, "first message")
		contentlog.Log1(ctx, logger, "second message", logparam.String("id", "123"))
		contentlog.Log2(ctx, logger, "third message", logparam.Int("count", 5), logparam.Bool("flag", true))

		require.NotEmpty(t, captured)

		// Split by newlines to get individual log entries
		lines := strings.Split(strings.TrimSpace(string(captured)), "\n")
		require.Len(t, lines, 3)

		// Check first entry
		var entry1 map[string]any
		err := json.Unmarshal([]byte(lines[0]), &entry1)
		require.NoError(t, err)
		require.Equal(t, "first message", entry1["m"])

		// Check second entry
		var entry2 map[string]any
		err = json.Unmarshal([]byte(lines[1]), &entry2)
		require.NoError(t, err)
		require.Equal(t, "second message", entry2["m"])
		require.Equal(t, "123", entry2["id"])

		// Check third entry
		var entry3 map[string]any
		err = json.Unmarshal([]byte(lines[2]), &entry3)
		require.NoError(t, err)
		require.Equal(t, "third message", entry3["m"])
		require.Equal(t, float64(5), entry3["count"])
		require.Equal(t, true, entry3["flag"])
	})
}

func TestLoggerErrorHandling(t *testing.T) {
	t.Run("handles nil error parameter", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		contentlog.Log1(ctx, logger, "error test", logparam.Error("err", nil))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "error test", logEntry["m"])
		require.Nil(t, logEntry["err"])
	})

	t.Run("handles real error parameter", func(t *testing.T) {
		var captured []byte

		outputFunc := func(data []byte) {
			captured = append(captured, data...)
		}

		logger := contentlog.NewLogger(outputFunc)
		ctx := context.Background()

		testErr := &testError{msg: "operation failed"}
		contentlog.Log1(ctx, logger, "error test", logparam.Error("err", testErr))

		require.NotEmpty(t, captured)

		var logEntry map[string]any
		err := json.Unmarshal(captured, &logEntry)
		require.NoError(t, err)
		require.Equal(t, "error test", logEntry["m"])
		require.Equal(t, "operation failed", logEntry["err"])
	})
}

// Helper types for testing

type testError struct {
	msg string
}

func (e *testError) Error() string {
	return e.msg
}

type customLogEntry struct {
	message string
	value   int
}

func (e *customLogEntry) WriteTo(jw *contentlog.JSONWriter) {
	jw.StringField("message", e.message)
	jw.Int64Field("value", int64(e.value))
}
internal/contentlog/logparam/logparam.go (Normal file, 134 lines)
@@ -0,0 +1,134 @@
// Package logparam provides parameters for logging.
package logparam

import (
	"time"

	"github.com/kopia/kopia/internal/contentlog"
)

// String creates a string parameter.
//
//nolint:revive
func String(key, value string) stringParam {
	return stringParam{Key: key, Value: value}
}

// Int64 creates an int64 parameter.
//
//nolint:revive
func Int64(key string, value int64) int64Param { return int64Param{Key: key, Value: value} }

// Int creates an int parameter.
//
//nolint:revive
func Int(key string, value int) int64Param {
	return int64Param{Key: key, Value: int64(value)}
}

// Int32 creates an int32 parameter.
//
//nolint:revive
func Int32(key string, value int32) int64Param {
	return int64Param{Key: key, Value: int64(value)}
}

// Bool creates a bool parameter.
//
//nolint:revive
func Bool(key string, value bool) boolParam { return boolParam{Key: key, Value: value} }

// Time creates a time parameter.
//
//nolint:revive
func Time(key string, value time.Time) timeParam { return timeParam{Key: key, Value: value} }

// Error creates an error parameter.
//
//nolint:revive
func Error(key string, value error) errorParam { return errorParam{Key: key, Value: value} }

// UInt64 creates a uint64 parameter.
//
//nolint:revive
func UInt64(key string, value uint64) uint64Param {
	return uint64Param{Key: key, Value: value}
}

// UInt32 creates a uint32 parameter.
//
//nolint:revive
func UInt32(key string, value uint32) uint64Param {
	return uint64Param{Key: key, Value: uint64(value)}
}

// Duration creates a duration parameter (logged as microseconds).
//
//nolint:revive
func Duration(key string, value time.Duration) durationParam {
	return durationParam{Key: key, Value: value}
}

// int64Param is a parameter that writes an int64 value to the JSON writer.
type int64Param struct {
	Key   string
	Value int64
}

func (v int64Param) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.Int64Field(v.Key, v.Value)
}

type uint64Param struct {
	Key   string
	Value uint64
}

func (v uint64Param) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.UInt64Field(v.Key, v.Value)
}

type timeParam struct {
	Key   string
	Value time.Time
}

func (v timeParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.TimeField(v.Key, v.Value)
}

type boolParam struct {
	Key   string
	Value bool
}

func (v boolParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.BoolField(v.Key, v.Value)
}

type durationParam struct {
	Key   string
	Value time.Duration
}

func (v durationParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.Int64Field(v.Key, v.Value.Microseconds())
}

type errorParam struct {
	Key   string
	Value error
}

func (v errorParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.ErrorField(v.Key, v.Value)
}

type stringParam struct {
	Key   string
	Value string
}

func (v stringParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.StringField(v.Key, v.Value)
}
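Since `contentlog.ParamWriter` is satisfied by any value with a `WriteValueTo(*contentlog.JSONWriter)` method, callers can define their own typed parameters in the same style. A hypothetical sketch only; the `epochparam` package and `Epoch` helper below do not exist in this change:

```go
// Package epochparam is a hypothetical example of a caller-defined parameter.
package epochparam

import "github.com/kopia/kopia/internal/contentlog"

type epochParam struct {
	Key   string
	Value int
}

// WriteValueTo writes the epoch number as an int64 field; the receiver is a
// value type, so it flows through the generic Log* helpers without allocating.
func (v epochParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.Int64Field(v.Key, int64(v.Value))
}

// Epoch creates an epoch parameter, analogous to logparam.Int.
//
//nolint:revive
func Epoch(key string, value int) epochParam {
	return epochParam{Key: key, Value: value}
}
```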
internal/contentlog/logparam/logparam_test.go (Normal file, 644 lines)
@@ -0,0 +1,644 @@
package logparam

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/contentlog"
)

func TestString(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    string
		expected map[string]any
	}{
		{
			name:  "simple string",
			key:   "message",
			value: "hello world",
			expected: map[string]any{
				"message": "hello world",
			},
		},
		{
			name:  "empty string",
			key:   "empty",
			value: "",
			expected: map[string]any{
				"empty": "",
			},
		},
		{
			name:  "unicode string",
			key:   "unicode",
			value: "😄🚀🎉",
			expected: map[string]any{
				"unicode": "😄🚀🎉",
			},
		},
		{
			name:  "string with special chars",
			key:   "special",
			value: "hello\nworld\r\t\b\f",
			expected: map[string]any{
				"special": "hello\nworld\r\t\b\f",
			},
		},
		{
			name:  "string with control chars",
			key:   "control",
			value: "hello\x00world\x07test",
			expected: map[string]any{
				"control": "hello\x00world\x07test",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = String(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "String() should not allocate memory")

			// Test output format
			param := String(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestInt64(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    int64
		expected map[string]any
	}{
		{
			name:  "positive int64",
			key:   "count",
			value: 123456789,
			expected: map[string]any{
				"count": 123456789.0,
			},
		},
		{
			name:  "negative int64",
			key:   "negative",
			value: -987654321,
			expected: map[string]any{
				"negative": -987654321.0,
			},
		},
		{
			name:  "zero int64",
			key:   "zero",
			value: 0,
			expected: map[string]any{
				"zero": 0.0,
			},
		},
		{
			name:  "max int64",
			key:   "max",
			value: 9223372036854775807,
			expected: map[string]any{
				"max": 9223372036854775807.0,
			},
		},
		{
			name:  "min int64",
			key:   "min",
			value: -9223372036854775808,
			expected: map[string]any{
				"min": -9223372036854775808.0,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Int64(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Int64() should not allocate memory")

			// Test output format
			param := Int64(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestInt(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    int
		expected map[string]any
	}{
		{
			name:  "positive int",
			key:   "count",
			value: 42,
			expected: map[string]any{
				"count": 42.0,
			},
		},
		{
			name:  "negative int",
			key:   "negative",
			value: -100,
			expected: map[string]any{
				"negative": -100.0,
			},
		},
		{
			name:  "zero int",
			key:   "zero",
			value: 0,
			expected: map[string]any{
				"zero": 0.0,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Int(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Int() should not allocate memory")

			// Test output format
			param := Int(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestInt32(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    int32
		expected map[string]any
	}{
		{
			name:  "positive int32",
			key:   "count",
			value: 2147483647,
			expected: map[string]any{
				"count": 2147483647.0,
			},
		},
		{
			name:  "negative int32",
			key:   "negative",
			value: -2147483648,
			expected: map[string]any{
				"negative": -2147483648.0,
			},
		},
		{
			name:  "zero int32",
			key:   "zero",
			value: 0,
			expected: map[string]any{
				"zero": 0.0,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Int32(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Int32() should not allocate memory")

			// Test output format
			param := Int32(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestBool(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    bool
		expected map[string]any
	}{
		{
			name:  "true bool",
			key:   "enabled",
			value: true,
			expected: map[string]any{
				"enabled": true,
			},
		},
		{
			name:  "false bool",
			key:   "disabled",
			value: false,
			expected: map[string]any{
				"disabled": false,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Bool(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Bool() should not allocate memory")

			// Test output format
			param := Bool(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestTime(t *testing.T) {
	now := clock.Now()
	utcTime := time.Date(2023, 12, 25, 15, 30, 45, 123456789, time.UTC)
	zeroTime := time.Time{}

	tests := []struct {
		name     string
		key      string
		value    time.Time
		expected map[string]any
	}{
		{
			name:  "current time",
			key:   "now",
			value: now,
			expected: map[string]any{
				"now": now.UTC().Format("2006-01-02T15:04:05.000000Z"),
			},
		},
		{
			name:  "UTC time",
			key:   "utc",
			value: utcTime,
			expected: map[string]any{
				"utc": "2023-12-25T15:30:45.123456Z",
			},
		},
		{
			name:  "zero time",
			key:   "zero",
			value: zeroTime,
			expected: map[string]any{
				"zero": "0001-01-01T00:00:00.000000Z",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Time(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Time() should not allocate memory")

			// Test output format
			param := Time(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestError(t *testing.T) {
	err1 := errors.Errorf("test error")
	err2 := errors.Errorf("another error")

	var nilErr error

	tests := []struct {
		name     string
		key      string
		value    error
		expected map[string]any
	}{
		{
			name:  "simple error",
			key:   "err",
			value: err1,
			expected: map[string]any{
				"err": "test error",
			},
		},
		{
			name:  "another error",
			key:   "error",
			value: err2,
			expected: map[string]any{
				"error": "another error",
			},
		},
		{
			name:  "nil error",
			key:   "nil",
			value: nilErr,
			expected: map[string]any{
				"nil": nil,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Error(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Error() should not allocate memory")

			// Test output format
			param := Error(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestUInt64(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    uint64
		expected map[string]any
	}{
		{
			name:  "positive uint64",
			key:   "count",
			value: 18446744073709551615,
			expected: map[string]any{
				"count": 18446744073709551615.0,
			},
		},
		{
			name:  "zero uint64",
			key:   "zero",
			value: 0,
			expected: map[string]any{
				"zero": 0.0,
			},
		},
		{
			name:  "large uint64",
			key:   "large",
			value: 1234567890123456789,
			expected: map[string]any{
				"large": 1234567890123456789.0,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = UInt64(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "UInt64() should not allocate memory")

			// Test output format
			param := UInt64(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

func TestDuration(t *testing.T) {
	tests := []struct {
		name     string
		key      string
		value    time.Duration
		expected map[string]any
	}{
		{
			name:  "positive duration",
			key:   "duration",
			value: 5 * time.Second,
			expected: map[string]any{
				"duration": 5000000.0, // microseconds
			},
		},
		{
			name:  "negative duration",
			key:   "negative",
			value: -2 * time.Minute,
			expected: map[string]any{
				"negative": -120000000.0, // microseconds
			},
		},
		{
			name:  "zero duration",
			key:   "zero",
			value: 0,
			expected: map[string]any{
				"zero": 0.0,
			},
		},
		{
			name:  "microsecond duration",
			key:   "micro",
			value: 123 * time.Microsecond,
			expected: map[string]any{
				"micro": 123.0,
			},
		},
		{
			name:  "nanosecond duration",
			key:   "nano",
			value: 500 * time.Nanosecond,
			expected: map[string]any{
				"nano": 0.0, // rounds down to microseconds
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Test memory allocations
			allocs := testing.AllocsPerRun(100, func() {
				_ = Duration(tt.key, tt.value)
			})
			require.Equal(t, float64(0), allocs, "Duration() should not allocate memory")

			// Test output format
			param := Duration(tt.key, tt.value)

			jw := contentlog.NewJSONWriter()
			defer jw.Release()

			jw.BeginObject()
			param.WriteValueTo(jw)
			jw.EndObject()

			var result map[string]any

			require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result))
			require.Equal(t, tt.expected, result)
		})
	}
}

// TestWriteValueToMemoryAllocations tests that WriteValueTo methods don't allocate memory.
func TestWriteValueToMemoryAllocations(t *testing.T) {
	jw := contentlog.NewJSONWriter()
	defer jw.Release()

	tests := []struct {
		name  string
		param contentlog.ParamWriter
	}{
		{
			name:  "stringParam",
			param: String("key", "value"),
		},
		{
			name:  "int64Param",
			param: Int64("key", 123),
		},
		{
			name:  "boolParam",
			param: Bool("key", true),
		},
		{
			name:  "timeParam",
			param: Time("key", clock.Now()),
		},
		{
			name:  "errorParam",
			param: Error("key", errors.New("test")),
		},
		{
			name:  "uint64Param",
			param: UInt64("key", 123),
		},
		{
			name:  "durationParam",
			param: Duration("key", time.Second),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			allocs := testing.AllocsPerRun(100, func() {
				jw.BeginObject()
				tt.param.WriteValueTo(jw)
				jw.EndObject()
			})
			require.Equal(t, float64(0), allocs, "%s.WriteValueTo() should not allocate memory", tt.name)
		})
	}
}
internal/contentparam/contentid_params.go (Normal file, 27 lines)
@@ -0,0 +1,27 @@
// Package contentparam provides parameters for logging content-related operations.
package contentparam

import (
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/repo/content/index"
)

const maxLoggedContentIDLength = 5

type contentIDParam struct {
	Key   string
	Value index.ID
}

func (e contentIDParam) WriteValueTo(jw *contentlog.JSONWriter) {
	var buf [128]byte

	jw.RawJSONField(e.Key, e.Value.AppendToJSON(buf[:0], maxLoggedContentIDLength))
}

// ContentID is a parameter that writes a content ID to the JSON writer.
//
//nolint:revive
func ContentID(key string, value index.ID) contentIDParam {
	return contentIDParam{Key: key, Value: value}
}
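A content-ID parameter plugs into the same `Log*` helpers as the `logparam` values. A fragment for illustration only, not code from this change; it assumes `ctx`, `log`, an `index.ID` named `cid`, and a length `n` are in scope:

```go
// Only the first maxLoggedContentIDLength bytes of the content ID are logged,
// which keeps content logs compressible since content IDs rarely repeat.
contentlog.Log2(ctx, log, "content written",
	contentparam.ContentID("contentID", cid),
	logparam.Int("length", n))
```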
@@ -13,10 +13,12 @@
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"

	"github.com/kopia/kopia/internal/blobparam"
	"github.com/kopia/kopia/internal/completeset"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/gather"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/logging"
)

// LatestEpoch represents the current epoch number in GetCompleteIndexSet.
@@ -182,7 +184,7 @@ type Manager struct {

	st       blob.Storage
	compact  CompactionFunc
	log      logging.Logger
	log      *contentlog.Logger
	timeFunc func() time.Time

	// wait group that waits for all compaction and cleanup goroutines.
@@ -239,7 +241,11 @@ func (e *Manager) AdvanceDeletionWatermark(ctx context.Context, ts time.Time) er
	}

	if ts.Before(cs.DeletionWatermark) {
		e.log.Debugf("ignoring attempt to move deletion watermark time backwards (%v < %v)", ts.Format(time.RFC3339), cs.DeletionWatermark.Format(time.RFC3339))
		contentlog.Log2(ctx, e.log,
			"ignoring attempt to move deletion watermark time backwards",
			logparam.Time("ts", ts),
			logparam.Time("deletionWatermark", cs.DeletionWatermark),
		)

		return nil
	}
@@ -391,8 +397,8 @@ func (e *Manager) CleanupSupersededIndexes(ctx context.Context) error {
	// may have not observed them yet.
	maxReplacementTime := maxTime.Add(-p.CleanupSafetyMargin)

	e.log.Debugw("Cleaning up superseded index blobs...",
		"maxReplacementTime", maxReplacementTime)
	contentlog.Log1(ctx, e.log, "Cleaning up superseded index blobs...",
		logparam.Time("maxReplacementTime", maxReplacementTime))

	// delete uncompacted indexes for epochs that already have single-epoch compaction
	// that was written sufficiently long ago.
@@ -459,7 +465,7 @@ func (e *Manager) refreshLocked(ctx context.Context) error {
			return errors.Wrap(ctx.Err(), "refreshAttemptLocked")
		}

		e.log.Debugf("refresh attempt failed: %v, sleeping %v before next retry", err, nextDelayTime)
		contentlog.Log2(ctx, e.log, "refresh attempt failed", logparam.Error("error", err), logparam.Duration("nextDelayTime", nextDelayTime))
		time.Sleep(nextDelayTime)

		nextDelayTime = time.Duration(float64(nextDelayTime) * maxRefreshAttemptSleepExponent)
@@ -500,7 +506,7 @@ func (e *Manager) loadDeletionWatermark(ctx context.Context, cs *CurrentSnapshot
	for _, b := range blobs {
		t, ok := deletionWatermarkFromBlobID(b.BlobID)
		if !ok {
			e.log.Debugf("ignoring malformed deletion watermark: %v", b.BlobID)
			contentlog.Log1(ctx, e.log, "ignoring malformed deletion watermark", blobparam.BlobID("blobID", b.BlobID))
			continue
		}

@@ -520,7 +526,10 @@ func (e *Manager) loadRangeCheckpoints(ctx context.Context, cs *CurrentSnapshot)
		return errors.Wrap(err, "error loading full checkpoints")
	}

	e.log.Debugf("ranges: %v", blobs)
	contentlog.Log2(ctx, e.log,
		"ranges",
		logparam.Int("numRanges", len(blobs)),
		blobparam.BlobMetadataList("blobs", blobs))

	var rangeCheckpointSets []*RangeMetadata

@@ -574,7 +583,7 @@ func (e *Manager) MaybeGenerateRangeCheckpoint(ctx context.Context) error {

	latestSettled, firstNonRangeCompacted, compact := getRangeToCompact(cs, *p)
	if !compact {
		e.log.Debug("not generating range checkpoint")
		contentlog.Log(ctx, e.log, "not generating range checkpoint")

		return nil
	}
@@ -641,7 +650,11 @@ func (e *Manager) loadUncompactedEpochs(ctx context.Context, first, last int) (m
// refreshAttemptLocked attempts to load the committedState of
// the index and updates `lastKnownState` state atomically when complete.
func (e *Manager) refreshAttemptLocked(ctx context.Context) error {
	e.log.Debug("refreshAttemptLocked")
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:emr", contentlog.RandomSpanID()))

	contentlog.Log(ctx, e.log, "refreshAttempt started")
	defer contentlog.Log(ctx, e.log, "refreshAttempt finished")

	p, perr := e.getParameters(ctx)
	if perr != nil {
@@ -682,12 +695,14 @@ func (e *Manager) refreshAttemptLocked(ctx context.Context) error {

	cs.UncompactedEpochSets = ues

	e.log.Debugf("current epoch %v, uncompacted epoch sets %v %v %v, valid until %v",
		cs.WriteEpoch,
		len(ues[cs.WriteEpoch-1]),
		len(ues[cs.WriteEpoch]),
		len(ues[cs.WriteEpoch+1]),
		cs.ValidUntil.Format(time.RFC3339Nano))
	contentlog.Log5(ctx, e.log,
		"epochs determined",
		logparam.Int("writeEpoch", cs.WriteEpoch),
		logparam.Int("ues1", len(ues[cs.WriteEpoch-1])),
		logparam.Int("ues2", len(ues[cs.WriteEpoch])),
		logparam.Int("ues3", len(ues[cs.WriteEpoch+1])),
		logparam.Time("validUntil", cs.ValidUntil),
	)

	if now := e.timeFunc(); now.After(cs.ValidUntil) {
		atomic.AddInt32(e.committedStateRefreshTooSlow, 1)
@@ -734,7 +749,12 @@ func (e *Manager) committedState(ctx context.Context, ensureMinTime time.Duratio
	defer e.mu.Unlock()

	if now := e.timeFunc().Add(ensureMinTime); now.After(e.lastKnownState.ValidUntil) {
		e.log.Debugf("refreshing committed state because it's no longer valid (now %v, valid until %v)", now, e.lastKnownState.ValidUntil.Format(time.RFC3339Nano))
		contentlog.Log2(
			ctx, e.log,
			"refreshing committed state because it's no longer valid",
			logparam.Time("now", now),
			logparam.Time("validUntil", e.lastKnownState.ValidUntil),
		)

		if err := e.refreshLocked(ctx); err != nil {
			return CurrentSnapshot{}, err
@@ -758,7 +778,14 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob

		result, err := e.getCompleteIndexSetForCommittedState(ctx, cs, 0, maxEpoch)
		if e.timeFunc().Before(cs.ValidUntil) {
			e.log.Debugf("Complete Index Set for [%v..%v]: %v, deletion watermark %v", 0, maxEpoch, blob.IDsFromMetadata(result), cs.DeletionWatermark)
			contentlog.Log4(ctx, e.log,
				"complete index set",
				logparam.Int("maxEpoch", maxEpoch),
				logparam.Int("resultLength", len(result)),
				blobparam.BlobIDList("result", blob.IDsFromMetadata(result)),
				logparam.Time("deletionWatermark", cs.DeletionWatermark),
			)

			return result, cs.DeletionWatermark, err
		}

@@ -770,7 +797,7 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob
		// indexes that are still treated as authoritative according to old committed state.
		//
		// Retrying will re-examine the state of the world and re-do the logic.
		e.log.Debug("GetCompleteIndexSet took too long, retrying to ensure correctness")
		contentlog.Log(ctx, e.log, "GetCompleteIndexSet took too long, retrying to ensure correctness")
		atomic.AddInt32(e.getCompleteIndexSetTooSlow, 1)
	}
}
@@ -778,12 +805,15 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob
var errWriteIndexTryAgain = errors.New("try again")

// WriteIndex writes new index blob by picking the appropriate prefix based on current epoch.
func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.Bytes) ([]blob.Metadata, error) {
func (e *Manager) WriteIndex(ctx0 context.Context, dataShards map[blob.ID]blob.Bytes) ([]blob.Metadata, error) {
	written := map[blob.ID]blob.Metadata{}
	writtenForEpoch := -1

	attempt := 0

	for {
		e.log.Debug("WriteIndex")
		ctx := contentlog.WithParams(ctx0,
			logparam.String("span:writeEpochIndex", fmt.Sprintf("attempt-%v", attempt)))

		p, err := e.getParameters(ctx)
		if err != nil {
@@ -812,7 +842,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By
		}

		if err != nil {
			e.log.Debugw("index-write-error", "error", err)
			contentlog.Log1(ctx, e.log, "index-write-error", logparam.Error("error", err))
			return nil, err
		}

@@ -821,16 +851,16 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By
			break
		}

		cs, err := e.committedState(ctx, 0)
		cs, err := e.committedState(ctx0, 0)
		if err != nil {
			return nil, errors.Wrap(err, "error getting committed state")
		}

		if cs.WriteEpoch >= writtenForEpoch+2 {
			e.log.Debugw("index-write-extremely-slow")
			contentlog.Log(ctx0, e.log, "index-write-extremely-slow")

			if err = e.deletePartiallyWrittenShards(ctx, written); err != nil {
				e.log.Debugw("index-write-extremely-slow-cleanup-failed", "error", err)
			if err = e.deletePartiallyWrittenShards(ctx0, written); err != nil {
				contentlog.Log1(ctx0, e.log, "index-write-extremely-slow-cleanup-failed", logparam.Error("error", err))
			}

			return nil, ErrVerySlowIndexWrite
@@ -842,7 +872,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By
		results = append(results, v)
	}

	e.log.Debugw("index-write-success", "results", results)
	contentlog.Log1(ctx0, e.log, "index-write-success", blobparam.BlobMetadataList("results", results))

	return results, nil
}
@@ -858,22 +888,23 @@ func (e *Manager) deletePartiallyWrittenShards(ctx context.Context, blobs map[bl
}

func (e *Manager) writeIndexShards(ctx context.Context, dataShards map[blob.ID]blob.Bytes, written map[blob.ID]blob.Metadata, cs CurrentSnapshot) error {
	e.log.Debugw("writing index shards",
		"shardCount", len(dataShards),
		"validUntil", cs.ValidUntil,
		"remaining", cs.ValidUntil.Sub(e.timeFunc()))
	contentlog.Log3(ctx, e.log, "writing index shards",
		logparam.Int("shardCount", len(dataShards)),
		logparam.Time("validUntil", cs.ValidUntil),
		logparam.Duration("remaining", cs.ValidUntil.Sub(e.timeFunc())))

	for unprefixedBlobID, data := range dataShards {
		blobID := UncompactedEpochBlobPrefix(cs.WriteEpoch) + unprefixedBlobID
		if _, ok := written[blobID]; ok {
			e.log.Debugw("already written",
				"blobID", blobID)
			contentlog.Log1(ctx, e.log,
				"already written",
				blobparam.BlobID("blobID", blobID))

			continue
		}

		if now := e.timeFunc(); !now.Before(cs.ValidUntil) {
			e.log.Debugw("write was too slow, retrying",
				"validUntil", cs.ValidUntil)
			contentlog.Log1(ctx, e.log, "write was too slow, retrying", logparam.Time("validUntil", cs.ValidUntil))
			atomic.AddInt32(e.writeIndexTooSlow, 1)

			return errWriteIndexTryAgain
@@ -884,7 +915,7 @@ func (e *Manager) writeIndexShards(ctx context.Context, dataShards map[blob.ID]b
			return errors.Wrap(err, "error writing index blob")
		}

		e.log.Debugw("wrote-index-shard", "metadata", bm)
		contentlog.Log1(ctx, e.log, "wrote-index-shard", blobparam.BlobMetadata("metadata", bm))

		written[bm.BlobID] = bm
	}
@@ -914,7 +945,12 @@ func (e *Manager) getCompleteIndexSetForCommittedState(ctx context.Context, cs C

	eg, ctx := errgroup.WithContext(ctx)

	e.log.Debugf("adding incremental state for epochs %v..%v on top of %v", startEpoch, maxEpoch, result)
	contentlog.Log3(ctx, e.log, "adding incremental state for epochs",
		logparam.Int("startEpoch", startEpoch),
		logparam.Int("maxEpoch", maxEpoch),
		blobparam.BlobIDList("result", blob.IDsFromMetadata(result)),
	)

	cnt := maxEpoch - startEpoch + 1

	tmp := make([][]blob.Metadata, cnt)
@@ -959,7 +995,7 @@ func (e *Manager) MaybeCompactSingleEpoch(ctx context.Context) error {
	}

	if !cs.isSettledEpochNumber(uncompacted) {
		e.log.Debugw("there are no uncompacted epochs eligible for compaction", "oldestUncompactedEpoch", uncompacted)
		contentlog.Log1(ctx, e.log, "there are no uncompacted epochs eligible for compaction", logparam.Int("oldestUncompactedEpoch", uncompacted))

		return nil
	}
@@ -975,7 +1011,7 @@ func (e *Manager) MaybeCompactSingleEpoch(ctx context.Context) error {
		uncompactedBlobs = ue
	}

	e.log.Debugf("starting single-epoch compaction for epoch %v", uncompacted)
	contentlog.Log1(ctx, e.log, "starting single-epoch compaction for epoch", logparam.Int("uncompacted", uncompacted))

	if err := e.compact(ctx, blob.IDsFromMetadata(uncompactedBlobs), compactedEpochBlobPrefix(uncompacted)); err != nil {
		return errors.Wrapf(err, "unable to compact blobs for epoch %v: performance will be affected", uncompacted)
@@ -1007,7 +1043,10 @@ func (e *Manager) getIndexesFromEpochInternal(ctx context.Context, cs CurrentSna
}

func (e *Manager) generateRangeCheckpointFromCommittedState(ctx context.Context, cs CurrentSnapshot, minEpoch, maxEpoch int) error {
	e.log.Debugf("generating range checkpoint for %v..%v", minEpoch, maxEpoch)
	contentlog.Log2(ctx, e.log,
		"generating range checkpoint",
		logparam.Int("minEpoch", minEpoch),
		logparam.Int("maxEpoch", maxEpoch))

	completeSet, err := e.getCompleteIndexSetForCommittedState(ctx, cs, minEpoch, maxEpoch)
	if err != nil {
@@ -1039,7 +1078,7 @@ func rangeCheckpointBlobPrefix(epoch1, epoch2 int) blob.ID {
}

// NewManager creates new epoch manager.
func NewManager(st blob.Storage, paramProvider ParametersProvider, compactor CompactionFunc, log logging.Logger, timeNow func() time.Time) *Manager {
func NewManager(st blob.Storage, paramProvider ParametersProvider, compactor CompactionFunc, log *contentlog.Logger, timeNow func() time.Time) *Manager {
	return &Manager{
		st:  st,
		log: log,

@@ -90,7 +90,7 @@ func newTestEnv(t *testing.T) *epochManagerTestEnv {
	ft := faketime.NewClockTimeWithOffset(0)
	ms := blobtesting.NewMapStorage(data, nil, ft.NowFunc())
	fs := blobtesting.NewFaultyStorage(ms)
	st := logging.NewWrapper(fs, testlogging.NewTestLogger(t), "[STORAGE] ")
	st := logging.NewWrapper(fs, testlogging.NewTestLogger(t), nil, "[STORAGE] ")
	te := &epochManagerTestEnv{unloggedst: ms, st: st, ft: ft}
	m := NewManager(te.st, parameterProvider{&Parameters{
		Enabled: true,
@@ -102,7 +102,7 @@ func newTestEnv(t *testing.T) *epochManagerTestEnv {
		EpochAdvanceOnCountThreshold:          15,
		EpochAdvanceOnTotalSizeBytesThreshold: 20 << 20,
		DeleteParallelism:                     1,
	}}, te.compact, testlogging.NewTestLogger(t), te.ft.NowFunc())
	}}, te.compact, nil, te.ft.NowFunc())
	te.mgr = m
	te.faultyStorage = fs
	te.data = data
@@ -379,7 +379,7 @@ func TestIndexEpochManager_NoCompactionInReadOnly(t *testing.T) {
	te2 := &epochManagerTestEnv{
		data:       te.data,
		unloggedst: st,
		st:         logging.NewWrapper(fs, testlogging.NewTestLogger(t), "[OTHER STORAGE] "),
		st:         logging.NewWrapper(fs, testlogging.NewTestLogger(t), nil, "[OTHER STORAGE] "),
		ft:            te.ft,
		faultyStorage: fs,
	}

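The `refreshAttemptLocked` and `WriteIndex` changes above show the span pattern this PR applies throughout: derive a context carrying a fresh span ID, and every entry logged with that context is then tagged with the same `span:...` key. A hypothetical sketch of the shape; the function below is illustrative, not part of this change:

```go
package example

import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
)

func runMaintenance(ctx context.Context, log *contentlog.Logger) {
	// All entries logged with this ctx share the same random span ID.
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:maintenance", contentlog.RandomSpanID()))

	contentlog.Log(ctx, log, "maintenance started")
	defer contentlog.Log(ctx, log, "maintenance finished")

	// ... nested operations log with ctx and inherit the span ...
}
```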
@@ -4,6 +4,7 @@
import (
	"context"
	"fmt"
	"io"
	"math"
	"os"
	"path/filepath"
@@ -23,7 +24,6 @@
	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/ospath"
	"github.com/kopia/kopia/internal/zaplogutil"
	"github.com/kopia/kopia/repo/content"
	"github.com/kopia/kopia/repo/logging"
)

@@ -53,6 +53,7 @@ type loggingFlags struct {
	consoleLogTimestamps bool
	waitForLogSweep      bool
	disableFileLogging   bool
	disableContentLogs   bool

	cliApp *cli.App
}
@@ -61,6 +62,7 @@ func (c *loggingFlags) setup(cliApp *cli.App, app *kingpin.Application) {
	app.Flag("log-file", "Override log file.").StringVar(&c.logFile)
	app.Flag("content-log-file", "Override content log file.").Hidden().StringVar(&c.contentLogFile)
	app.Flag("disable-file-logging", "Disable file-based logging.").BoolVar(&c.disableFileLogging)
	app.Flag("disable-content-log", "Disable creation of content logs.").BoolVar(&c.disableContentLogs)

	app.Flag("log-dir", "Directory where log files should be written.").Envar(cliApp.EnvName("KOPIA_LOG_DIR")).Default(ospath.LogsDir()).StringVar(&c.logDir)
	app.Flag("log-dir-max-files", "Maximum number of log files to retain").Envar(cliApp.EnvName("KOPIA_LOG_DIR_MAX_FILES")).Default("1000").Hidden().IntVar(&c.logDirMaxFiles)
@@ -75,7 +77,7 @@ func (c *loggingFlags) setup(cliApp *cli.App, app *kingpin.Application) {
	app.Flag("json-log-console", "JSON log file").Hidden().BoolVar(&c.jsonLogConsole)
	app.Flag("json-log-file", "JSON log file").Hidden().BoolVar(&c.jsonLogFile)
	app.Flag("file-log-level", "File log level").Default("debug").EnumVar(&c.fileLogLevel, logLevels...)
	app.Flag("file-log-local-tz", "When logging to a file, use local timezone").Hidden().Envar(cliApp.EnvName("KOPIA_FILE_LOG_LOCAL_TZ")).BoolVar(&c.fileLogLocalTimezone)
	app.Flag("file-log-local-tz", "When logging to a file, use local timezone").Default("false").Hidden().Envar(cliApp.EnvName("KOPIA_FILE_LOG_LOCAL_TZ")).BoolVar(&c.fileLogLocalTimezone)
	app.Flag("force-color", "Force color output").Hidden().Envar(cliApp.EnvName("KOPIA_FORCE_COLOR")).BoolVar(&c.forceColor)
	app.Flag("disable-color", "Disable color output").Hidden().Envar(cliApp.EnvName("KOPIA_DISABLE_COLOR")).BoolVar(&c.disableColor)
	app.Flag("console-timestamps", "Log timestamps to stderr.").Hidden().Default("false").Envar(cliApp.EnvName("KOPIA_CONSOLE_TIMESTAMPS")).BoolVar(&c.consoleLogTimestamps)
@@ -122,22 +124,15 @@ func (c *loggingFlags) initialize(ctx *kingpin.ParseContext) error {

	rootLogger := zap.New(zapcore.NewTee(rootCores...), zap.WithClock(zaplogutil.Clock()))

	var contentCore zapcore.Core
	if c.disableFileLogging {
		contentCore = c.setupConsoleCore()
	} else {
		contentCore = c.setupContentLogFileBackend(now, suffix)
	var contentLogWriter io.Writer

	if !c.disableFileLogging && !c.disableContentLogs {
		contentLogWriter = c.setupLogFileBasedLogger(now, "content-logs", suffix, c.contentLogFile, c.contentLogDirMaxFiles, c.contentLogDirMaxTotalSizeMB, c.contentLogDirMaxAge)
	}

	contentLogger := zap.New(contentCore, zap.WithClock(zaplogutil.Clock())).Sugar()

	c.cliApp.SetLoggerFactory(func(module string) logging.Logger {
		if module == content.FormatLogModule {
			return contentLogger
		}

		return rootLogger.Named(module).Sugar()
	})
	}, contentLogWriter)

	if c.forceColor {
		color.NoColor = false
@@ -152,7 +147,6 @@ func (c *loggingFlags) initialize(ctx *kingpin.ParseContext) error {

func (c *loggingFlags) setupConsoleCore() zapcore.Core {
	ec := zapcore.EncoderConfig{
		LevelKey:   "l",
		MessageKey: "m",
		LineEnding: zapcore.DefaultLineEnding,
		EncodeTime: zapcore.RFC3339NanoTimeEncoder,
@@ -185,10 +179,11 @@ func (c *loggingFlags) setupConsoleCore() zapcore.Core {
	if c.jsonLogConsole {
		ec.EncodeLevel = zapcore.CapitalLevelEncoder

		stec.EmitLogLevel = false
		ec.NameKey = "n"
		ec.EncodeName = zapcore.FullNameEncoder
	} else {
		stec.EmitLogLevel = true
		stec.EmitLogLevel = false
		stec.DoNotEmitInfoLevel = true
		stec.ColoredLogLevel = !c.disableColor
	}
@@ -302,17 +297,6 @@ func (c *loggingFlags) jsonOrConsoleEncoder(ec zaplogutil.StdConsoleEncoderConfi
|
||||
return zaplogutil.NewStdConsoleEncoder(ec)
|
||||
}
|
||||
|
||||
func (c *loggingFlags) setupContentLogFileBackend(now time.Time, suffix string) zapcore.Core {
|
||||
return zapcore.NewCore(
|
||||
zaplogutil.NewStdConsoleEncoder(zaplogutil.StdConsoleEncoderConfig{
|
||||
TimeLayout: zaplogutil.PreciseLayout,
|
||||
LocalTime: false,
|
||||
},
|
||||
),
|
||||
c.setupLogFileBasedLogger(now, "content-logs", suffix, c.contentLogFile, c.contentLogDirMaxFiles, c.contentLogDirMaxTotalSizeMB, c.contentLogDirMaxAge),
|
||||
zap.DebugLevel)
|
||||
}
|
||||
|
||||
func shouldSweepLog(maxFiles int, maxAge time.Duration) bool {
|
||||
return maxFiles > 0 || maxAge > 0
|
||||
}
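Note the wiring above: with `--disable-content-log` (or file logging disabled entirely), `contentLogWriter` stays nil and flows through as-is. The null-manager test later in this change suggests the intended contract is that nil loggers degrade to no-ops; a sketch of that assumption (hypothetical function and package names, for illustration only):

```go
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/repodiag"
)

// Nil-safety contract implied by TestLogManager_Null further below: a nil
// manager yields a nil logger, and logging through a nil logger is assumed
// to be a silent no-op, so call sites never need nil guards.
func logsAreOptional(ctx context.Context) {
	var lm *repodiag.LogManager // nil when content logging is disabled

	l := lm.NewLogger("example") // returns nil on a nil receiver
	contentlog.Log(ctx, l, "silently dropped")
	lm.Sync() // also nil-safe
}
```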
@@ -2,6 +2,7 @@

import (
    "bufio"
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
@@ -22,7 +23,6 @@

var (
    cliLogFormat              = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}Z (DEBUG|INFO|WARN) [a-z/]+ .*$`)
    contentLogFormat          = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}Z .*$`)
    cliLogFormatLocalTimezone = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}[^Z][^ ]+ (DEBUG|INFO|WARN) [a-z/]+ .*$`)
)

@@ -56,7 +56,7 @@ func TestLoggingFlags(t *testing.T) {
    }

    verifyFileLogFormat(t, filepath.Join(tmpLogDir, "cli-logs", "latest.log"), cliLogFormat)
    verifyFileLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"), contentLogFormat)
    verifyJSONLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"))

    _, stderr, err = env.Run(t, false, "snap", "create", dir1,
        "--file-log-local-tz", "--no-progress", "--log-level=debug", "--disable-color",
@@ -69,7 +69,7 @@ func TestLoggingFlags(t *testing.T) {
        verifyFileLogFormat(t, filepath.Join(tmpLogDir, "cli-logs", "latest.log"), cliLogFormatLocalTimezone)
    }

    verifyFileLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"), contentLogFormat)
    verifyJSONLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"))

    for _, l := range stderr {
        require.NotContains(t, l, "INFO") // INFO is omitted
@@ -256,3 +256,18 @@ func getTotalDirSize(t *testing.T, dir string) int {

    return totalSize
}

func verifyJSONLogFormat(t *testing.T, fname string) {
    t.Helper()

    f, err := os.Open(fname)
    require.NoError(t, err)

    defer f.Close()

    s := bufio.NewScanner(f)

    for s.Scan() {
        require.True(t, json.Valid(s.Bytes()), "log line is not valid JSON: %q", s.Text())
    }
}

@@ -92,7 +92,7 @@ func openEquivalentStorageConnections(ctx context.Context, st blob.Storage, n in

    log(ctx).Debugw("opened equivalent storage connection", "connectionID", i)

    result = append(result, loggingwrapper.NewWrapper(c, log(ctx), fmt.Sprintf("[STORAGE-%v] ", i)))
    result = append(result, loggingwrapper.NewWrapper(c, log(ctx), nil, fmt.Sprintf("[STORAGE-%v] ", i)))
    }

    return result, nil

@@ -2,28 +2,29 @@
package repodiag

import (
    "compress/gzip"
    "context"
    "crypto/rand"
    "fmt"
    "io"
    "sync"
    "sync/atomic"
    "time"

    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"

    "github.com/kopia/kopia/internal/clock"
    "github.com/kopia/kopia/internal/contentlog"
    "github.com/kopia/kopia/internal/contentlog/logparam"
    "github.com/kopia/kopia/internal/gather"
    "github.com/kopia/kopia/internal/zaplogutil"
    "github.com/kopia/kopia/repo/blob"
)

const blobLoggerFlushThreshold = 4 << 20

// LogBlobPrefix is a prefix given to text logs stored in repository.
const LogBlobPrefix = "_log_"

// LogManager manages writing encrypted log blobs to the repository.
type LogManager struct {
    disableRepositoryLog bool

    // Set by Enable(). Log blobs are not written to the repository until
    // Enable() is called.
    enabled atomic.Bool
@@ -35,34 +36,28 @@ type LogManager struct {
    writer *BlobWriter

    timeFunc       func() time.Time
    flushThreshold int
}
    flushThreshold int // +checklocksignore
    prefix         blob.ID

func (m *LogManager) encryptAndWriteLogBlob(prefix blob.ID, data gather.Bytes, closeFunc func()) {
    m.writer.EncryptAndWriteBlobAsync(m.ctx, prefix, data, closeFunc)
    mu             sync.Mutex
    currentSegment *gather.WriteBuffer
    startTime      int64
    params         []contentlog.ParamWriter
    textWriter     io.Writer

    nextChunkNumber atomic.Uint64
    gz              *gzip.Writer
}

// NewLogger creates new logger.
func (m *LogManager) NewLogger() *zap.SugaredLogger {
func (m *LogManager) NewLogger(name string) *contentlog.Logger {
    if m == nil {
        return zap.NewNop().Sugar()
        return nil
    }

    var rnd [2]byte

    rand.Read(rnd[:]) //nolint:errcheck

    w := &logWriteSyncer{
        m:      m,
        prefix: blob.ID(fmt.Sprintf("%v%v_%x", LogBlobPrefix, clock.Now().Local().Format("20060102150405"), rnd)),
    }

    return zap.New(zapcore.NewCore(
        zaplogutil.NewStdConsoleEncoder(zaplogutil.StdConsoleEncoderConfig{
            TimeLayout: zaplogutil.PreciseLayout,
            LocalTime:  false,
        }),
        w, zap.DebugLevel), zap.WithClock(zaplogutil.Clock())).Sugar()
    return contentlog.NewLogger(
        m.outputEntry,
        append(append([]contentlog.ParamWriter(nil), m.params...), logparam.String("n", name))...)
}

// Enable enables writing log blobs to repository.
@@ -75,12 +70,96 @@ func (m *LogManager) Enable() {
    m.enabled.Store(true)
}

// NewLogManager creates a new LogManager that will emit logs as repository blobs.
func NewLogManager(ctx context.Context, w *BlobWriter) *LogManager {
    return &LogManager{
        ctx:            context.WithoutCancel(ctx),
        writer:         w,
        flushThreshold: blobLoggerFlushThreshold,
        timeFunc:       clock.Now,
// Disable disables writing log blobs to repository.
func (m *LogManager) Disable() {
    if m == nil {
        return
    }

    m.enabled.Store(false)
}

func (m *LogManager) outputEntry(data []byte) {
    if m.textWriter != nil {
        m.textWriter.Write(data) //nolint:errcheck
    }

    if !m.enabled.Load() || m.disableRepositoryLog {
        return
    }

    m.mu.Lock()

    var (
        flushBuffer *gather.WriteBuffer
        flushBlobID blob.ID
    )

    if m.currentSegment == nil || m.currentSegment.Length() > m.flushThreshold {
        flushBuffer, flushBlobID = m.initNewBuffer()
    }

    m.gz.Write(data) //nolint:errcheck
    m.mu.Unlock()

    if flushBuffer != nil {
        m.flushNextBuffer(flushBlobID, flushBuffer)
    }
}

func (m *LogManager) flushNextBuffer(blobID blob.ID, buf *gather.WriteBuffer) {
    m.writer.EncryptAndWriteBlobAsync(m.ctx, blobID, buf.Bytes(), buf.Close)
}

// Sync flushes the current buffer to the repository.
func (m *LogManager) Sync() {
    if m == nil {
        return
    }

    m.mu.Lock()
    flushBuffer, flushBlobID := m.initNewBuffer()
    m.mu.Unlock()

    if flushBuffer != nil {
        m.flushNextBuffer(flushBlobID, flushBuffer)
    }
}

func (m *LogManager) initNewBuffer() (flushBuffer *gather.WriteBuffer, flushBlobID blob.ID) {
    if m.gz != nil {
        m.gz.Close() //nolint:errcheck
    }

    flushBuffer = m.currentSegment

    if flushBuffer != nil {
        flushBlobID = blob.ID(fmt.Sprintf("%v_%v_%v_%v_", m.prefix, m.startTime, m.timeFunc().Unix(), m.nextChunkNumber.Add(1)))
    } else {
        flushBlobID = blob.ID("")
    }

    m.startTime = m.timeFunc().Unix()
    m.currentSegment = gather.NewWriteBuffer()
    m.gz = gzip.NewWriter(m.currentSegment)

    return flushBuffer, flushBlobID
}

// NewLogManager creates a new LogManager that will emit logs as repository blobs.
func NewLogManager(ctx context.Context, w *BlobWriter, disableRepositoryLog bool, textWriter io.Writer, params ...contentlog.ParamWriter) *LogManager {
    var rnd [2]byte

    rand.Read(rnd[:]) //nolint:errcheck

    return &LogManager{
        ctx:                  context.WithoutCancel(ctx),
        writer:               w,
        timeFunc:             clock.Now,
        params:               params,
        flushThreshold:       4 << 20, //nolint:mnd
        disableRepositoryLog: disableRepositoryLog,
        prefix:               blob.ID(fmt.Sprintf("%v%v_%x", LogBlobPrefix, clock.Now().Local().Format("20060102150405"), rnd)),
        textWriter:           textWriter,
    }
}
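Putting the rewritten manager together, usage now looks roughly like the tests that follow (a sketch assuming the signatures visible in this diff; the `io.Discard` text writer and the "example" logger name are placeholders):

```go
package example // hypothetical package, for illustration only

import (
	"context"
	"io"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/repodiag"
)

// Mirrors TestLogManager_Enabled below: nothing is written to the repository
// until Enable(), and Sync() flushes the current gzipped segment as a blob.
func writeOneLogBlob(ctx context.Context, w *repodiag.BlobWriter) {
	lm := repodiag.NewLogManager(ctx, w, false /* disableRepositoryLog */, io.Discard)
	lm.Enable()

	l := lm.NewLogger("example")
	contentlog.Log(ctx, l, "hello")

	lm.Sync()   // flush buffered entries into a _log_* blob
	w.Wait(ctx) // wait for the asynchronous encrypted write to finish
}
```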
@@ -4,12 +4,14 @@
    "context"
    "crypto/rand"
    "encoding/hex"
    "io"
    "strings"
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/kopia/kopia/internal/blobtesting"
    "github.com/kopia/kopia/internal/contentlog"
    "github.com/kopia/kopia/internal/repodiag"
    "github.com/kopia/kopia/internal/testlogging"
)
@@ -19,14 +21,14 @@ func TestLogManager_Enabled(t *testing.T) {
    st := blobtesting.NewMapStorage(d, nil, nil)
    w := repodiag.NewWriter(st, newStaticCrypter(t))
    ctx := testlogging.Context(t)
    lm := repodiag.NewLogManager(ctx, w)
    lm := repodiag.NewLogManager(ctx, w, false, io.Discard)

    lm.Enable()
    l := lm.NewLogger()
    l.Info("hello")
    l := lm.NewLogger("test")
    contentlog.Log(ctx, l, "hello")

    require.Empty(t, d)
    l.Sync()
    lm.Sync()
    w.Wait(ctx)

    // make sure log messages are written
@@ -43,10 +45,10 @@ func TestLogManager_AutoFlush(t *testing.T) {
    st := blobtesting.NewMapStorage(d, nil, nil)
    w := repodiag.NewWriter(st, newStaticCrypter(t))
    ctx := testlogging.Context(t)
    lm := repodiag.NewLogManager(ctx, w)
    lm := repodiag.NewLogManager(ctx, w, false, io.Discard)

    lm.Enable()
    l := lm.NewLogger()
    l := lm.NewLogger("test")

    // flush happens after 4 << 20 bytes (4MB) after compression,
    // write ~10MB of base16 data which compresses to ~5MB and writes 1 blob
@@ -54,14 +56,14 @@ func TestLogManager_AutoFlush(t *testing.T) {
        var b [1024]byte

        rand.Read(b[:])
        l.Info(hex.EncodeToString(b[:]))
        contentlog.Log(ctx, l, hex.EncodeToString(b[:]))
    }

    w.Wait(ctx)

    require.Len(t, d, 1)

    l.Sync()
    lm.Sync()
    w.Wait(ctx)

    require.Len(t, d, 2)
@@ -72,13 +74,13 @@ func TestLogManager_NotEnabled(t *testing.T) {
    st := blobtesting.NewMapStorage(d, nil, nil)
    w := repodiag.NewWriter(st, newStaticCrypter(t))
    ctx := testlogging.Context(t)
    lm := repodiag.NewLogManager(ctx, w)
    lm := repodiag.NewLogManager(ctx, w, false, io.Discard)

    l := lm.NewLogger()
    l.Info("hello")
    l := lm.NewLogger("test")
    contentlog.Log(ctx, l, "hello")

    require.Empty(t, d)
    l.Sync()
    lm.Sync()
    w.Wait(ctx)

    // make sure log messages are not written
@@ -91,18 +93,18 @@ func TestLogManager_CancelledContext(t *testing.T) {
    w := repodiag.NewWriter(st, newStaticCrypter(t))
    ctx := testlogging.Context(t)
    cctx, cancel := context.WithCancel(ctx)
    lm := repodiag.NewLogManager(cctx, w)
    lm := repodiag.NewLogManager(cctx, w, false, io.Discard)

    // cancel context, logs should still be written
    cancel()

    lm.Enable()
    l := lm.NewLogger()
    l.Info("hello")
    l := lm.NewLogger("test")
    contentlog.Log(ctx, l, "hello")

    require.Empty(t, d)

    l.Sync()
    lm.Sync()
    w.Wait(ctx)

    // make sure log messages are written
@@ -112,9 +114,11 @@
func TestLogManager_Null(t *testing.T) {
    var lm *repodiag.LogManager

    ctx := testlogging.Context(t)

    lm.Enable()

    l := lm.NewLogger()
    l.Info("hello")
    l.Sync()
    l := lm.NewLogger("test")
    contentlog.Log(ctx, l, "hello")
    lm.Sync()
}

@@ -1,121 +0,0 @@
package repodiag

import (
    "compress/gzip"
    "fmt"
    "io"
    "sync"
    "sync/atomic"

    "github.com/kopia/kopia/internal/gather"
    "github.com/kopia/kopia/repo/blob"
)

// logWriteSyncer writes a sequence of log messages as blobs in the repository.
type logWriteSyncer struct {
    nextChunkNumber atomic.Int32

    m  *LogManager
    mu sync.Mutex

    // +checklocks:mu
    buf *gather.WriteBuffer
    // +checklocks:mu
    gzw *gzip.Writer

    // +checklocks:mu
    startTime int64 // unix timestamp of the first log

    prefix blob.ID // +checklocksignore
}

func (l *logWriteSyncer) Write(b []byte) (int, error) {
    if l != nil {
        l.maybeEncryptAndWriteChunkUnlocked(l.addAndMaybeFlush(b))
    }

    return len(b), nil
}

func (l *logWriteSyncer) maybeEncryptAndWriteChunkUnlocked(data gather.Bytes, closeFunc func()) {
    if data.Length() == 0 {
        closeFunc()
        return
    }

    if !l.m.enabled.Load() {
        closeFunc()
        return
    }

    endTime := l.m.timeFunc().Unix()

    l.mu.Lock()
    st := l.startTime
    l.mu.Unlock()

    prefix := blob.ID(fmt.Sprintf("%v_%v_%v_%v_", l.prefix, st, endTime, l.nextChunkNumber.Add(1)))

    l.m.encryptAndWriteLogBlob(prefix, data, closeFunc)
}

func (l *logWriteSyncer) addAndMaybeFlush(b []byte) (payload gather.Bytes, closeFunc func()) {
    l.mu.Lock()
    defer l.mu.Unlock()

    w := l.ensureWriterInitializedLocked()

    _, err := w.Write(b)
    l.logUnexpectedError(err)

    if l.buf.Length() < l.m.flushThreshold {
        return gather.Bytes{}, func() {}
    }

    return l.flushAndResetLocked()
}

// +checklocks:l.mu
func (l *logWriteSyncer) ensureWriterInitializedLocked() io.Writer {
    if l.gzw == nil {
        l.buf = gather.NewWriteBuffer()
        l.gzw = gzip.NewWriter(l.buf)
        l.startTime = l.m.timeFunc().Unix()
    }

    return l.gzw
}

// +checklocks:l.mu
func (l *logWriteSyncer) flushAndResetLocked() (payload gather.Bytes, closeFunc func()) {
    if l.gzw == nil {
        return gather.Bytes{}, func() {}
    }

    l.logUnexpectedError(l.gzw.Flush())
    l.logUnexpectedError(l.gzw.Close())

    closeBuf := l.buf.Close
    res := l.buf.Bytes()

    l.buf = nil
    l.gzw = nil

    return res, closeBuf
}

func (l *logWriteSyncer) logUnexpectedError(err error) {
    if err == nil {
        return
    }
}

func (l *logWriteSyncer) Sync() error {
    l.mu.Lock()
    data, closeFunc := l.flushAndResetLocked()
    l.mu.Unlock()

    l.maybeEncryptAndWriteChunkUnlocked(data, closeFunc)

    return nil
}
@@ -30,9 +30,9 @@ func (p estimateTaskProgress) Processing(_ context.Context, dirname string) {

func (p estimateTaskProgress) Error(ctx context.Context, dirname string, err error, isIgnored bool) {
    if isIgnored {
        log(ctx).Errorf("ignored error in %v: %v", dirname, err)
        userLog(ctx).Errorf("ignored error in %v: %v", dirname, err)
    } else {
        log(ctx).Errorf("error in %v: %v", dirname, err)
        userLog(ctx).Errorf("error in %v: %v", dirname, err)
    }
}

@@ -76,7 +76,7 @@ func logBucketSamples(ctx context.Context, buckets upload.SampleBuckets, prefix
            units.BytesString(buckets[i-1].MinSize))
    }

    log(ctx).Infof("%v files %v: %7v files, total size %v\n",
    userLog(ctx).Infof("%v files %v: %7v files, total size %v\n",
        prefix,
        sizeRange,
        bucket.Count, units.BytesString(bucket.TotalSize))
@@ -84,16 +84,16 @@ func logBucketSamples(ctx context.Context, buckets upload.SampleBuckets, prefix
    hasAny = true

    if showExamples && len(bucket.Examples) > 0 {
        log(ctx).Info("Examples:")
        userLog(ctx).Info("Examples:")

        for _, sample := range bucket.Examples {
            log(ctx).Infof(" - %v\n", sample)
            userLog(ctx).Infof(" - %v\n", sample)
        }
    }
    }

    if !hasAny {
        log(ctx).Infof("%v files: None", prefix)
        userLog(ctx).Infof("%v files: None", prefix)
    }
}

@@ -24,7 +24,7 @@ func handleMountCreate(ctx context.Context, rc requestContext) (any, *apiError)
        return nil, internalServerError(err)
    }

    log(ctx).Debugf("mount for %v => %v", oid, c.MountPath())
    userLog(ctx).Debugf("mount for %v => %v", oid, c.MountPath())

    return &serverapi.MountedSnapshot{
        Path: c.MountPath(),

@@ -85,7 +85,7 @@ func handleSourcesCreate(ctx context.Context, rc requestContext) (any, *apiError
    if req.CreateSnapshot {
        resp.SnapshotStarted = true

        log(ctx).Debugf("scheduling snapshot of %v immediately...", sourceInfo)
        userLog(ctx).Debugf("scheduling snapshot of %v immediately...", sourceInfo)
        manager.scheduleSnapshotNow()
    }

@@ -19,6 +19,8 @@
    "google.golang.org/grpc/status"

    "github.com/kopia/kopia/internal/auth"
    "github.com/kopia/kopia/internal/contentlog"
    "github.com/kopia/kopia/internal/contentlog/logparam"
    "github.com/kopia/kopia/internal/gather"
    "github.com/kopia/kopia/internal/grpcapi"
    "github.com/kopia/kopia/notification"
@@ -38,6 +40,10 @@ type grpcServerState struct {
    grpcapi.UnimplementedKopiaRepositoryServer

    sem *semaphore.Weighted

    // GRPC server instance for graceful shutdown
    grpcServer *grpc.Server
    grpcMutex  sync.RWMutex
}

// send sends the provided session response with the provided request ID.
@@ -86,6 +92,9 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error {
        return status.Errorf(codes.Unavailable, "not connected to a direct repository")
    }

    log := dr.LogManager().NewLogger("grpc-session")
    ctx = contentlog.WithParams(ctx, logparam.String("span:server-session", contentlog.RandomSpanID()))

    usernameAtHostname, err := s.authenticateGRPCSession(ctx, dr)
    if err != nil {
        return err
@@ -101,12 +110,12 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error {
        return status.Errorf(codes.PermissionDenied, "peer not found in context")
    }

    log(ctx).Infof("starting session for user %q from %v", usernameAtHostname, p.Addr)
    defer log(ctx).Infof("session ended for user %q from %v", usernameAtHostname, p.Addr)
    userLog(ctx).Infof("starting session for user %q from %v", usernameAtHostname, p.Addr)
    defer userLog(ctx).Infof("session ended for user %q from %v", usernameAtHostname, p.Addr)

    opt, err := s.handleInitialSessionHandshake(srv, dr)
    if err != nil {
        log(ctx).Errorf("session handshake error: %v", err)
        userLog(ctx).Errorf("session handshake error: %v", err)
        return err
    }

@@ -119,7 +128,12 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error {
    // propagate any error from the goroutines
    select {
    case err := <-lastErr:
        log(ctx).Errorf("error handling session request: %v", err)
        userLog(ctx).Errorf("error handling session request: %v", err)

        contentlog.Log1(ctx, log,
            "error handling session request",
            logparam.Error("error", err))

        return err

    default:
@@ -622,18 +636,36 @@ func makeGRPCServerState(maxConcurrency int) grpcServerState {
// GRPCRouterHandler returns HTTP handler that supports GRPC services and
// routes non-GRPC calls to the provided handler.
func (s *Server) GRPCRouterHandler(handler http.Handler) http.Handler {
    grpcServer := grpc.NewServer(
        grpc.MaxSendMsgSize(repo.MaxGRPCMessageSize),
        grpc.MaxRecvMsgSize(repo.MaxGRPCMessageSize),
    )
    s.grpcMutex.Lock()
    defer s.grpcMutex.Unlock()

    s.RegisterGRPCHandlers(grpcServer)
    if s.grpcServer == nil {
        s.grpcServer = grpc.NewServer(
            grpc.MaxSendMsgSize(repo.MaxGRPCMessageSize),
            grpc.MaxRecvMsgSize(repo.MaxGRPCMessageSize),
        )

        s.RegisterGRPCHandlers(s.grpcServer)
    }

    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
            grpcServer.ServeHTTP(w, r)
            s.grpcServer.ServeHTTP(w, r)
        } else {
            handler.ServeHTTP(w, r)
        }
    })
}

// ShutdownGRPCServer shuts down the GRPC server.
// Note: Since the GRPC server runs over HTTP handler transport,
// GracefulStop() doesn't work. We use Stop() instead.
func (s *Server) ShutdownGRPCServer() {
    s.grpcMutex.Lock()
    defer s.grpcMutex.Unlock()

    if s.grpcServer != nil {
        s.grpcServer.Stop()
        s.grpcServer = nil
    }
}
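Each GRPC session now gets its own span: the `Session()` hunk above derives a context so that every record written through it carries the same `span:server-session` identifier. A sketch of that pattern in isolation, using only the two calls visible above (the wrapper function here is hypothetical):

```go
package example // hypothetical package, for illustration only

import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
)

// withSessionSpan mirrors the Session() change above: derive a context whose
// log records all carry one random span ID, tying a session's records together.
func withSessionSpan(ctx context.Context) context.Context {
	return contentlog.WithParams(ctx,
		logparam.String("span:server-session", contentlog.RandomSpanID()))
}
```

The same mechanism presumably drives the other span fields (upload, flush, maintenance, and so on), each scoping a different unit of work.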
@@ -38,7 +38,7 @@
    "github.com/kopia/kopia/snapshot/snapshotmaintenance"
)

var log = logging.Module("kopia/server")
var userLog = logging.Module("kopia/server")

const (
    // retry initialization of repository starting at 1s doubling delay each time up to max 5 minutes
@@ -220,7 +220,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool {
    http.Error(rc.w, "Access denied.\n", http.StatusUnauthorized)

    // Log failed authentication attempt
    log(rc.req.Context()).Warnf("failed login attempt by client %s for user %s", rc.req.RemoteAddr, username)
    userLog(rc.req.Context()).Warnf("failed login attempt by client %s for user %s", rc.req.RemoteAddr, username)

    return false
    }
@@ -229,7 +229,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool {

    ac, err := rc.srv.generateShortTermAuthCookie(username, now)
    if err != nil {
        log(rc.req.Context()).Errorf("unable to generate short-term auth cookie: %v", err)
        userLog(rc.req.Context()).Errorf("unable to generate short-term auth cookie: %v", err)
    } else {
        http.SetCookie(rc.w, &http.Cookie{
            Name: kopiaAuthCookie,
@@ -240,7 +240,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool {

    if s.options.LogRequests {
        // Log successful authentication
        log(rc.req.Context()).Infof("successful login by client %s for user %s", rc.req.RemoteAddr, username)
        userLog(rc.req.Context()).Infof("successful login by client %s for user %s", rc.req.RemoteAddr, username)
    }
    }

@@ -370,7 +370,7 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc
    rc.body = body

    if s.options.LogRequests {
        log(ctx).Debugf("request %v (%v bytes)", rc.req.URL, len(body))
        userLog(ctx).Debugf("request %v (%v bytes)", rc.req.URL, len(body))
    }

    rc.w.Header().Set("Content-Type", "application/json")
@@ -397,10 +397,10 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc
    if err == nil {
        if b, ok := v.([]byte); ok {
            if _, err := rc.w.Write(b); err != nil {
                log(ctx).Errorf("error writing response: %v", err)
                userLog(ctx).Errorf("error writing response: %v", err)
            }
        } else if err := e.Encode(v); err != nil {
            log(ctx).Errorf("error encoding response: %v", err)
            userLog(ctx).Errorf("error encoding response: %v", err)
        }

        return
@@ -411,7 +411,7 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc
    rc.w.WriteHeader(err.httpErrorCode)

    if s.options.LogRequests && err.apiErrorCode == serverapi.ErrorNotConnected {
        log(ctx).Debugf("%v: error code %v message %v", rc.req.URL, err.apiErrorCode, err.message)
        userLog(ctx).Debugf("%v: error code %v message %v", rc.req.URL, err.apiErrorCode, err.message)
    }

    _ = e.Encode(&serverapi.ErrorResponse{
@@ -438,7 +438,7 @@ func (s *Server) Refresh() {
    ctx := s.rootctx

    if err := s.refreshLocked(ctx); err != nil {
        log(s.rootctx).Warnw("refresh error", "err", err)
        userLog(s.rootctx).Warnw("refresh error", "err", err)
    }
}

@@ -458,13 +458,13 @@ func (s *Server) refreshLocked(ctx context.Context) error {

    if s.authenticator != nil {
        if err := s.authenticator.Refresh(ctx); err != nil {
            log(ctx).Errorf("unable to refresh authenticator: %v", err)
            userLog(ctx).Errorf("unable to refresh authenticator: %v", err)
        }
    }

    if s.authorizer != nil {
        if err := s.authorizer.Refresh(ctx); err != nil {
            log(ctx).Errorf("unable to refresh authorizer: %v", err)
            userLog(ctx).Errorf("unable to refresh authorizer: %v", err)
        }
    }

@@ -498,7 +498,7 @@ func handleFlush(ctx context.Context, rc requestContext) (any, *apiError) {
}

func handleShutdown(ctx context.Context, rc requestContext) (any, *apiError) {
    log(ctx).Info("shutting down due to API request")
    userLog(ctx).Info("shutting down due to API request")

    rc.srv.requestShutdown(ctx)

@@ -509,7 +509,7 @@ func (s *Server) requestShutdown(ctx context.Context) {
    if f := s.OnShutdown; f != nil {
        go func() {
            if err := f(ctx); err != nil {
                log(ctx).Errorf("shutdown failed: %v", err)
                userLog(ctx).Errorf("shutdown failed: %v", err)
            }
        }()
    }
@@ -528,7 +528,7 @@ func (s *Server) beginUpload(ctx context.Context, src snapshot.SourceInfo) bool
    defer s.parallelSnapshotsMutex.Unlock()

    for s.currentParallelSnapshots >= s.maxParallelSnapshots && ctx.Err() == nil {
        log(ctx).Debugf("waiting on for parallel snapshot upload slot to be available %v", src)
        userLog(ctx).Debugf("waiting on for parallel snapshot upload slot to be available %v", src)
        s.parallelSnapshotsChanged.Wait()
    }

@@ -547,7 +547,7 @@ func (s *Server) endUpload(ctx context.Context, src snapshot.SourceInfo, mwe *no
    s.parallelSnapshotsMutex.Lock()
    defer s.parallelSnapshotsMutex.Unlock()

    log(ctx).Debugf("finished uploading %v", src)
    userLog(ctx).Debugf("finished uploading %v", src)

    s.currentParallelSnapshots--

@@ -617,9 +617,9 @@ func (s *Server) SetRepository(ctx context.Context, rep repo.Repository) error {
    s.unmountAllLocked(ctx)

    // close previous source managers
    log(ctx).Debug("stopping all source managers")
    userLog(ctx).Debug("stopping all source managers")
    s.stopAllSourceManagersLocked(ctx)
    log(ctx).Debug("stopped all source managers")
    userLog(ctx).Debug("stopped all source managers")

    if err := s.rep.Close(ctx); err != nil {
        return errors.Wrap(err, "unable to close previous repository")
@@ -919,7 +919,7 @@ func (s *Server) InitRepositoryAsync(ctx context.Context, mode string, initializ
    }

    if rep == nil {
        log(ctx).Info("Repository not configured.")
        userLog(ctx).Info("Repository not configured.")
    }

    if err = s.SetRepository(ctx, rep); err != nil {
@@ -958,7 +958,7 @@ func RetryInitRepository(initialize InitRepositoryFunc) InitRepositoryFunc {
        return rep, nil
    }

    log(ctx).Warnf("unable to open repository: %v, will keep trying until canceled. Sleeping for %v", rerr, nextSleepTime)
    userLog(ctx).Warnf("unable to open repository: %v, will keep trying until canceled. Sleeping for %v", rerr, nextSleepTime)

    if !clock.SleepInterruptibly(ctx, nextSleepTime) {
        return nil, ctx.Err()
@@ -1016,7 +1016,7 @@ func (s *Server) getOrCreateSourceManager(ctx context.Context, src snapshot.Sour
    defer s.serverMutex.Unlock()

    if s.sourceManagers[src] == nil {
        log(ctx).Debugf("creating source manager for %v", src)
        userLog(ctx).Debugf("creating source manager for %v", src)
        sm := newSourceManager(src, s, s.rep)
        s.sourceManagers[src] = sm

@@ -1097,7 +1097,7 @@ func (s *Server) getSchedulerItems(ctx context.Context, now time.Time) []schedul
            NextTime: nst,
        })
    } else {
        log(ctx).Debugf("no snapshot scheduled for %v %v %v", sm.src, nst, now)
        userLog(ctx).Debugf("no snapshot scheduled for %v %v %v", sm.src, nst, now)
    }
    }

@@ -36,7 +36,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool {

    sessionCookie, err := r.Cookie(kopiaSessionCookie)
    if err != nil {
        log(ctx).Warnf("missing or invalid session cookie for %q: %v", path, err)
        userLog(ctx).Warnf("missing or invalid session cookie for %q: %v", path, err)

        return false
    }
@@ -45,7 +45,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool {

    token := r.Header.Get(apiclient.CSRFTokenHeader)
    if token == "" {
        log(ctx).Warnf("missing CSRF token for %v", path)
        userLog(ctx).Warnf("missing CSRF token for %v", path)
        return false
    }

@@ -53,7 +53,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool {
        return true
    }

    log(ctx).Warnf("got invalid CSRF token for %v: %v, want %v, session %v", path, token, validToken, sessionCookie.Value)
    userLog(ctx).Warnf("got invalid CSRF token for %v: %v, want %v, session %v", path, token, validToken, sessionCookie.Value)

    return false
}

@@ -56,7 +56,7 @@ func (s *srvMaintenance) stop(ctx context.Context) {
    close(s.closed)
    s.wg.Wait()

    log(ctx).Debug("maintenance manager stopped")
    userLog(ctx).Debug("maintenance manager stopped")
}

func (s *srvMaintenance) beforeRun() {
@@ -84,7 +84,7 @@ func (s *srvMaintenance) refresh(ctx context.Context, notify bool) {
    defer s.mu.Unlock()

    if err := s.refreshLocked(ctx); err != nil {
        log(ctx).Debugw("unable to refresh maintenance manager", "err", err)
        userLog(ctx).Debugw("unable to refresh maintenance manager", "err", err)
    }
}

@@ -130,7 +130,7 @@ func maybeStartMaintenanceManager(
    }

    if rep.ClientOptions().ReadOnly {
        log(ctx).Warnln("the repository connection is read-only, maintenance tasks will not be performed on this repository")
        userLog(ctx).Warnln("the repository connection is read-only, maintenance tasks will not be performed on this repository")

        return nil
    }
@@ -148,7 +148,7 @@ func maybeStartMaintenanceManager(

    m.wg.Add(1)

    log(ctx).Debug("starting maintenance manager")
    userLog(ctx).Debug("starting maintenance manager")

    m.refresh(ctx, false)

@@ -158,14 +158,14 @@ func maybeStartMaintenanceManager(
    for {
        select {
        case <-m.triggerChan:
            log(ctx).Debug("starting maintenance task")
            userLog(ctx).Debug("starting maintenance task")

            m.beforeRun()

            t0 := clock.Now()

            if err := srv.runMaintenanceTask(mctx, dr); err != nil {
                log(ctx).Debugw("maintenance task failed", "err", err)
                userLog(ctx).Debugw("maintenance task failed", "err", err)
                m.afterFailedRun()

                if srv.enableErrorNotifications() {
@@ -182,7 +182,7 @@ func maybeStartMaintenanceManager(
            m.refresh(mctx, true)

        case <-m.closed:
            log(ctx).Debug("stopping maintenance manager")
            userLog(ctx).Debug("stopping maintenance manager")
            return
        }
    }

@@ -24,7 +24,7 @@ func (s *Server) getMountController(ctx context.Context, rep repo.Repository, oi
        return nil, nil
    }

    log(ctx).Debugf("mount controller for %v not found, starting", oid)
    userLog(ctx).Debugf("mount controller for %v not found, starting", oid)

    c, err := mount.Directory(ctx, snapshotfs.DirectoryEntry(rep, oid, nil), "*", mount.Options{})
    if err != nil {
@@ -60,7 +60,7 @@ func (s *Server) deleteMount(oid object.ID) {
func (s *Server) unmountAllLocked(ctx context.Context) {
    for oid, c := range s.mounts {
        if err := c.Unmount(ctx); err != nil {
            log(ctx).Errorf("unable to unmount %v", oid)
            userLog(ctx).Errorf("unable to unmount %v", oid)
        }

        delete(s.mounts, oid)

@@ -196,10 +196,10 @@ func (s *sourceManager) runLocal(ctx context.Context) {

    s.setStatus("PENDING")

    log(ctx).Debugw("snapshotting", "source", s.src)
    userLog(ctx).Debugw("snapshotting", "source", s.src)

    if err := s.server.runSnapshotTask(ctx, s.src, s.snapshotInternal); err != nil {
        log(ctx).Errorf("snapshot error: %v", err)
        userLog(ctx).Errorf("snapshot error: %v", err)

        s.backoffBeforeNextSnapshot()
    } else {
@@ -252,17 +252,17 @@ func (s *sourceManager) scheduleSnapshotNow() {
}

func (s *sourceManager) upload(ctx context.Context) serverapi.SourceActionResponse {
    log(ctx).Infof("upload triggered via API: %v", s.src)
    userLog(ctx).Infof("upload triggered via API: %v", s.src)
    s.scheduleSnapshotNow()

    return serverapi.SourceActionResponse{Success: true}
}

func (s *sourceManager) cancel(ctx context.Context) serverapi.SourceActionResponse {
    log(ctx).Debugw("cancel triggered via API", "source", s.src)
    userLog(ctx).Debugw("cancel triggered via API", "source", s.src)

    if u := s.currentUploader(); u != nil {
        log(ctx).Info("canceling current upload")
        userLog(ctx).Info("canceling current upload")
        u.Cancel()
    }

@@ -270,14 +270,14 @@ func (s *sourceManager) cancel(ctx context.Context) serverapi.SourceActionRespon
}

func (s *sourceManager) pause(ctx context.Context) serverapi.SourceActionResponse {
    log(ctx).Debugw("pause triggered via API", "source", s.src)
    userLog(ctx).Debugw("pause triggered via API", "source", s.src)

    s.sourceMutex.Lock()
    s.paused = true
    s.sourceMutex.Unlock()

    if u := s.currentUploader(); u != nil {
        log(ctx).Info("canceling current upload")
        userLog(ctx).Info("canceling current upload")
        u.Cancel()
    }

@@ -287,7 +287,7 @@ func (s *sourceManager) pause(ctx context.Context) serverapi.SourceActionRespons
}

func (s *sourceManager) resume(ctx context.Context) serverapi.SourceActionResponse {
    log(ctx).Debugw("resume triggered via API", "source", s.src)
    userLog(ctx).Debugw("resume triggered via API", "source", s.src)

    s.sourceMutex.Lock()
    s.paused = false
@@ -300,7 +300,7 @@ func (s *sourceManager) resume(ctx context.Context) serverapi.SourceActionRespon

func (s *sourceManager) stop(ctx context.Context) {
    if u := s.currentUploader(); u != nil {
        log(ctx).Infow("canceling current upload", "src", s.src)
        userLog(ctx).Infow("canceling current upload", "src", s.src)
        u.Cancel()
    }

@@ -320,7 +320,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
    // check if we got closed while waiting on semaphore
    select {
    case <-s.closed:
        log(ctx).Infof("not snapshotting %v because source manager is shutting down", s.src)
        userLog(ctx).Infof("not snapshotting %v because source manager is shutting down", s.src)
        return nil

    default:
@@ -351,7 +351,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
            onUpload(numBytes)
        },
    }, func(ctx context.Context, w repo.RepositoryWriter) error {
        log(ctx).Debugf("uploading %v", s.src)
        userLog(ctx).Debugf("uploading %v", s.src)

        u := upload.NewUploader(w)

@@ -372,7 +372,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
            u.Progress.UploadedBytes(numBytes)
        }

        log(ctx).Debugf("starting upload of %v", s.src)
        userLog(ctx).Debugf("starting upload of %v", s.src)
        s.setUploader(u)

        manifest, err := u.Upload(ctx, localEntry, policyTree, s.src, manifestsSinceLastCompleteSnapshot...)
@@ -389,7 +389,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
        ignoreIdenticalSnapshot := policyTree.EffectivePolicy().RetentionPolicy.IgnoreIdenticalSnapshots.OrDefault(false)
        if ignoreIdenticalSnapshot && len(manifestsSinceLastCompleteSnapshot) > 0 {
            if manifestsSinceLastCompleteSnapshot[0].RootObjectID() == manifest.RootObjectID() {
                log(ctx).Debug("Not saving snapshot because no files have been changed since previous snapshot")
                userLog(ctx).Debug("Not saving snapshot because no files have been changed since previous snapshot")
                return nil
            }
        }
@@ -403,7 +403,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro
            return errors.Wrap(err, "unable to apply retention policy")
        }

        log(ctx).Debugf("created snapshot %v", snapshotID)
        userLog(ctx).Debugf("created snapshot %v", snapshotID)

        return nil
    })

@@ -1,7 +1,9 @@
// Package stats provides helpers for simple stats
package stats

import "sync/atomic"
import (
    "sync/atomic"
)

// CountSum holds sum and count values.
type CountSum struct {

@@ -8,6 +8,9 @@

    "go.opentelemetry.io/otel"

    "github.com/kopia/kopia/internal/blobparam"
    "github.com/kopia/kopia/internal/contentlog"
    "github.com/kopia/kopia/internal/contentlog/logparam"
    "github.com/kopia/kopia/internal/timetrack"
    "github.com/kopia/kopia/repo/blob"
    "github.com/kopia/kopia/repo/logging"
@@ -22,15 +25,19 @@ type loggingStorage struct {
    base   blob.Storage
    prefix string
    logger logging.Logger
    clog   *contentlog.Logger
}

func (s *loggingStorage) beginConcurrency() {
func (s *loggingStorage) beginConcurrency(ctx context.Context) {
    v := s.concurrency.Add(1)

    if mv := s.maxConcurrency.Load(); v > mv {
        if s.maxConcurrency.CompareAndSwap(mv, v) && v > 0 {
            s.logger.Debugw(s.prefix+"concurrency level reached",
                "maxConcurrency", v)

            contentlog.Log1(ctx, s.clog, "concurrency level reached",
                logparam.Int32("maxConcurrency", v))
        }
    }
}
@@ -43,7 +50,7 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length
    ctx, span := tracer.Start(ctx, "GetBlob")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -59,6 +66,15 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length
        "duration", dt,
    )

    contentlog.Log6(ctx, s.clog,
        "GetBlob",
        blobparam.BlobID("blobID", id),
        logparam.Int64("offset", offset),
        logparam.Int64("length", length),
        logparam.Int("outputLength", output.Length()),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -78,6 +94,13 @@ func (s *loggingStorage) GetCapacity(ctx context.Context) (blob.Capacity, error)
        "duration", dt,
    )

    contentlog.Log4(ctx, s.clog,
        "GetCapacity",
        logparam.UInt64("sizeBytes", c.SizeB),
        logparam.UInt64("freeBytes", c.FreeB),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return c, err
}
@@ -90,7 +113,7 @@ func (s *loggingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Meta
    ctx, span := tracer.Start(ctx, "GetMetadata")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -104,6 +127,13 @@ func (s *loggingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Meta
        "duration", dt,
    )

    contentlog.Log4(ctx, s.clog,
        "GetMetadata",
        blobparam.BlobID("blobID", id),
        blobparam.BlobMetadata("result", result),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return result, err
}
@@ -112,7 +142,7 @@ func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Byte
    ctx, span := tracer.Start(ctx, "PutBlob")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -126,6 +156,13 @@ func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Byte
        "duration", dt,
    )

    contentlog.Log4(ctx, s.clog,
        "PutBlob",
        blobparam.BlobID("blobID", id),
        logparam.Int("length", data.Length()),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -134,7 +171,7 @@ func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
    ctx, span := tracer.Start(ctx, "DeleteBlob")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -146,6 +183,13 @@ func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error {
        "error", s.translateError(err),
        "duration", dt,
    )

    contentlog.Log3(ctx, s.clog,
        "DeleteBlob",
        blobparam.BlobID("blobID", id),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -154,7 +198,7 @@ func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback
    ctx, span := tracer.Start(ctx, "ListBlobs")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -172,6 +216,13 @@ func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback
        "duration", dt,
    )

    contentlog.Log4(ctx, s.clog,
        "ListBlobs",
        blobparam.BlobID("prefix", prefix),
        logparam.Int("resultCount", cnt),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -189,6 +240,11 @@ func (s *loggingStorage) Close(ctx context.Context) error {
        "duration", dt,
    )

    contentlog.Log2(ctx, s.clog,
        "Close",
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -211,6 +267,11 @@ func (s *loggingStorage) FlushCaches(ctx context.Context) error {
        "duration", dt,
    )

    contentlog.Log2(ctx, s.clog,
        "FlushCaches",
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -219,7 +280,7 @@ func (s *loggingStorage) ExtendBlobRetention(ctx context.Context, b blob.ID, opt
    ctx, span := tracer.Start(ctx, "ExtendBlobRetention")
    defer span.End()

    s.beginConcurrency()
    s.beginConcurrency(ctx)
    defer s.endConcurrency()

    timer := timetrack.StartTimer()
@@ -231,6 +292,13 @@ func (s *loggingStorage) ExtendBlobRetention(ctx context.Context, b blob.ID, opt
        "error", err,
        "duration", dt,
    )

    contentlog.Log3(ctx, s.clog,
        "ExtendBlobRetention",
        blobparam.BlobID("blobID", b),
        logparam.Error("error", err),
        logparam.Duration("duration", dt))

    //nolint:wrapcheck
    return err
}
@@ -248,6 +316,6 @@ func (s *loggingStorage) translateError(err error) any {
}

// NewWrapper returns a Storage wrapper that logs all storage commands.
func NewWrapper(wrapped blob.Storage, logger logging.Logger, prefix string) blob.Storage {
    return &loggingStorage{base: wrapped, logger: logger, prefix: prefix}
func NewWrapper(wrapped blob.Storage, logger logging.Logger, clog *contentlog.Logger, prefix string) blob.Storage {
    return &loggingStorage{base: wrapped, logger: logger, clog: clog, prefix: prefix}
}
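Callers of the storage wrapper therefore pass two sinks now: a human-readable `logging.Logger` plus an optional `*contentlog.Logger` for the JSON stream (nil disables it), as the updated test below exercises. A sketch of the call site, assuming only the `NewWrapper` and `contentlog.NewLogger` signatures shown in this diff (the function name and print-based output are placeholders):

```go
package example // hypothetical package, for illustration only

import (
	"fmt"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/blob/logging"
	repologging "github.com/kopia/kopia/repo/logging"
)

// wrapWithBothLogs wraps a storage backend so every operation is recorded
// twice: as prefixed text via the zap-style logger and as a structured JSON
// record via contentlog. The output function here just prints each JSON
// line; real callers would hand contentlog the content-log file writer.
func wrapWithBothLogs(st blob.Storage, textLogger repologging.Logger) blob.Storage {
	clog := contentlog.NewLogger(func(line []byte) { fmt.Print(string(line)) })
	return logging.NewWrapper(st, textLogger, clog, "[STORAGE] ")
}
```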
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
package logging_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/kopia/kopia/internal/blobtesting"
|
||||
"github.com/kopia/kopia/internal/contentlog"
|
||||
"github.com/kopia/kopia/internal/testlogging"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/repo/blob/logging"
|
||||
@@ -31,7 +35,15 @@ func TestLoggingStorage(t *testing.T) {
|
||||
kt := map[blob.ID]time.Time{}
|
||||
underlying := blobtesting.NewMapStorage(data, kt, nil)
|
||||
|
||||
st := logging.NewWrapper(underlying, testlogging.Printf(myOutput, ""), myPrefix)
|
||||
var jsonOutputCount atomic.Int32
|
||||
|
||||
myJSONOutput := func(data []byte) {
|
||||
v := map[string]any{}
|
||||
require.NoError(t, json.Unmarshal(data, &v))
|
||||
jsonOutputCount.Add(1)
|
||||
}
|
||||
|
||||
st := logging.NewWrapper(underlying, testlogging.Printf(myOutput, ""), contentlog.NewLogger(myJSONOutput), myPrefix)
|
||||
if st == nil {
|
||||
t.Fatalf("unexpected result: %v", st)
|
||||
}
|
||||
@@ -47,6 +59,10 @@ func TestLoggingStorage(t *testing.T) {
|
||||
t.Errorf("did not write any output!")
|
||||
}
|
||||
|
||||
if jsonOutputCount.Load() == 0 {
|
||||
t.Errorf("did not write any JSON output!")
|
||||
}
|
||||
|
||||
if got, want := st.ConnectionInfo().Type, underlying.ConnectionInfo().Type; got != want {
|
||||
t.Errorf("unexpected connection info %v, want %v", got, want)
|
||||
}
|
||||
|
||||
@@ -250,7 +250,7 @@ func TestRCloneProviders(t *testing.T) {
|
||||
k, ok := st.(Killable)
|
||||
require.True(t, ok, "not killable")
|
||||
|
||||
blobtesting.VerifyStorage(ctx, t, logging.NewWrapper(st, testlogging.NewTestLogger(t), "[RCLONE-STORAGE] "),
|
||||
blobtesting.VerifyStorage(ctx, t, logging.NewWrapper(st, testlogging.NewTestLogger(t), nil, "[RCLONE-STORAGE] "),
|
||||
blob.PutOptions{})
|
||||
|
||||
blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st)
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestThrottling(t *testing.T) {
|
||||
ctx := testlogging.Context(t)
|
||||
m := &mockThrottler{}
|
||||
st := blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil)
|
||||
l := bloblogging.NewWrapper(st, testlogging.Printf(m.Printf, ""), "inner.")
|
||||
l := bloblogging.NewWrapper(st, testlogging.Printf(m.Printf, ""), nil, "inner.")
|
||||
wrapped := throttling.NewWrapper(l, m)
|
||||
|
||||
var tmp gather.WriteBuffer
|
||||
|
||||
@@ -11,12 +11,14 @@
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/kopia/kopia/internal/blobparam"
|
||||
"github.com/kopia/kopia/internal/clock"
|
||||
"github.com/kopia/kopia/internal/contentlog"
|
||||
"github.com/kopia/kopia/internal/contentlog/logparam"
|
||||
"github.com/kopia/kopia/internal/gather"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/repo/content/index"
|
||||
"github.com/kopia/kopia/repo/format"
|
||||
"github.com/kopia/kopia/repo/logging"
|
||||
)
|
||||
|
||||
// smallIndexEntryCountThreshold is the threshold to determine whether an
|
||||
@@ -45,7 +47,7 @@ type committedContentIndex struct {
|
||||
// fetchIndexBlob retrieves one index blob from storage
|
||||
fetchIndexBlob func(ctx context.Context, blobID blob.ID, output *gather.WriteBuffer) error
|
||||
|
||||
log logging.Logger
|
||||
log *contentlog.Logger
|
||||
}
|
||||
|
||||
type committedContentIndexCache interface {
|
||||
@@ -113,7 +115,7 @@ func (c *committedContentIndex) addIndexBlob(ctx context.Context, indexBlobID bl
|
||||
return nil
|
||||
}
|
||||
|
||||
c.log.Debugf("use-new-committed-index %v", indexBlobID)
|
||||
contentlog.Log1(ctx, c.log, "use-new-committed-index", blobparam.BlobID("indexBlobID", indexBlobID))
|
||||
|
||||
ndx, err := c.cache.openIndex(ctx, indexBlobID)
|
||||
if err != nil {
|
||||
@@ -195,7 +197,7 @@ func (c *committedContentIndex) merge(ctx context.Context, indexFiles []blob.ID)
|
||||
return nil, nil, errors.Wrap(err, "unable to combine small indexes")
|
||||
}
|
||||
|
||||
c.log.Debugw("combined index segments", "original", len(newMerged), "merged", len(mergedAndCombined))
|
||||
contentlog.Log2(ctx, c.log, "combined index segments", logparam.Int("original", len(newMerged)), logparam.Int("merged", len(mergedAndCombined)))
|
||||
|
||||
return mergedAndCombined, newUsedMap, nil
|
||||
}
|
||||
@@ -212,7 +214,8 @@ func (c *committedContentIndex) use(ctx context.Context, indexFiles []blob.ID, i
|
||||
return nil
|
||||
}
|
||||
|
||||
c.log.Debugf("use-indexes %v", indexFiles)
|
||||
contentlog.Log1(ctx, c.log, "use-indexes",
|
||||
blobparam.BlobIDList("indexFiles", indexFiles))
|
||||
|
||||
mergedAndCombined, newInUse, err := c.merge(ctx, indexFiles)
|
||||
if err != nil {
|
||||
@@ -229,13 +232,17 @@ func (c *committedContentIndex) use(ctx context.Context, indexFiles []blob.ID, i
|
||||
for k, old := range oldInUse {
|
||||
if newInUse[k] == nil {
|
||||
if err := old.Close(); err != nil {
|
||||
c.log.Errorf("unable to close unused index file: %v", err)
|
||||
contentlog.Log1(ctx, c.log,
|
||||
"unable to close unused index file",
|
||||
logparam.Error("err", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.cache.expireUnused(ctx, indexFiles); err != nil {
|
||||
c.log.Errorf("unable to expire unused index files: %v", err)
|
||||
contentlog.Log1(ctx, c.log,
|
||||
"unable to expire unused index files",
|
||||
logparam.Error("err", err))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -309,7 +316,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv
|
||||
return nil
|
||||
}
|
||||
|
||||
c.log.Debugf("Downloading %v new index blobs...", len(indexBlobs))
|
||||
contentlog.Log1(ctx, c.log, "Downloading new index blobs", logparam.Int("len", len(indexBlobs)))
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
@@ -323,7 +330,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv
|
||||
|
||||
if err := c.fetchIndexBlob(ctx, indexBlobID, &data); err != nil {
|
||||
if isPermissiveCacheLoading {
|
||||
c.log.Errorf("skipping bad read of index blob %v", indexBlobID)
|
||||
contentlog.Log1(ctx, c.log, "skipping bad read of index blob", blobparam.BlobID("indexBlobID", indexBlobID))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -343,7 +350,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv
|
||||
return errors.Wrap(err, "error downloading indexes")
|
||||
}
|
||||
|
||||
c.log.Debug("Index blobs downloaded.")
|
||||
contentlog.Log(ctx, c.log, "Index blobs downloaded")
|
||||
|
||||
return nil
|
||||
}
|
||||
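
The pattern repeated throughout this file is worth calling out: printf-style `Debugf`/`Debugw` calls are replaced by arity-specific `contentlog.LogN` helpers that take typed, named parameters. A minimal sketch of the new call shape (a sketch only; it does not compile outside the kopia module, since `contentlog` and `blobparam` are internal packages):

```go
package content

import (
	"context"

	"github.com/kopia/kopia/internal/blobparam"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/repo/blob"
)

// logUseIndexes illustrates the call shape used above: one LogN helper per
// parameter count, each parameter carrying its own name and JSON encoder.
func logUseIndexes(ctx context.Context, log *contentlog.Logger, indexFiles []blob.ID) {
	contentlog.Log1(ctx, log, "use-indexes",
		blobparam.BlobIDList("indexFiles", indexFiles))
}
```
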
@@ -372,7 +379,7 @@ func newCommittedContentIndex(caching *CachingOptions,
formatProvider format.Provider,
permissiveCacheLoading bool,
fetchIndexBlob func(ctx context.Context, blobID blob.ID, output *gather.WriteBuffer) error,
log logging.Logger,
log *contentlog.Logger,
minSweepAge time.Duration,
) *committedContentIndex {
var cache committedContentIndexCache

@@ -20,7 +20,12 @@ func TestCommittedContentIndexCache_Disk(t *testing.T) {

ta := faketime.NewClockTimeWithOffset(0)

testCache(t, &diskCommittedContentIndexCache{testutil.TempDirectory(t), ta.NowFunc(), func() int { return 3 }, testlogging.Printf(t.Logf, ""), DefaultIndexCacheSweepAge}, ta)
testCache(t, &diskCommittedContentIndexCache{
testutil.TempDirectory(t),
ta.NowFunc(),
func() int { return 3 },
nil, DefaultIndexCacheSweepAge,
}, ta)
}

func TestCommittedContentIndexCache_Memory(t *testing.T) {
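
Note that the updated test passes `nil` for the logger field. This suggests (an inference from the tests, not stated in the diff) that the `contentlog` helpers treat a nil `*contentlog.Logger` as a no-op sink, so test fixtures can skip logger construction entirely:

```go
// Inference from the test above: a nil *contentlog.Logger appears to be a
// safe no-op, so the fixture omits logger setup.
cache := &diskCommittedContentIndexCache{
	testutil.TempDirectory(t),
	ta.NowFunc(),
	func() int { return 3 },
	nil, // content logger intentionally omitted in tests
	DefaultIndexCacheSweepAge,
}
testCache(t, cache, ta)
```
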
@@ -10,11 +10,13 @@
"github.com/edsrzf/mmap-go"
"github.com/pkg/errors"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/cache"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/content/index"
"github.com/kopia/kopia/repo/logging"
)

const (
@@ -25,7 +27,7 @@ type diskCommittedContentIndexCache struct {
dirname string
timeNow func() time.Time
v1PerContentOverhead func() int
log logging.Logger
log *contentlog.Logger
minSweepAge time.Duration
}

@@ -33,10 +35,10 @@ func (c *diskCommittedContentIndexCache) indexBlobPath(indexBlobID blob.ID) stri
return filepath.Join(c.dirname, string(indexBlobID)+simpleIndexSuffix)
}

func (c *diskCommittedContentIndexCache) openIndex(_ context.Context, indexBlobID blob.ID) (index.Index, error) {
func (c *diskCommittedContentIndexCache) openIndex(ctx context.Context, indexBlobID blob.ID) (index.Index, error) {
fullpath := c.indexBlobPath(indexBlobID)

f, closeMmap, err := c.mmapOpenWithRetry(fullpath)
f, closeMmap, err := c.mmapOpenWithRetry(ctx, fullpath)
if err != nil {
return nil, err
}
@@ -52,7 +54,7 @@ func (c *diskCommittedContentIndexCache) openIndex(_ context.Context, indexBlobI

// mmapOpenWithRetry attempts mmap.Open() with exponential back-off to work around a rare issue specific to Windows where
// we can't open the file right after it has been written.
func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MMap, func() error, error) {
func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(ctx context.Context, path string) (mmap.MMap, func() error, error) {
const (
maxRetries = 8
startingDelay = 10 * time.Millisecond
@@ -65,7 +67,9 @@ func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MM
retryCount := 0
for err != nil && retryCount < maxRetries {
retryCount++
c.log.Debugf("retry #%v unable to mmap.Open(): %v", retryCount, err)
|
||||
contentlog.Log2(ctx, c.log, "retry #%v unable to mmap.Open()",
|
||||
logparam.Int("retryCount", retryCount),
|
||||
logparam.Error("err", err))
|
||||
time.Sleep(nextDelay)
nextDelay *= 2

@@ -165,10 +169,10 @@ func writeTempFileAtomic(dirname string, data []byte) (string, error) {
return tf.Name(), nil
}

func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used []blob.ID) error {
c.log.Debugw("expireUnused",
"except", used,
"minSweepAge", c.minSweepAge)
func (c *diskCommittedContentIndexCache) expireUnused(ctx context.Context, used []blob.ID) error {
contentlog.Log2(ctx, c.log, "expireUnused",
blobparam.BlobIDList("except", used),
logparam.Duration("minSweepAge", c.minSweepAge))

entries, err := os.ReadDir(c.dirname)
if err != nil {
@@ -200,18 +204,20 @@ func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used []

for _, rem := range remaining {
if c.timeNow().Sub(rem.ModTime()) > c.minSweepAge {
c.log.Debugw("removing unused",
"name", rem.Name(),
"mtime", rem.ModTime())
contentlog.Log2(ctx, c.log, "removing unused",
logparam.String("name", rem.Name()),
logparam.Time("mtime", rem.ModTime()))

if err := os.Remove(filepath.Join(c.dirname, rem.Name())); err != nil {
c.log.Errorf("unable to remove unused index file: %v", err)
contentlog.Log1(ctx, c.log,
"unable to remove unused index file",
logparam.Error("err", err))
}
} else {
c.log.Debugw("keeping unused index because it's too new",
"name", rem.Name(),
"mtime", rem.ModTime(),
"threshold", c.minSweepAge)
contentlog.Log3(ctx, c.log, "keeping unused index because it's too new",
logparam.String("name", rem.Name()),
logparam.Time("mtime", rem.ModTime()),
logparam.Duration("threshold", c.minSweepAge))
}
}

@@ -8,11 +8,13 @@
"time"

"github.com/pkg/errors"
"go.uber.org/zap"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/cache"
"github.com/kopia/kopia/internal/cacheprot"
"github.com/kopia/kopia/internal/clock"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/epoch"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/listcache"
@@ -38,6 +40,8 @@

const ownWritesCacheDuration = 15 * time.Minute

var log = logging.Module("kopia/content") // +checklocksignore

// constants below specify how long to prevent cache entries from expiring.
const (
DefaultMetadataCacheSweepAge = 24 * time.Hour
@@ -102,12 +106,10 @@ type SharedManager struct {
paddingUnit int

// logger where logs should be written
log logging.Logger
log *contentlog.Logger

// logger associated with the context that opened the repository.
contextLogger logging.Logger
repoLogManager *repodiag.LogManager
internalLogger *zap.SugaredLogger // backing logger for 'sharedBaseLogger'

metricsStruct
}
@@ -143,15 +145,20 @@ func (sm *SharedManager) readPackFileLocalIndex(ctx context.Context, packFile bl

if packFileLength >= indexRecoverPostambleSize {
if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, packFileLength-indexRecoverPostambleSize, indexRecoverPostambleSize, output); err == nil {
sm.log.Debugf("recovered %v index bytes from blob %v using optimized method", output.Length(), packFile)
contentlog.Log2(ctx, sm.log, "recovered index bytes from blob using optimized method", logparam.Int("length", output.Length()), blobparam.BlobID("packFile", packFile))
return nil
}

sm.log.Debugf("unable to recover using optimized method: %v", err)
contentlog.Log1(ctx, sm.log,
"unable to recover using optimized method",
logparam.Error("err", err))
}

if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, 0, -1, output); err == nil {
sm.log.Debugf("recovered %v index bytes from blob %v using full blob read", output.Length(), packFile)
contentlog.Log2(ctx, sm.log,
"recovered index bytes from blob using full blob read",
logparam.Int("length", output.Length()),
blobparam.BlobID("packFile", packFile))

return nil
}
@@ -202,9 +209,15 @@ func (sm *SharedManager) attemptReadPackFileLocalIndex(ctx context.Context, pack

// +checklocks:sm.indexesLock
func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error {
ctx0 := contentlog.WithParams(ctx,
logparam.String("span:loadindex", contentlog.RandomSpanID()))

nextSleepTime := 100 * time.Millisecond //nolint:mnd

for i := range indexLoadAttempts {
ctx := contentlog.WithParams(ctx0,
logparam.Int("loadAttempt", i))

ibm, err0 := sm.indexBlobManager(ctx)
if err0 != nil {
return err0
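
The `span:loadindex` parameter above illustrates how spans work in this change: `contentlog.WithParams` returns a derived context carrying extra parameters, and every record logged under that context repeats them, so one `RandomSpanID` ties together all attempts of a single index load. A hedged sketch of the mechanism, using only the internal APIs visible in this diff:

```go
package content

import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
)

// withLoadIndexSpan sketches the span pattern: parameters attached via
// WithParams ride along in the context, so a single random span ID groups
// every record emitted during one index-load operation, retries included.
func withLoadIndexSpan(ctx context.Context) context.Context {
	return contentlog.WithParams(ctx,
		logparam.String("span:loadindex", contentlog.RandomSpanID()))
}
```
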
@@ -217,11 +230,13 @@ func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error {

if i > 0 {
// invalidate any list caches.
if err := sm.st.FlushCaches(ctx); err != nil {
sm.log.Errorw("unable to flush caches", "err", err)
}
flushTimer := timetrack.StartTimer()
flushErr := sm.st.FlushCaches(ctx)

contentlog.Log2(ctx, sm.log, "flushCaches",
logparam.Duration("latency", flushTimer.Elapsed()),
logparam.Error("error", flushErr))

sm.log.Debugf("encountered NOT_FOUND when loading, sleeping %v before retrying #%v", nextSleepTime, i)
time.Sleep(nextSleepTime)
nextSleepTime *= 2
}
@@ -244,7 +259,9 @@ func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error {
}

if len(indexBlobs) > indexBlobCompactionWarningThreshold {
sm.log.Errorf("Found too many index blobs (%v), this may result in degraded performance.\n\nPlease ensure periodic repository maintenance is enabled or run 'kopia maintenance'.", len(indexBlobs))
log(ctx).Errorf("Found too many index blobs (%v), this may result in degraded performance.\n\nPlease ensure periodic repository maintenance is enabled or run 'kopia maintenance'.", len(indexBlobs))

contentlog.Log1(ctx, sm.log, "Found too many index blobs", logparam.Int("len", len(indexBlobs)))
}

sm.refreshIndexesAfter = sm.timeNow().Add(indexRefreshFrequency)
@@ -411,14 +428,8 @@ func newCacheBackingStorage(ctx context.Context, caching *CachingOptions, subdir
}, false)
}

func (sm *SharedManager) namedLogger(n string) logging.Logger {
if sm.internalLogger != nil {
return logging.Broadcast(
sm.contextLogger,
sm.internalLogger.Named("["+n+"]"))
}

return sm.contextLogger
func (sm *SharedManager) namedLogger(n string) *contentlog.Logger {
return sm.repoLogManager.NewLogger(n)
}

func contentCacheSweepSettings(caching *CachingOptions) cache.SweepSettings {
@@ -574,25 +585,12 @@ func (sm *SharedManager) CloseShared(ctx context.Context) error {
sm.metadataCache.Close(ctx)
sm.indexBlobCache.Close(ctx)

if sm.internalLogger != nil {
sm.internalLogger.Sync() //nolint:errcheck
}

sm.indexBlobManagerV1.EpochManager().Flush()
sm.repoLogManager.Sync()

return nil
}

// AlsoLogToContentLog wraps the provided context so that all logs are also sent to
// the internal content log.
func (sm *SharedManager) AlsoLogToContentLog(ctx context.Context) context.Context {
sm.repoLogManager.Enable()

return logging.WithAdditionalLogger(ctx, func(_ string) logging.Logger {
return sm.log
})
}
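
A brief usage note for `AlsoLogToContentLog` (a hedged sketch; actual call sites live elsewhere in the codebase): it enables the repository log manager and returns a derived context whose regular logs are mirrored into the shared manager's content logger:

```go
// Hedged usage sketch: route this operation's regular logs into the
// content log as well; the method is the one defined above.
ctx = sm.AlsoLogToContentLog(ctx)
```
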
func (sm *SharedManager) shouldRefreshIndexes() bool {
sm.indexesLock.RLock()
defer sm.indexesLock.RUnlock()
@@ -624,16 +622,11 @@ func NewSharedManager(ctx context.Context, st blob.Storage, prov format.Provider
paddingUnit: defaultPaddingUnit,
checkInvariantsOnUnlock: os.Getenv("KOPIA_VERIFY_INVARIANTS") != "",
repoLogManager: repoLogManager,
contextLogger: logging.Module(FormatLogModule)(ctx),

metricsStruct: initMetricsStruct(mr),
}

if !opts.DisableInternalLog {
sm.internalLogger = sm.repoLogManager.NewLogger()
}

sm.log = sm.namedLogger("shared-manager")
sm.log = sm.repoLogManager.NewLogger("shared-manager")

caching = caching.CloneOrDefault()

@@ -13,8 +13,12 @@
"github.com/pkg/errors"
"go.opentelemetry.io/otel"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/cache"
"github.com/kopia/kopia/internal/clock"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/contentparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/timetrack"
"github.com/kopia/kopia/repo/blob"
@@ -23,7 +27,6 @@
"github.com/kopia/kopia/repo/content/indexblob"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/logging"
)

// Prefixes for pack blobs.
@@ -98,7 +101,7 @@ type WriteManager struct {

*SharedManager

log logging.Logger
log *contentlog.Logger
}

type pendingPackInfo struct {
@@ -125,7 +128,9 @@ func (bm *WriteManager) DeleteContent(ctx context.Context, contentID ID) error {

bm.revision.Add(1)

bm.log.Debugf("delete-content %v", contentID)
contentlog.Log1(ctx, bm.log,
"delete-content",
contentparam.ContentID("contentID", contentID))

// remove from all pending packs
for _, pp := range bm.pendingPacks {
@@ -228,7 +233,7 @@ func (bm *WriteManager) maybeRetryWritingFailedPacksUnlocked(ctx context.Context

// do not start new uploads while flushing
for bm.flushing {
bm.log.Debug("wait-before-retry")
contentlog.Log(ctx, bm.log, "wait-before-retry")
bm.cond.Wait()
}

@@ -239,7 +244,7 @@ func (bm *WriteManager) maybeRetryWritingFailedPacksUnlocked(ctx context.Context
// will remove from it on success.
fp := append([]*pendingPackInfo(nil), bm.failedPacks...)
for _, pp := range fp {
bm.log.Debugf("retry-write %v", pp.packBlobID)
contentlog.Log1(ctx, bm.log, "retry-write", blobparam.BlobID("packBlobID", pp.packBlobID))

if err := bm.writePackAndAddToIndexLocked(ctx, pp); err != nil {
return errors.Wrap(err, "error writing previously failed pack")
@@ -280,7 +285,7 @@ func (bm *WriteManager) addToPackUnlocked(ctx context.Context, contentID ID, dat

// do not start new uploads while flushing
for bm.flushing {
bm.log.Debug("wait-before-flush")
contentlog.Log(ctx, bm.log, "wait-before-flush")
bm.cond.Wait()
}

@@ -291,7 +296,8 @@ func (bm *WriteManager) addToPackUnlocked(ctx context.Context, contentID ID, dat
// will remove from it on success.
fp := append([]*pendingPackInfo(nil), bm.failedPacks...)
for _, pp := range fp {
bm.log.Debugf("retry-write %v", pp.packBlobID)
contentlog.Log1(ctx, bm.log, "retry-write",
blobparam.BlobID("packBlobID", pp.packBlobID))

if err = bm.writePackAndAddToIndexLocked(ctx, pp); err != nil {
bm.unlock(ctx)
@@ -351,7 +357,7 @@ func (bm *WriteManager) DisableIndexFlush(ctx context.Context) {
bm.lock()
defer bm.unlock(ctx)

bm.log.Debug("DisableIndexFlush()")
contentlog.Log(ctx, bm.log, "DisableIndexFlush()")

bm.disableIndexFlushCount++
}
@@ -362,7 +368,7 @@ func (bm *WriteManager) EnableIndexFlush(ctx context.Context) {
bm.lock()
defer bm.unlock(ctx)

bm.log.Debug("EnableIndexFlush()")
contentlog.Log(ctx, bm.log, "EnableIndexFlush()")

bm.disableIndexFlushCount--
}
@@ -444,7 +450,7 @@ func (bm *WriteManager) flushPackIndexesLocked(ctx context.Context, mp format.Mu
defer span.End()

if bm.disableIndexFlushCount > 0 {
bm.log.Debug("not flushing index because flushes are currently disabled")
contentlog.Log(ctx, bm.log, "not flushing index because flushes are currently disabled")
return nil
}

@@ -551,23 +557,23 @@ func (bm *WriteManager) processWritePackResultLocked(pp *pendingPackInfo, packFi
}
func (sm *SharedManager) prepareAndWritePackInternal(ctx context.Context, pp *pendingPackInfo, onUpload func(int64)) (index.Builder, error) {
ctx = contentlog.WithParams(ctx,
logparam.String("span:writePack", string(pp.packBlobID)))

mp, mperr := sm.format.GetMutableParameters(ctx)
if mperr != nil {
return nil, errors.Wrap(mperr, "mutable parameters")
}

packFileIndex, err := sm.preparePackDataContent(mp, pp)
packFileIndex, err := sm.preparePackDataContent(ctx, mp, pp)
if err != nil {
return nil, errors.Wrap(err, "error preparing data content")
}

if pp.currentPackData.Length() > 0 {
if err := sm.writePackFileNotLocked(ctx, pp.packBlobID, pp.currentPackData.Bytes(), onUpload); err != nil {
sm.log.Debugf("failed-pack %v %v", pp.packBlobID, err)
return nil, errors.Wrapf(err, "can't save pack data blob %v", pp.packBlobID)
}

sm.log.Debugf("wrote-pack %v %v", pp.packBlobID, pp.currentPackData.Length())
}

return packFileIndex, nil
@@ -599,6 +605,9 @@ func (bm *WriteManager) setFlushingLocked(v bool) {
// Any pending writes completed before Flush() has started are guaranteed to be committed to the
// repository before Flush() returns.
func (bm *WriteManager) Flush(ctx context.Context) error {
ctx = contentlog.WithParams(ctx,
logparam.String("span:flush", contentlog.RandomSpanID()))

mp, mperr := bm.format.GetMutableParameters(ctx)
if mperr != nil {
return errors.Wrap(mperr, "mutable parameters")
@@ -607,8 +616,6 @@ func (bm *WriteManager) Flush(ctx context.Context) error {
bm.lock()
defer bm.unlock(ctx)

bm.log.Debug("flush")

// when finished flushing, notify goroutines that were waiting for it.
defer bm.cond.Broadcast()

@@ -622,7 +629,7 @@ func (bm *WriteManager) Flush(ctx context.Context) error {
// will remove from it on success.
fp := append([]*pendingPackInfo(nil), bm.failedPacks...)
for _, pp := range fp {
bm.log.Debugf("retry-write %v", pp.packBlobID)
contentlog.Log1(ctx, bm.log, "retry-write", blobparam.BlobID("packBlobID", pp.packBlobID))

if err := bm.writePackAndAddToIndexLocked(ctx, pp); err != nil {
return errors.Wrap(err, "error writing previously failed pack")
@@ -630,7 +637,7 @@ func (bm *WriteManager) Flush(ctx context.Context) error {
}

for len(bm.writingPacks) > 0 {
bm.log.Debugf("waiting for %v in-progress packs to finish", len(bm.writingPacks))
contentlog.Log1(ctx, bm.log, "waiting for in-progress packs to finish", logparam.Int("len", len(bm.writingPacks)))

// wait for packs that are currently being written in other goroutines to finish
bm.cond.Wait()
@@ -652,7 +659,8 @@ func (bm *WriteManager) Flush(ctx context.Context) error {
// TODO(jkowalski): this will currently always re-encrypt and re-compress data, perhaps consider a
// pass-through mode that preserves encrypted/compressed bits.
func (bm *WriteManager) RewriteContent(ctx context.Context, contentID ID) error {
bm.log.Debugf("rewrite-content %v", contentID)
contentlog.Log1(ctx, bm.log, "rewrite-content",
contentparam.ContentID("contentID", contentID))

mp, mperr := bm.format.GetMutableParameters(ctx)
if mperr != nil {
@@ -683,7 +691,8 @@ func (bm *WriteManager) getContentDataAndInfo(ctx context.Context, contentID ID,
// and is marked deleted. If the content exists and is not marked deleted, this
// operation is a no-op.
func (bm *WriteManager) UndeleteContent(ctx context.Context, contentID ID) error {
bm.log.Debugf("UndeleteContent(%q)", contentID)
contentlog.Log1(ctx, bm.log, "UndeleteContent",
contentparam.ContentID("contentID", contentID))

mp, mperr := bm.format.GetMutableParameters(ctx)
if mperr != nil {
@@ -811,12 +820,6 @@ func (bm *WriteManager) WriteContent(ctx context.Context, data gather.Bytes, pre
_, bi, err := bm.getContentInfoReadLocked(ctx, contentID)
bm.mu.RUnlock()

logbuf := logging.GetBuffer()
defer logbuf.Release()

logbuf.AppendString("write-content ")
contentID.AppendToLogBuffer(logbuf)

// content already tracked
if err == nil {
if !bi.Deleted {
@@ -828,12 +831,14 @@ func (bm *WriteManager) WriteContent(ctx context.Context, data gather.Bytes, pre

previousWriteTime = bi.TimestampSeconds

logbuf.AppendString(" previously-deleted:")
logbuf.AppendInt64(previousWriteTime)
contentlog.Log2(ctx, bm.log, "write-content-previously-deleted",
contentparam.ContentID("cid", contentID),
logparam.Int64("previousWriteTime", previousWriteTime))
} else {
contentlog.Log1(ctx, bm.log, "write-content-new",
contentparam.ContentID("cid", contentID))
}

bm.log.Debug(logbuf.String())

return contentID, bm.addToPackUnlocked(ctx, contentID, data, false, comp, previousWriteTime, mp)
}
@@ -857,7 +862,11 @@ func (bm *WriteManager) GetContent(ctx context.Context, contentID ID) (v []byte,

_, err = bm.getContentDataAndInfo(ctx, contentID, &tmp)
if err != nil {
bm.log.Debugf("getContentDataAndInfo(%v) error %v", contentID, err)
contentlog.Log2(ctx, bm.log,
"getContentDataAndInfo",
contentparam.ContentID("contentID", contentID),
logparam.Error("err", err))

return nil, err
}

@@ -911,7 +920,10 @@ func (bm *WriteManager) ContentInfo(ctx context.Context, contentID ID) (Info, er

_, bi, err := bm.getContentInfoReadLocked(ctx, contentID)
if err != nil {
bm.log.Debugf("ContentInfo(%q) - error %v", contentID, err)
contentlog.Log2(ctx, bm.log, "ContentInfo",
contentparam.ContentID("contentID", contentID),
logparam.Error("err", err))

return Info{}, err
}

@@ -948,7 +960,6 @@ func (bm *WriteManager) MetadataCache() cache.ContentCache {
// ManagerOptions are the optional parameters for manager creation.
type ManagerOptions struct {
TimeNow func() time.Time // Time provider
DisableInternalLog bool
PermissiveCacheLoading bool
}

@@ -1004,7 +1015,7 @@ func NewWriteManager(_ context.Context, sm *SharedManager, options SessionOption
sm.uploadedBytes.Add(numBytes)
},

log: sm.namedLogger(writeManagerID),
log: sm.repoLogManager.NewLogger(writeManagerID),
}

wm.cond = sync.NewCond(&wm.mu)

@@ -6,6 +6,8 @@
"github.com/pkg/errors"

"github.com/kopia/kopia/internal/blobcrypto"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/timetrack"
"github.com/kopia/kopia/repo/blob"
@@ -18,7 +20,7 @@ func (sm *SharedManager) Refresh(ctx context.Context) error {
sm.indexesLock.Lock()
defer sm.indexesLock.Unlock()

sm.log.Debug("Refresh started")
timer := timetrack.StartTimer()

ibm, err := sm.indexBlobManager(ctx)
if err != nil {
@@ -27,10 +29,11 @@ func (sm *SharedManager) Refresh(ctx context.Context) error {

ibm.Invalidate()

timer := timetrack.StartTimer()

err = sm.loadPackIndexesLocked(ctx)
sm.log.Debugf("Refresh completed in %v", timer.Elapsed())

contentlog.Log2(ctx, sm.log, "refreshIndexes",
logparam.Duration("latency", timer.Elapsed()),
logparam.Error("error", err))

return err
}
@@ -42,7 +45,11 @@ func (sm *SharedManager) CompactIndexes(ctx context.Context, opt indexblob.Compa
sm.indexesLock.Lock()
defer sm.indexesLock.Unlock()

sm.log.Debugf("CompactIndexes(%+v)", opt)
contentlog.Log4(ctx, sm.log, "CompactIndexes",
logparam.Bool("allIndexes", opt.AllIndexes),
logparam.Int64("maxSmallBlobs", int64(opt.MaxSmallBlobs)),
logparam.Time("dropDeletedBefore", opt.DropDeletedBefore),
logparam.Bool("disableEventualConsistencySafety", opt.DisableEventualConsistencySafety))

ibm, err := sm.indexBlobManager(ctx)
if err != nil {

@@ -10,6 +10,9 @@
"github.com/pkg/errors"

"github.com/kopia/kopia/internal/bigmap"
"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/content/index"
)
@@ -235,7 +238,7 @@ func (bm *WriteManager) IterateUnreferencedPacks(ctx context.Context, blobPrefix

defer usedPacks.Close(ctx)

bm.log.Debug("determining blobs in use")
contentlog.Log(ctx, bm.log, "determining blobs in use")
// find packs in use
if err := bm.IteratePacks(
ctx,
@@ -269,7 +272,8 @@ func(pi PackInfo) error {
}
}

bm.log.Debugf("scanning prefixes %v", prefixes)
contentlog.Log1(ctx, bm.log, "scanning prefixes",
blobparam.BlobIDList("prefixes", prefixes))

var unusedCount atomic.Int32

@@ -286,7 +290,7 @@ func(bm blob.Metadata) error {
return errors.Wrap(err, "error iterating blobs")
}

bm.log.Debugf("found %v pack blobs not in use", unusedCount.Load())
contentlog.Log1(ctx, bm.log, "found pack blobs not in use", logparam.Int("unusedCount", int(unusedCount.Load())))

return nil
}

@@ -12,6 +12,10 @@
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/contentparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/timetrack"
"github.com/kopia/kopia/repo/blob"
@@ -19,7 +23,6 @@
"github.com/kopia/kopia/repo/content/index"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/logging"
)

const indexBlobCompactionWarningThreshold = 1000
@@ -120,30 +123,25 @@ func (sm *SharedManager) getContentDataReadLocked(ctx context.Context, pp *pendi
return sm.decryptContentAndVerify(payload.Bytes(), bi, output)
}

func (sm *SharedManager) preparePackDataContent(mp format.MutableParameters, pp *pendingPackInfo) (index.Builder, error) {
func (sm *SharedManager) preparePackDataContent(ctx context.Context, mp format.MutableParameters, pp *pendingPackInfo) (index.Builder, error) {
packFileIndex := index.Builder{}
haveContent := false

sb := logging.GetBuffer()
defer sb.Release()

for _, info := range pp.currentPackItems {
if info.PackBlobID == pp.packBlobID {
haveContent = true
}

sb.Reset()
sb.AppendString("add-to-pack ")
sb.AppendString(string(pp.packBlobID))
sb.AppendString(" ")
info.ContentID.AppendToLogBuffer(sb)
sb.AppendString(" p:")
sb.AppendString(string(info.PackBlobID))
sb.AppendString(" ")
sb.AppendUint32(info.PackedLength)
sb.AppendString(" d:")
sb.AppendBoolean(info.Deleted)
sm.log.Debug(sb.String())
contentlog.Log3(ctx, sm.log, "add-to-pack",
contentparam.ContentID("cid", info.ContentID),
logparam.UInt32("len", info.PackedLength),
logparam.Bool("del", info.Deleted))
} else {
contentlog.Log4(ctx, sm.log, "move-to-pack",
contentparam.ContentID("cid", info.ContentID),
blobparam.BlobID("sourcePack", info.PackBlobID),
logparam.UInt32("len", info.PackedLength),
logparam.Bool("del", info.Deleted))
}

packFileIndex.Add(info)
}
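
One detail that is easy to miss here: the old string log included the pack blob ID on every `add-to-pack` line, while the new `add-to-pack` records carry no pack field at all. That works because `prepareAndWritePackInternal` (earlier in this diff) attaches the pack blob ID once as a span parameter, so every record emitted under that context inherits it; only the cross-pack `move-to-pack` case still needs an explicit `sourcePack` field:

```go
// From prepareAndWritePackInternal above: one span parameter replaces a
// per-record pack blob ID on every "add-to-pack"/"move-to-pack" event.
ctx = contentlog.WithParams(ctx,
	logparam.String("span:writePack", string(pp.packBlobID)))
```
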
@@ -1985,7 +1985,7 @@ func (s *contentManagerSuite) TestReadsOwnWritesWithEventualConsistencyPersisten
cacheKeyTime := map[blob.ID]time.Time{}
cacheSt := blobtesting.NewMapStorage(cacheData, cacheKeyTime, timeNow)
ecst := blobtesting.NewEventuallyConsistentStorage(
logging.NewWrapper(st, testlogging.NewTestLogger(t), "[STORAGE] "),
logging.NewWrapper(st, testlogging.NewTestLogger(t), nil, "[STORAGE] "),
3*time.Second,
timeNow)

@@ -6,6 +6,10 @@
"strings"
"sync"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/contentparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/repo/blob"
)
@@ -116,17 +120,26 @@ type work struct {
switch {
case strings.HasPrefix(string(w.blobID), string(PackBlobIDPrefixRegular)):
if err := bm.contentCache.PrefetchBlob(ctx, w.blobID); err != nil {
bm.log.Debugw("error prefetching data blob", "blobID", w.blobID, "err", err)
contentlog.Log2(ctx, bm.log,
"error prefetching data blob",
blobparam.BlobID("blobID", w.blobID),
logparam.Error("err", err))
}
case strings.HasPrefix(string(w.blobID), string(PackBlobIDPrefixSpecial)):
if err := bm.metadataCache.PrefetchBlob(ctx, w.blobID); err != nil {
bm.log.Debugw("error prefetching metadata blob", "blobID", w.blobID, "err", err)
contentlog.Log2(ctx, bm.log,
"error prefetching metadata blob",
blobparam.BlobID("blobID", w.blobID),
logparam.Error("err", err))
}
case w.contentID != EmptyID:
tmp.Reset()

if _, err := bm.getContentDataAndInfo(ctx, w.contentID, &tmp); err != nil {
bm.log.Debugw("error prefetching content", "contentID", w.contentID, "err", err)
contentlog.Log2(ctx, bm.log,
"error prefetching content",
contentparam.ContentID("contentID", w.contentID),
logparam.Error("err", err))
}
}
}

@@ -9,7 +9,6 @@
"github.com/pkg/errors"

"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/logging"
)

// IDPrefix represents a content ID prefix (empty string or single character between 'g' and 'z').
@@ -98,16 +97,25 @@ func (i ID) less(other ID) bool {
return bytes.Compare(i.data[:i.idLen], other.data[:other.idLen]) < 0
}

// AppendToLogBuffer appends content ID to log buffer.
func (i ID) AppendToLogBuffer(sb *logging.Buffer) {
var buf [128]byte

// AppendToJSON appends content ID to JSON buffer.
func (i ID) AppendToJSON(buf []byte, maxLength uint8) []byte {
buf = append(buf, '"')
if i.prefix != 0 {
sb.AppendByte(i.prefix)
buf = append(buf, i.prefix)
}

hex.Encode(buf[0:i.idLen*2], i.data[0:i.idLen])
sb.AppendBytes(buf[0 : i.idLen*2])
if maxLength > i.idLen {
maxLength = i.idLen
}

var tmp [128]byte

hex.Encode(tmp[0:maxLength*2], i.data[0:maxLength])

buf = append(buf, tmp[0:maxLength*2]...)
buf = append(buf, '"')

return buf
}
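
`AppendToJSON` writes the ID as a quoted hex string truncated to at most `maxLength` bytes: two hex characters per byte, plus one leading character when the ID has a prefix, which is why the test further below expects 6 characters for unprefixed IDs and 7 for prefixed ones. A hedged usage sketch (the ID value shown is illustrative only):

```go
// Keep only the first 3 bytes (6 hex chars) of a content ID when emitting
// it into a JSON log record; a prefixed ID gains 1 extra leading character.
jb := cid.AppendToJSON(nil, 3)
// jb now holds a quoted JSON string, e.g. `"kf01a2b"` (hypothetical value).
```
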
// Append appends content ID to the slice.

@@ -6,8 +6,6 @@
"testing"

"github.com/stretchr/testify/require"

"github.com/kopia/kopia/repo/logging"
)

func TestIDValid(t *testing.T) {
@@ -26,18 +24,26 @@ func TestIDValid(t *testing.T) {

var validContentIDsOrdered []ID

sb := logging.GetBuffer()
defer sb.Release()

for _, s := range validIDsOrdered {
cid, err := ParseID(s)
require.NoError(t, err)

require.Equal(t, s, cid.String())

sb.Reset()
cid.AppendToLogBuffer(sb)
require.Equal(t, s, sb.String())
jb := cid.AppendToJSON(nil, 10)
require.Equal(t, "\""+s+"\"", string(jb))

if s != "" {
// limit to 3 bytes
jb2 := cid.AppendToJSON(nil, 3)
if len(s)%2 == 0 {
// no prefix - 6 chars
require.Equal(t, "\""+s[:6]+"\"", string(jb2))
} else {
// with prefix - 7 chars
require.Equal(t, "\""+s[:7]+"\"", string(jb2))
}
}

validContentIDsOrdered = append(validContentIDsOrdered, cid)
}
@@ -6,10 +6,13 @@
"github.com/pkg/errors"

"github.com/kopia/kopia/internal/blobcrypto"
"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/cache"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/internal/timetrack"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/logging"
)

// Metadata is information about a single index blob managed by Manager.
@@ -18,12 +21,68 @@ type Metadata struct {
Superseded []blob.Metadata
}

// WriteValueTo writes the metadata to a JSON writer.
func (m Metadata) WriteValueTo(jw *contentlog.JSONWriter) {
blobparam.BlobMetadata("metadata", m.Metadata).WriteValueTo(jw)
jw.BeginListField("superseded")

for _, bm := range m.Superseded {
jw.BeginObject()
jw.StringField("blobID", string(bm.BlobID))
jw.Int64Field("l", bm.Length)
jw.TimeField("ts", bm.Timestamp)
jw.EndObject()
}

jw.EndList()
}

type metadataParam struct {
key string
val Metadata
}

func (v metadataParam) WriteValueTo(jw *contentlog.JSONWriter) {
jw.BeginObjectField(v.key)
v.val.WriteValueTo(jw)
jw.EndObject()
}

// MetadataParam creates a parameter for a metadata value.
//
//nolint:revive
func MetadataParam(name string, bm Metadata) metadataParam {
return metadataParam{key: name, val: bm}
}

type metadataListParam struct {
key string
list []Metadata
}

func (v metadataListParam) WriteValueTo(jw *contentlog.JSONWriter) {
jw.BeginListField(v.key)

for _, bm := range v.list {
jw.BeginObject()
bm.WriteValueTo(jw)
jw.EndObject()
}

jw.EndList()
}

// MetadataListParam creates a parameter for a list of metadata.
//
//nolint:revive
func MetadataListParam(name string, list []Metadata) metadataListParam {
return metadataListParam{key: name, list: list}
}
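
The `metadataParam`/`metadataListParam` adapters show the extension point: any value can become a log parameter by implementing `WriteValueTo(*contentlog.JSONWriter)`. A hedged sketch of the same pattern applied to a hypothetical list-of-strings parameter (the type and field names are illustrative, not part of the kopia API; only the `JSONWriter` methods visible in this diff are used):

```go
// stringListParam is a hypothetical adapter following the pattern above.
type stringListParam struct {
	key  string
	list []string
}

// WriteValueTo emits the list as a named JSON array of small objects.
func (v stringListParam) WriteValueTo(jw *contentlog.JSONWriter) {
	jw.BeginListField(v.key)

	for _, s := range v.list {
		jw.BeginObject()
		jw.StringField("v", s)
		jw.EndObject()
	}

	jw.EndList()
}
```
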
// EncryptionManager manages encryption and caching of index blobs.
type EncryptionManager struct {
st blob.Storage
crypter blobcrypto.Crypter
indexBlobCache *cache.PersistentCache
log logging.Logger
log *contentlog.Logger
}

// GetEncryptedBlob fetches and decrypts the contents of a given encrypted blob
@@ -52,15 +111,18 @@ func (m *EncryptionManager) EncryptAndWriteBlob(ctx context.Context, data gather
return blob.Metadata{}, errors.Wrap(err, "error encrypting")
}

t0 := timetrack.StartTimer()

bm, err := blob.PutBlobAndGetMetadata(ctx, m.st, blobID, data2.Bytes(), blob.PutOptions{})
if err != nil {
m.log.Debugf("write-index-blob %v failed %v", blobID, err)
return blob.Metadata{}, errors.Wrapf(err, "error writing blob %v", blobID)
}

m.log.Debugf("write-index-blob %v %v %v", blobID, bm.Length, bm.Timestamp)
contentlog.Log5(ctx, m.log, "write-index-blob",
blobparam.BlobID("indexBlobID", blobID),
logparam.Int("len", data2.Length()),
logparam.Time("timestamp", bm.Timestamp),
logparam.Duration("latency", t0.Elapsed()),
logparam.Error("error", err))

return bm, nil
return bm, errors.Wrapf(err, "error writing blob %v", blobID)
}
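
`EncryptAndWriteBlob` also demonstrates the latency pattern used in several places in this change: start a `timetrack` timer before the storage call, then emit a single record carrying both the elapsed time and the (possibly nil) error. A minimal sketch, assuming only the internal packages shown in this diff:

```go
package indexblob

import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/timetrack"
)

// logTimedOperation sketches the pattern used above: one record captures
// both latency and outcome, so success and failure share a single event.
func logTimedOperation(ctx context.Context, log *contentlog.Logger, op func() error) error {
	t0 := timetrack.StartTimer()
	err := op()

	contentlog.Log2(ctx, log, "operation",
		logparam.Duration("latency", t0.Elapsed()),
		logparam.Error("error", err))

	return err
}
```
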
// NewEncryptionManager creates new encryption manager.
@@ -68,7 +130,7 @@ func NewEncryptionManager(
st blob.Storage,
crypter blobcrypto.Crypter,
indexBlobCache *cache.PersistentCache,
log logging.Logger,
log *contentlog.Logger,
) *EncryptionManager {
return &EncryptionManager{st, crypter, indexBlobCache, log}
}

@@ -14,7 +14,6 @@
"github.com/kopia/kopia/repo/encryption"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/hashing"
"github.com/kopia/kopia/repo/logging"
)

type failingEncryptor struct {
@@ -43,7 +42,7 @@ func TestEncryptedBlobManager(t *testing.T) {
st: fs,
crypter: blobcrypto.StaticCrypter{Hash: hf, Encryption: enc},
indexBlobCache: nil,
log: logging.NullLogger,
log: nil,
}

ctx := testlogging.Context(t)

@@ -8,11 +8,14 @@
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"

"github.com/kopia/kopia/internal/blobparam"
"github.com/kopia/kopia/internal/contentlog"
"github.com/kopia/kopia/internal/contentlog/logparam"
"github.com/kopia/kopia/internal/contentparam"
"github.com/kopia/kopia/internal/gather"
"github.com/kopia/kopia/repo/blob"
"github.com/kopia/kopia/repo/content/index"
"github.com/kopia/kopia/repo/format"
"github.com/kopia/kopia/repo/logging"
)

// V0IndexBlobPrefix is the prefix for all legacy (v0) index blobs.
@@ -62,7 +65,7 @@ type ManagerV0 struct {
enc *EncryptionManager
timeNow func() time.Time
formattingOptions IndexFormattingOptions
log logging.Logger
log *contentlog.Logger
}

// ListIndexBlobInfos lists active blob info structs.
@@ -112,13 +115,9 @@ func (m *ManagerV0) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.
return nil, time.Time{}, errors.Wrap(err, "error listing indexes")
}

for i, sib := range storageIndexBlobs {
m.log.Debugf("found-index-blobs[%v] = %v", i, sib)
}

for i, clm := range compactionLogMetadata {
m.log.Debugf("found-compaction-blobs[%v] %v", i, clm)
}
contentlog.Log2(ctx, m.log, "found index blobs",
blobparam.BlobMetadataList("storageIndexBlobs", storageIndexBlobs),
blobparam.BlobMetadataList("compactionLogMetadata", compactionLogMetadata))

indexMap := map[blob.ID]*Metadata{}
addBlobsToIndex(indexMap, storageIndexBlobs)
@@ -129,16 +128,16 @@ func (m *ManagerV0) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.
}

// remove entries from indexMap that have been compacted and replaced by other indexes.
m.removeCompactedIndexes(indexMap, compactionLogs)
m.removeCompactedIndexes(ctx, indexMap, compactionLogs)

var results []Metadata
for _, v := range indexMap {
results = append(results, *v)
}

for i, res := range results {
m.log.Debugf("active-index-blobs[%v] = %v", i, res)
}
contentlog.Log1(ctx, m.log,
"active index blobs",
MetadataListParam("results", results))

return results, time.Time{}, nil
}
@@ -160,7 +159,7 @@ func (m *ManagerV0) Compact(ctx context.Context, opt CompactOptions) error {
return errors.Wrap(mperr, "mutable parameters")
}

blobsToCompact := m.getBlobsToCompact(indexBlobs, opt, mp)
blobsToCompact := m.getBlobsToCompact(ctx, indexBlobs, opt, mp)

if err := m.compactIndexBlobs(ctx, blobsToCompact, opt); err != nil {
return errors.Wrap(err, "error performing compaction")
@@ -187,15 +186,12 @@ func (m *ManagerV0) registerCompaction(ctx context.Context, inputs, outputs []bl
return errors.Wrap(err, "unable to write compaction log")
}

for i, input := range inputs {
m.log.Debugf("compacted-input[%v/%v] %v", i, len(inputs), input)
}

for i, output := range outputs {
m.log.Debugf("compacted-output[%v/%v] %v", i, len(outputs), output)
}

m.log.Debugf("compaction-log %v %v", compactionLogBlobMetadata.BlobID, compactionLogBlobMetadata.Timestamp)
contentlog.Log4(ctx, m.log,
"registered compaction",
blobparam.BlobMetadataList("inputs", inputs),
blobparam.BlobMetadataList("outputs", outputs),
blobparam.BlobID("compactionLogBlobID", compactionLogBlobMetadata.BlobID),
logparam.Time("compactionLogBlobTimestamp", compactionLogBlobMetadata.Timestamp))

if err := m.deleteOldBlobs(ctx, compactionLogBlobMetadata, maxEventualConsistencySettleTime); err != nil {
return errors.Wrap(err, "error deleting old index blobs")
@@ -295,14 +291,18 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata
compactionLogServerTimeCutoff := latestBlob.Timestamp.Add(-maxEventualConsistencySettleTime)
compactionBlobs := blobsOlderThan(allCompactionLogBlobs, compactionLogServerTimeCutoff)

m.log.Debugf("fetching %v/%v compaction logs older than %v", len(compactionBlobs), len(allCompactionLogBlobs), compactionLogServerTimeCutoff)
contentlog.Log3(ctx, m.log,
"fetching compaction logs",
logparam.Int("compactionBlobs", len(compactionBlobs)),
logparam.Int("allCompactionLogBlobs", len(allCompactionLogBlobs)),
logparam.Time("compactionLogServerTimeCutoff", compactionLogServerTimeCutoff))

compactionBlobEntries, err := m.getCompactionLogEntries(ctx, compactionBlobs)
if err != nil {
return errors.Wrap(err, "unable to get compaction log entries")
}

indexBlobsToDelete := m.findIndexBlobsToDelete(latestBlob.Timestamp, compactionBlobEntries, maxEventualConsistencySettleTime)
indexBlobsToDelete := m.findIndexBlobsToDelete(ctx, latestBlob.Timestamp, compactionBlobEntries, maxEventualConsistencySettleTime)

// note that we must always delete index blobs first before compaction logs
// otherwise we may inadvertently resurrect an index blob that should have been removed.
@@ -310,7 +310,7 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata
return errors.Wrap(err, "unable to delete compaction logs")
}

compactionLogBlobsToDelayCleanup := m.findCompactionLogBlobsToDelayCleanup(compactionBlobs)
compactionLogBlobsToDelayCleanup := m.findCompactionLogBlobsToDelayCleanup(ctx, compactionBlobs)

if err := m.delayCleanupBlobs(ctx, compactionLogBlobsToDelayCleanup, latestBlob.Timestamp); err != nil {
return errors.Wrap(err, "unable to schedule delayed cleanup of blobs")
@@ -319,18 +319,26 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata
return nil
}

func (m *ManagerV0) findIndexBlobsToDelete(latestServerBlobTime time.Time, entries map[blob.ID]*compactionLogEntry, maxEventualConsistencySettleTime time.Duration) []blob.ID {
func (m *ManagerV0) findIndexBlobsToDelete(ctx context.Context, latestServerBlobTime time.Time, entries map[blob.ID]*compactionLogEntry, maxEventualConsistencySettleTime time.Duration) []blob.ID {
tmp := map[blob.ID]bool{}

for _, cl := range entries {
// are the input index blobs in this compaction eligible for deletion?
if age := latestServerBlobTime.Sub(cl.metadata.Timestamp); age < maxEventualConsistencySettleTime {
m.log.Debugf("not deleting compacted index blob used as inputs for compaction %v, because it's too recent: %v < %v", cl.metadata.BlobID, age, maxEventualConsistencySettleTime)
contentlog.Log3(ctx, m.log,
"not deleting compacted index blob used as inputs for compaction",
blobparam.BlobID("blobID", cl.metadata.BlobID),
logparam.Duration("age", age),
logparam.Duration("maxEventualConsistencySettleTime", maxEventualConsistencySettleTime))

continue
}

for _, b := range cl.InputMetadata {
m.log.Debugf("will delete old index %v compacted to %v", b, cl.OutputMetadata)
contentlog.Log2(ctx, m.log,
"will delete old index",
blobparam.BlobMetadata("b", b),
blobparam.BlobMetadataList("outputMetadata", cl.OutputMetadata))

tmp[b.BlobID] = true
}
@@ -345,11 +353,13 @@ func (m *ManagerV0) findIndexBlobsToDelete(latestServerBlobTime time.Time, entri
return result
}

func (m *ManagerV0) findCompactionLogBlobsToDelayCleanup(compactionBlobs []blob.Metadata) []blob.ID {
func (m *ManagerV0) findCompactionLogBlobsToDelayCleanup(ctx context.Context, compactionBlobs []blob.Metadata) []blob.ID {
var result []blob.ID

for _, cb := range compactionBlobs {
m.log.Debugf("will delete compaction log blob %v", cb)
contentlog.Log1(ctx, m.log, "will delete compaction log blob",
blobparam.BlobMetadata("cb", cb))

result = append(result, cb.BlobID)
}

@@ -390,11 +400,11 @@ func (m *ManagerV0) delayCleanupBlobs(ctx context.Context, blobIDs []blob.ID, cl
func (m *ManagerV0) deleteBlobsFromStorageAndCache(ctx context.Context, blobIDs []blob.ID) error {
for _, blobID := range blobIDs {
if err := m.st.DeleteBlob(ctx, blobID); err != nil && !errors.Is(err, blob.ErrBlobNotFound) {
m.log.Debugf("delete-blob failed %v %v", blobID, err)
contentlog.Log2(ctx, m.log, "delete-blob failed", blobparam.BlobID("blobID", blobID), logparam.Error("err", err))
return errors.Wrapf(err, "unable to delete blob %v", blobID)
}

m.log.Debugf("delete-blob succeeded %v", blobID)
contentlog.Log1(ctx, m.log, "delete-blob succeeded", blobparam.BlobID("blobID", blobID))
}

return nil
@@ -433,13 +443,13 @@ func (m *ManagerV0) cleanup(ctx context.Context, maxEventualConsistencySettleTim
}

if err := m.st.FlushCaches(ctx); err != nil {
m.log.Debugw("error flushing caches", "err", err)
contentlog.Log1(ctx, m.log, "error flushing caches", logparam.Error("err", err))
}

return nil
}

func (m *ManagerV0) getBlobsToCompact(indexBlobs []Metadata, opt CompactOptions, mp format.MutableParameters) []Metadata {
func (m *ManagerV0) getBlobsToCompact(ctx context.Context, indexBlobs []Metadata, opt CompactOptions, mp format.MutableParameters) []Metadata {
var (
nonCompactedBlobs, verySmallBlobs []Metadata
totalSizeNonCompactedBlobs, totalSizeVerySmallBlobs, totalSizeMediumSizedBlobs int64
@@ -465,16 +475,16 @@ func (m *ManagerV0) getBlobsToCompact(indexBlobs []Metadata, opt CompactOptions,

if len(nonCompactedBlobs) < opt.MaxSmallBlobs {
// current count is below min allowed - nothing to do
m.log.Debug("no small contents to Compact")
contentlog.Log(ctx, m.log, "no small contents to Compact")
return nil
}

if len(verySmallBlobs) > len(nonCompactedBlobs)/2 && mediumSizedBlobCount+1 < opt.MaxSmallBlobs {
m.log.Debugf("compacting %v very small contents", len(verySmallBlobs))
contentlog.Log1(ctx, m.log, "compacting very small contents", logparam.Int("len", len(verySmallBlobs)))
return verySmallBlobs
}

m.log.Debugf("compacting all %v non-compacted contents", len(nonCompactedBlobs))
contentlog.Log1(ctx, m.log, "compacting all non-compacted contents", logparam.Int("len", len(nonCompactedBlobs)))

return nonCompactedBlobs
}
@@ -493,8 +503,11 @@ func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata

var inputs, outputs []blob.Metadata

for i, indexBlob := range indexBlobs {
m.log.Debugf("compacting-entries[%v/%v] %v", i, len(indexBlobs), indexBlob)
for _, indexBlob := range indexBlobs {
contentlog.Log2(ctx, m.log,
"compacting-entries",
blobparam.BlobMetadata("indexBlob", indexBlob.Metadata),
blobparam.BlobMetadataList("superseded", indexBlob.Superseded))

if err := addIndexBlobsToBuilder(ctx, m.enc, bld.Add, indexBlob.BlobID); err != nil {
return errors.Wrap(err, "error adding index to builder")
@@ -505,7 +518,7 @@ func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata

// after we built index map in memory, drop contents from it
// we must do it after all input blobs have been merged, otherwise we may resurrect contents.
m.dropContentsFromBuilder(bld, opt)
m.dropContentsFromBuilder(ctx, bld, opt)

dataShards, cleanupShards, err := bld.BuildShards(mp.IndexVersion, false, DefaultIndexShardSize)
if err != nil {
@@ -528,25 +541,27 @@ func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata
return nil
}

func (m *ManagerV0) dropContentsFromBuilder(bld index.Builder, opt CompactOptions) {
func (m *ManagerV0) dropContentsFromBuilder(ctx context.Context, bld index.Builder, opt CompactOptions) {
for _, dc := range opt.DropContents {
if _, ok := bld[dc]; ok {
m.log.Debugf("manual-drop-from-index %v", dc)
contentlog.Log1(ctx, m.log, "manual-drop-from-index", logparam.String("dc", dc.String()))
delete(bld, dc)
}
}

if !opt.DropDeletedBefore.IsZero() {
m.log.Debugf("drop-content-deleted-before %v", opt.DropDeletedBefore)
contentlog.Log1(ctx, m.log, "drop-content-deleted-before", logparam.Time("dropDeletedBefore", opt.DropDeletedBefore))

for _, i := range bld {
if i.Deleted && i.Timestamp().Before(opt.DropDeletedBefore) {
m.log.Debugf("drop-from-index-old-deleted %v %v", i.ContentID, i.Timestamp())
contentlog.Log2(ctx, m.log, "drop-from-index-old-deleted",
contentparam.ContentID("contentID", i.ContentID),
logparam.Time("timestamp", i.Timestamp()))
delete(bld, i.ContentID)
}
}

m.log.Debugf("finished drop-content-deleted-before %v", opt.DropDeletedBefore)
contentlog.Log1(ctx, m.log, "finished drop-content-deleted-before", logparam.Time("dropDeletedBefore", opt.DropDeletedBefore))
}
}

@@ -584,7 +599,7 @@ func blobsOlderThan(m []blob.Metadata, cutoffTime time.Time) []blob.Metadata {
return res
}

func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compactionLogs map[blob.ID]*compactionLogEntry) {
func (m *ManagerV0) removeCompactedIndexes(ctx context.Context, bimap map[blob.ID]*Metadata, compactionLogs map[blob.ID]*compactionLogEntry) {
var validCompactionLogs []*compactionLogEntry

for _, cl := range compactionLogs {
@@ -595,7 +610,8 @@ func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compacti
if bimap[o.BlobID] == nil {
haveAllOutputs = false

m.log.Debugf("blob %v referenced by compaction log is not found", o.BlobID)
contentlog.Log1(ctx, m.log, "blob referenced by compaction log is not found",
blobparam.BlobID("blobID", o.BlobID))

break
}
@@ -610,7 +626,9 @@ func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compacti
for _, cl := range validCompactionLogs {
for _, ib := range cl.InputMetadata {
if md := bimap[ib.BlobID]; md != nil && md.Superseded == nil {
m.log.Debugf("ignore-index-blob %v compacted to %v", ib, cl.OutputMetadata)
contentlog.Log2(ctx, m.log, "ignore-index-blob",
blobparam.BlobMetadata("ib", ib),
blobparam.BlobMetadataList("outputMetadata", cl.OutputMetadata))

delete(bimap, ib.BlobID)
}
@@ -624,7 +642,7 @@ func NewManagerV0(
enc *EncryptionManager,
timeNow func() time.Time,
formattingOptions IndexFormattingOptions,
log logging.Logger,
log *contentlog.Logger,
) *ManagerV0 {
return &ManagerV0{st, enc, timeNow, formattingOptions, log}
}

@@ -197,7 +197,7 @@ func TestIndexBlobManagerStress(t *testing.T) {
|
||||
for actorID := range numActors {
|
||||
loggedSt := logging.NewWrapper(st, testlogging.Printf(func(m string, args ...any) {
|
||||
t.Logf(fmt.Sprintf("@%v actor[%v]:", fakeTimeFunc().Format("150405.000"), actorID)+m, args...)
|
||||
}, ""), "")
|
||||
}, ""), nil, "")
|
||||
contentPrefix := fmt.Sprintf("a%v", actorID)
|
||||
|
||||
eg.Go(func() error {
|
||||
@@ -285,7 +285,7 @@ func TestCompactionCreatesPreviousIndex(t *testing.T) {
|
||||
st = blobtesting.NewEventuallyConsistentStorage(st, testEventualConsistencySettleTime, fakeTimeFunc)
|
||||
st = logging.NewWrapper(st, testlogging.Printf(func(msg string, args ...any) {
|
||||
t.Logf("[store] "+fakeTimeFunc().Format("150405.000")+" "+msg, args...)
|
||||
}, ""), "")
|
||||
}, ""), nil, "")
|
||||
m := newIndexBlobManagerForTesting(t, st, fakeTimeFunc)
|
||||
|
||||
numWritten := 0
|
||||
@@ -363,7 +363,7 @@ func verifyIndexBlobManagerPreventsResurrectOfDeletedContents(t *testing.T, dela
|
||||
st = blobtesting.NewEventuallyConsistentStorage(st, testEventualConsistencySettleTime, fakeTimeFunc)
|
||||
st = logging.NewWrapper(st, testlogging.Printf(func(msg string, args ...any) {
|
||||
t.Logf(fakeTimeFunc().Format("150405.000")+" "+msg, args...)
|
||||
}, ""), "")
|
||||
}, ""), nil, "")
|
||||
m := newIndexBlobManagerForTesting(t, st, fakeTimeFunc)
|
||||
|
||||
numWritten := 0
|
||||
@@ -780,18 +780,16 @@ func newIndexBlobManagerForTesting(t *testing.T, st blob.Storage, localTimeNow f
|
||||
15*time.Minute,
|
||||
)
|
||||
|
||||
log := testlogging.Printf(t.Logf, "")
|
||||
|
||||
m := &ManagerV0{
|
||||
st: st,
|
||||
enc: &EncryptionManager{
|
||||
st: st,
|
||||
indexBlobCache: nil,
|
||||
crypter: blobcrypto.StaticCrypter{Hash: hf, Encryption: enc},
|
||||
log: log,
|
||||
log: nil,
|
||||
},
|
||||
timeNow: localTimeNow,
|
||||
log: log,
|
||||
log: nil,
|
||||
}
|
||||
|
||||
return m
|
||||
|
||||
@@ -9,11 +9,12 @@
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/kopia/kopia/internal/blobcrypto"
|
||||
"github.com/kopia/kopia/internal/contentlog"
|
||||
"github.com/kopia/kopia/internal/contentlog/logparam"
|
||||
"github.com/kopia/kopia/internal/epoch"
|
||||
"github.com/kopia/kopia/internal/gather"
|
||||
"github.com/kopia/kopia/repo/blob"
|
||||
"github.com/kopia/kopia/repo/content/index"
|
||||
"github.com/kopia/kopia/repo/logging"
|
||||
)
|
||||
|
||||
// ManagerV1 is the append-only implementation of indexblob.Manager
|
||||
@@ -23,7 +24,7 @@ type ManagerV1 struct {
|
||||
enc *EncryptionManager
|
||||
timeNow func() time.Time
|
||||
formattingOptions IndexFormattingOptions
|
||||
log logging.Logger
|
||||
log *contentlog.Logger
|
||||
|
||||
epochMgr *epoch.Manager
|
||||
}
|
||||
@@ -49,7 +50,7 @@ func (m *ManagerV1) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.
|
||||
result = append(result, Metadata{Metadata: bm})
|
||||
}
|
||||
|
||||
m.log.Debugf("total active indexes %v, deletion watermark %v", len(active), deletionWatermark)
|
||||
contentlog.Log2(ctx, m.log, "total active indexes", logparam.Int("len", len(active)), logparam.Time("deletionWatermark", deletionWatermark))
|
||||
|
||||
return result, deletionWatermark, nil
|
||||
}
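A note on the calling convention visible in this hunk: contentlog exposes fixed-arity helpers (Log1, Log2, ...) with typed parameter constructors, which lets each record be serialized without boxing values into a variadic []any slice. A minimal standalone sketch of the idea (hypothetical names, standard library only — the actual zero-allocation JSON writer is more involved):

```go
package main

import (
	"fmt"
	"strconv"
)

// param is a typed key/value pair, standing in for logparam.Int and
// friends; typed constructors avoid boxing values into ...any.
type param struct {
	key string
	val int64
}

func intParam(key string, v int64) param { return param{key, v} }

// appendParam writes ,"key":value onto buf without extra allocations.
func appendParam(buf []byte, p param) []byte {
	buf = append(buf, ',')
	buf = strconv.AppendQuote(buf, p.key)
	buf = append(buf, ':')

	return strconv.AppendInt(buf, p.val, 10)
}

// log2 takes exactly two params, so the call site allocates no slice.
func log2(msg string, p1, p2 param) {
	buf := make([]byte, 0, 128)
	buf = append(buf, `{"m":`...)
	buf = strconv.AppendQuote(buf, msg)
	buf = appendParam(buf, p1)
	buf = appendParam(buf, p2)
	buf = append(buf, '}')

	fmt.Println(string(buf))
}

func main() {
	log2("total active indexes", intParam("len", 42), intParam("deletionWatermark", 1700000000))
}
```

The fixed arity is what keeps a call like the Log2 above allocation-free; a variadic signature would force a slice allocation at every call site.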
@@ -175,7 +176,7 @@ func NewManagerV1(
	epochMgr *epoch.Manager,
	timeNow func() time.Time,
	formattingOptions IndexFormattingOptions,
	log logging.Logger,
	log *contentlog.Logger,
) *ManagerV1 {
	return &ManagerV1{
		st: st,
@@ -1,142 +0,0 @@
package logging

import (
	"strconv"
	"sync"
	"time"
	"unsafe"
)

// Buffer is a specialized buffer that can be kept in a pool used
// for constructing logging messages without allocation.
type Buffer struct {
	buf [1024]byte
	validLen int // valid length
}

//nolint:gochecknoglobals
var bufPool = &sync.Pool{
	New: func() any {
		return &Buffer{}
	},
}

// GetBuffer gets a logging buffer.
func GetBuffer() *Buffer {
	//nolint:forcetypeassert
	return bufPool.Get().(*Buffer)
}

// Release releases logging buffer back to the pool.
func (b *Buffer) Release() {
	b.Reset()

	bufPool.Put(b)
}

// Reset resets logging buffer back to zero length.
func (b *Buffer) Reset() {
	b.validLen = 0
}

// AppendByte appends a single byte/character.
func (b *Buffer) AppendByte(val byte) *Buffer {
	if b.validLen < len(b.buf) {
		b.buf[b.validLen] = val
		b.validLen++
	}

	return b
}

// AppendString appends a string of characters.
func (b *Buffer) AppendString(val string) *Buffer {
	vl := len(val)

	if b.validLen+vl > len(b.buf) {
		vl = len(b.buf) - b.validLen
	}

	if vl > 0 {
		copy(b.buf[b.validLen:b.validLen+vl], val)
		b.validLen += vl
	}

	return b
}

// AppendTime appends a time representation.
func (b *Buffer) AppendTime(val time.Time, layout string) *Buffer {
	var buf [64]byte

	return b.AppendBytes(val.AppendFormat(buf[:0], layout))
}

// AppendBytes appends a slice of bytes.
func (b *Buffer) AppendBytes(val []byte) *Buffer {
	vl := len(val)

	if b.validLen+vl > len(b.buf) {
		vl = len(b.buf) - b.validLen
	}

	if vl > 0 {
		copy(b.buf[b.validLen:b.validLen+vl], val)
		b.validLen += vl
	}

	return b
}

// AppendBoolean appends boolean string ("true" or "false").
func (b *Buffer) AppendBoolean(val bool) *Buffer {
	if val {
		return b.AppendString("true")
	}

	return b.AppendString("false")
}

// AppendInt32 appends int32 value formatted as a decimal string.
func (b *Buffer) AppendInt32(val int32) *Buffer {
	return b.AppendInt(int64(val), 10) //nolint:mnd
}

// AppendInt64 appends int64 value formatted as a decimal string.
func (b *Buffer) AppendInt64(val int64) *Buffer {
	return b.AppendInt(val, 10) //nolint:mnd
}

// AppendInt appends integer value formatted as a string in a given base.
func (b *Buffer) AppendInt(val int64, base int) *Buffer {
	var buf [64]byte

	return b.AppendBytes(strconv.AppendInt(buf[:0], val, base))
}

// AppendUint32 appends uint32 value formatted as a decimal string.
func (b *Buffer) AppendUint32(val uint32) *Buffer {
	return b.AppendUint(uint64(val), 10) //nolint:mnd
}

// AppendUint64 appends uint64 value formatted as a decimal string.
func (b *Buffer) AppendUint64(val uint64) *Buffer {
	return b.AppendUint(val, 10) //nolint:mnd
}

// AppendUint appends unsigned integer value formatted as a string in a given base.
func (b *Buffer) AppendUint(val uint64, base int) *Buffer {
	var buf [64]byte

	return b.AppendBytes(strconv.AppendUint(buf[:0], val, base))
}

// String returns a string value of a buffer. The value is valid as long as
// string remains allocated and no Append*() methods have been called.
func (b *Buffer) String() string {
	if b.validLen == 0 {
		return ""
	}

	return unsafe.String(&b.buf[0], b.validLen) //nolint:gosec
}
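The file deleted above is the old fixed-size logging buffer. As a point of reference for the pattern being retired, here is a minimal standalone sketch (hypothetical names, standard library only) of the same design: a fixed array recycled through a sync.Pool that truncates on overflow instead of growing:

```go
package main

import (
	"fmt"
	"sync"
)

// fixedBuf mirrors the removed logging.Buffer: a fixed array plus a
// length, so formatting a message needs no heap allocation in steady state.
type fixedBuf struct {
	buf [1024]byte
	n   int
}

//nolint:gochecknoglobals
var pool = sync.Pool{New: func() any { return new(fixedBuf) }}

// appendString copies as much of s as fits; overflow is silently
// truncated rather than reallocating, keeping memory strictly bounded.
func (b *fixedBuf) appendString(s string) *fixedBuf {
	b.n += copy(b.buf[b.n:], s)
	return b
}

func main() {
	b := pool.Get().(*fixedBuf) //nolint:forcetypeassert
	b.appendString("hello ").appendString("world")
	fmt.Println(string(b.buf[:b.n])) // hello world

	b.n = 0 // reset before returning to the pool
	pool.Put(b)
}
```

The truncate-on-overflow behavior is exactly what the tests that follow exercise.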
@@ -1,77 +0,0 @@
package logging_test

import (
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/repo/logging"
)

func TestLoggingBuffer_ReusesMemory(t *testing.T) {
	b := logging.GetBuffer()
	defer b.Release()

	b.AppendString("xx")

	s := b.String()
	require.Equal(t, "xx", s)

	// ensure we're reusing memory
	b.Reset()
	b.AppendString("yy")

	require.Equal(t, "yy", s)
}

func TestLoggingBuffer_Overflow(t *testing.T) {
	b := logging.GetBuffer()
	defer b.Release()

	filler := strings.Repeat("x", 1020)
	b.AppendString(filler)
	b.AppendString("foobarbaz")

	// only room for 4 more characters
	require.Equal(t, filler+"foob", b.String())

	b.Reset()

	b.AppendString(filler)
	b.AppendBytes([]byte{65, 66, 67, 68, 69})

	// only room for 4 more characters
	require.Equal(t, filler+"ABCD", b.String())
}

func TestLoggingBuffer_Append(t *testing.T) {
	b := logging.GetBuffer()
	defer b.Release()

	require.Empty(t, b.String())

	require.Equal(t,
		"xx ABC D -42 -23 true 42 false 23 2000-01-02T03:04:05Z",
		b.AppendString("xx").
			AppendString(" ").
			AppendBytes([]byte{65, 66, 67}).
			AppendString(" ").
			AppendByte('D').
			AppendString(" ").
			AppendInt32(-42).
			AppendString(" ").
			AppendInt64(-23).
			AppendString(" ").
			AppendBoolean(true).
			AppendString(" ").
			AppendUint32(42).
			AppendString(" ").
			AppendBoolean(false).
			AppendString(" ").
			AppendUint64(23).
			AppendString(" ").
			AppendTime(time.Date(2000, 1, 2, 3, 4, 5, 6, time.UTC), time.RFC3339).
			String())
}
@@ -7,8 +7,10 @@
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"

	"github.com/kopia/kopia/internal/blobparam"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/stats"
	"github.com/kopia/kopia/internal/units"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/content"
@@ -26,6 +28,11 @@ type DeleteUnreferencedBlobsOptions struct {
//
//nolint:gocyclo,funlen
func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWriter, opt DeleteUnreferencedBlobsOptions, safety SafetyParameters) (int, error) {
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:blob-gc", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-blob-gc")

	if opt.Parallel == 0 {
		opt.Parallel = 16
	}
@@ -49,7 +56,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite

			cnt, del := deleted.Add(bm.Length)
			if cnt%100 == 0 {
				log(ctx).Infof(" deleted %v unreferenced blobs (%v)", cnt, units.BytesString(del))
				contentlog.Log2(ctx, log, "deleted unreferenced blobs", logparam.UInt32("count", cnt), logparam.Int64("bytes", del))
			}
		}

@@ -59,7 +66,7 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
	}

	// iterate unreferenced blobs and count them + optionally send to the channel to be deleted
	log(ctx).Info("Looking for unreferenced blobs...")
	contentlog.Log(ctx, log, "Looking for unreferenced blobs...")

	var prefixes []blob.ID
	if p := opt.Prefix; p != "" {
@@ -89,19 +96,29 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
	// belong to alive sessions.
	if err := rep.ContentManager().IterateUnreferencedPacks(ctx, prefixes, opt.Parallel, func(bm blob.Metadata) error {
		if bm.Timestamp.After(cutoffTime) {
			log(ctx).Debugf(" preserving %v because it was created after maintenance started", bm.BlobID)
			contentlog.Log3(ctx, log,
				"preserving blob - after cutoff time",
				blobparam.BlobID("blobID", bm.BlobID),
				logparam.Time("cutoffTime", cutoffTime),
				logparam.Time("timestamp", bm.Timestamp))
			return nil
		}

		if age := cutoffTime.Sub(bm.Timestamp); age < safety.BlobDeleteMinAge {
			log(ctx).Debugf(" preserving %v because it's too new (age: %v<%v)", bm.BlobID, age, safety.BlobDeleteMinAge)
			contentlog.Log2(ctx, log,
				"preserving blob - below min age",
				blobparam.BlobID("blobID", bm.BlobID),
				logparam.Duration("age", age))
			return nil
		}

		sid := content.SessionIDFromBlobID(bm.BlobID)
		if s, ok := activeSessions[sid]; ok {
			if age := cutoffTime.Sub(s.CheckpointTime); age < safety.SessionExpirationAge {
				log(ctx).Debugf(" preserving %v because it's part of an active session (%v)", bm.BlobID, sid)
				contentlog.Log2(ctx, log,
					"preserving blob - part of active session",
					blobparam.BlobID("blobID", bm.BlobID),
					logparam.String("sessionID", string(sid)))
				return nil
			}
		}
@@ -120,7 +137,10 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite
	close(unused)

	unreferencedCount, unreferencedSize := unreferenced.Approximate()
	log(ctx).Debugf("Found %v blobs to delete (%v)", unreferencedCount, units.BytesString(unreferencedSize))
	contentlog.Log2(ctx, log,
		"Found blobs to delete",
		logparam.UInt32("count", unreferencedCount),
		logparam.Int64("bytes", unreferencedSize))

	// wait for all delete workers to finish.
	if err := eg.Wait(); err != nil {
@@ -133,7 +153,10 @@ func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWrite

	del, cnt := deleted.Approximate()

	log(ctx).Infof("Deleted total %v unreferenced blobs (%v)", del, units.BytesString(cnt))
	contentlog.Log2(ctx, log,
		"Deleted total blobs",
		logparam.UInt32("count", del),
		logparam.Int64("bytes", cnt))

	return int(del), nil
}
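Throughout these maintenance hunks the pattern is the same: a random span ID is attached to the context once at the top of the operation, and every record logged below inherits it. A minimal standalone sketch of that mechanism (hypothetical helpers, standard library only — the real contentlog package is not shown here):

```go
package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

type spanKey struct{}

// withSpan mimics the WithParams pattern: it attaches a named random
// span ID to the context so every record emitted below inherits it.
func withSpan(ctx context.Context, name string) context.Context {
	var b [8]byte
	_, _ = rand.Read(b[:])

	merged := map[string]string{}
	if spans, ok := ctx.Value(spanKey{}).(map[string]string); ok {
		for k, v := range spans {
			merged[k] = v
		}
	}
	merged["span:"+name] = hex.EncodeToString(b[:])

	return context.WithValue(ctx, spanKey{}, merged)
}

// logMsg emits one JSON record stamped with all span IDs in the context.
func logMsg(ctx context.Context, msg string) {
	rec := map[string]string{"m": msg}
	if spans, ok := ctx.Value(spanKey{}).(map[string]string); ok {
		for k, v := range spans {
			rec[k] = v
		}
	}

	out, _ := json.Marshal(rec)
	fmt.Println(string(out))
}

func main() {
	ctx := withSpan(context.Background(), "maintenance")
	ctx = withSpan(ctx, "blob-gc") // nested span; both IDs are inherited
	logMsg(ctx, "Looking for unreferenced blobs...")
}
```

Because spans nest, a blob-gc record emitted deep inside a maintenance run carries both identifiers, which is what makes the per-operation grouping of records possible.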
@@ -9,6 +9,9 @@

	"github.com/pkg/errors"

	"github.com/kopia/kopia/internal/blobparam"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/format"
@@ -26,6 +29,11 @@ type ExtendBlobRetentionTimeOptions struct {

// ExtendBlobRetentionTime extends the retention time of all relevant blobs managed by storage engine with Object Locking enabled.
func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWriter, opt ExtendBlobRetentionTimeOptions) (int, error) {
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:blob-retain", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-blob-retain")

	const extendQueueSize = 100

	var (
@@ -47,7 +55,8 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite

	if !blobCfg.IsRetentionEnabled() {
		// Blob retention is disabled
		log(ctx).Info("Object lock retention is disabled.")
		contentlog.Log(ctx, log, "Object lock retention is disabled.")

		return 0, nil
	}

@@ -67,7 +76,11 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite

			for bm := range extend {
				if err1 := rep.BlobStorage().ExtendBlobRetention(ctx, bm.BlobID, extendOpts); err1 != nil {
					log(ctx).Errorf("Failed to extend blob %v: %v", bm.BlobID, err1)
					contentlog.Log2(ctx, log,
						"Failed to extend blob",
						blobparam.BlobID("blobID", bm.BlobID),
						logparam.Error("error", err1))

					atomic.AddUint32(failedCnt, 1)

					continue
@@ -75,7 +88,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite

				curCnt := atomic.AddUint32(cnt, 1)
				if curCnt%100 == 0 {
					log(ctx).Infof(" extended %v blobs", curCnt)
					contentlog.Log1(ctx, log, "extended blobs", logparam.UInt32("count", curCnt))
				}
			}
		}()
@@ -88,7 +101,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite
	}

	// iterate all relevant (active, extendable) blobs and count them + optionally send to the channel to be extended
	log(ctx).Info("Extending retention time for blobs...")
	contentlog.Log(ctx, log, "Extending retention time for blobs...")

	err = blob.IterateAllPrefixesInParallel(ctx, opt.Parallel, rep.BlobStorage(), prefixes, func(bm blob.Metadata) error {
		if !opt.DryRun {
@@ -101,7 +114,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite
	})

	close(extend)
	log(ctx).Infof("Found %v blobs to extend", *toExtend)
	contentlog.Log1(ctx, log, "Found blobs to extend", logparam.UInt32("count", *toExtend))

	// wait for all extend workers to finish.
	wg.Wait()
@@ -118,7 +131,7 @@ func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWrite
		return int(*toExtend), nil
	}

	log(ctx).Infof("Extended total %v blobs", *cnt)
	contentlog.Log1(ctx, log, "Extended total blobs", logparam.UInt32("count", *cnt))

	return int(*cnt), nil
}
@@ -130,7 +143,7 @@ func CheckExtendRetention(ctx context.Context, blobCfg format.BlobStorageConfigu
	}

	if !p.FullCycle.Enabled {
		log(ctx).Warn("Object Lock extension will not function because Full-Maintenance is disabled")
		userLog(ctx).Warn("Object Lock extension will not function because Full-Maintenance is disabled")
	}

	if blobCfg.RetentionPeriod > 0 && blobCfg.RetentionPeriod-p.FullCycle.Interval < minRetentionMaintenanceDiff {

@@ -8,7 +8,8 @@
	"github.com/pkg/errors"

	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/units"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/blob"
)
@@ -43,6 +44,11 @@ func defaultLogRetention() LogRetentionOptions {

// CleanupLogs deletes old logs blobs beyond certain age, total size or count.
func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRetentionOptions) ([]blob.Metadata, error) {
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:cleanup-logs", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-cleanup-logs")

	if opt.TimeFunc == nil {
		opt.TimeFunc = clock.Now
	}
@@ -82,7 +88,10 @@ func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRe

	toDelete := allLogBlobs[deletePosition:]

	log(ctx).Debugf("Keeping %v logs of total size %v", deletePosition, units.BytesString(totalSize))
	contentlog.Log2(ctx, log,
		"Keeping logs",
		logparam.Int("count", deletePosition),
		logparam.Int64("bytes", totalSize))

	if !opt.DryRun {
		for _, bm := range toDelete {

@@ -42,7 +42,7 @@ func shouldRunContentIndexVerify(ctx context.Context) bool {

	percentage, err := strconv.ParseFloat(v, 64)
	if err != nil {
		log(ctx).Warnf("The '%s' environment variable appears to have a non numeric value: '%q', %s", envName, v, err)
		userLog(ctx).Warnf("The '%s' environment variable appears to have a non numeric value: '%q', %s", envName, v, err)

		return false
	}

@@ -9,7 +9,10 @@

	"github.com/pkg/errors"

	"github.com/kopia/kopia/internal/units"
	"github.com/kopia/kopia/internal/blobparam"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/contentparam"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/content"
@@ -37,15 +40,22 @@ type contentInfoOrError struct {

// RewriteContents rewrites contents according to provided criteria and creates new
// blobs and index entries to point at them.
//
//nolint:funlen
func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *RewriteContentsOptions, safety SafetyParameters) error {
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:content-rewrite", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-content-rewrite")

	if opt == nil {
		return errors.New("missing options")
	}

	if opt.ShortPacks {
		log(ctx).Info("Rewriting contents from short packs...")
		contentlog.Log(ctx, log, "Rewriting contents from short packs...")
	} else {
		log(ctx).Info("Rewriting contents...")
		contentlog.Log(ctx, log, "Rewriting contents...")
	}

	cnt := getContentToRewrite(ctx, rep, opt)
@@ -77,18 +87,27 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *
				return
			}

			var optDeleted string
			if c.Deleted {
				optDeleted = " (deleted)"
			}

			age := rep.Time().Sub(c.Timestamp())
			if age < safety.RewriteMinAge {
				log(ctx).Debugf("Not rewriting content %v (%v bytes) from pack %v%v %v, because it's too new.", c.ContentID, c.PackedLength, c.PackBlobID, optDeleted, age)
				contentlog.Log5(ctx, log,
					"Not rewriting content",
					contentparam.ContentID("contentID", c.ContentID),
					logparam.UInt32("bytes", c.PackedLength),
					blobparam.BlobID("packBlobID", c.PackBlobID),
					logparam.Bool("deleted", c.Deleted),
					logparam.Duration("age", age))

				continue
			}

			log(ctx).Debugf("Rewriting content %v (%v bytes) from pack %v%v %v", c.ContentID, c.PackedLength, c.PackBlobID, optDeleted, age)
			contentlog.Log5(ctx, log,
				"Rewriting content",
				contentparam.ContentID("contentID", c.ContentID),
				logparam.UInt32("bytes", c.PackedLength),
				blobparam.BlobID("packBlobID", c.PackBlobID),
				logparam.Bool("deleted", c.Deleted),
				logparam.Duration("age", age))

			mu.Lock()
			totalBytes += int64(c.PackedLength)
			mu.Unlock()
@@ -101,9 +120,16 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *
			// provide option to ignore failures when rewriting deleted contents during maintenance
			// this is for advanced use only
			if os.Getenv("KOPIA_IGNORE_MAINTENANCE_REWRITE_ERROR") != "" && c.Deleted {
				log(ctx).Infof("IGNORED: unable to rewrite deleted content %q: %v", c.ContentID, err)
				contentlog.Log2(ctx, log,
					"IGNORED: unable to rewrite deleted content",
					contentparam.ContentID("contentID", c.ContentID),
					logparam.Error("error", err))
			} else {
				log(ctx).Infof("unable to rewrite content %q: %v", c.ContentID, err)
				contentlog.Log2(ctx, log,
					"unable to rewrite content",
					contentparam.ContentID("contentID", c.ContentID),
					logparam.Error("error", err))

				mu.Lock()
				failedCount++
				mu.Unlock()
@@ -115,7 +141,9 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *

	wg.Wait()

	log(ctx).Infof("Total bytes rewritten %v", units.BytesString(totalBytes))
	contentlog.Log1(ctx, log,
		"Total bytes rewritten",
		logparam.Int64("bytes", totalBytes))

	if failedCount == 0 {
		//nolint:wrapcheck
@@ -4,13 +4,20 @@
	"context"
	"time"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/content/indexblob"
)

// dropDeletedContents rewrites indexes while dropping deleted contents above certain age.
func dropDeletedContents(ctx context.Context, rep repo.DirectRepositoryWriter, dropDeletedBefore time.Time, safety SafetyParameters) error {
	log(ctx).Infof("Dropping contents deleted before %v", dropDeletedBefore)
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:drop-deleted-contents", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-drop-deleted-contents")

	contentlog.Log1(ctx, log, "Dropping deleted contents", logparam.Time("dropDeletedBefore", dropDeletedBefore))

	//nolint:wrapcheck
	return rep.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{

@@ -3,13 +3,16 @@
import (
	"context"

	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/repo/content/indexblob"
)

// runTaskIndexCompactionQuick rewrites index blobs to reduce their count but does not drop any contents.
func runTaskIndexCompactionQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error {
	return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskIndexCompaction, s, func() error {
		log(ctx).Info("Compacting indexes...")
		log := runParams.rep.LogManager().NewLogger("maintenance-index-compaction")

		contentlog.Log(ctx, log, "Compacting indexes...")

		const maxSmallBlobsForIndexCompaction = 8

@@ -10,6 +10,8 @@
	"github.com/pkg/errors"

	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/epoch"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/content"
@@ -17,7 +19,8 @@
	"github.com/kopia/kopia/repo/logging"
)

var log = logging.Module("maintenance")
// User-visible log output.
var userLog = logging.Module("maintenance")

const maxClockSkew = 5 * time.Minute

@@ -56,7 +59,7 @@
// shouldRun returns Mode if repository is due for periodic maintenance.
func shouldRun(ctx context.Context, rep repo.DirectRepository, p *Params) (Mode, error) {
	if myUsername := rep.ClientOptions().UsernameAtHost(); p.Owner != myUsername {
		log(ctx).Debugf("maintenance owned by another user '%v'", p.Owner)
		userLog(ctx).Debugf("maintenance owned by another user '%v'", p.Owner)
		return ModeNone, nil
	}

@@ -68,25 +71,25 @@ func shouldRun(ctx context.Context, rep repo.DirectRepository, p *Params) (Mode,
	// check full cycle first, as it does more than the quick cycle
	if p.FullCycle.Enabled {
		if !rep.Time().Before(s.NextFullMaintenanceTime) {
			log(ctx).Debug("due for full maintenance cycle")
			userLog(ctx).Debug("due for full maintenance cycle")
			return ModeFull, nil
		}

		log(ctx).Debugf("not due for full maintenance cycle until %v", s.NextFullMaintenanceTime)
		userLog(ctx).Debugf("not due for full maintenance cycle until %v", s.NextFullMaintenanceTime)
	} else {
		log(ctx).Debug("full maintenance cycle not enabled")
		userLog(ctx).Debug("full maintenance cycle not enabled")
	}

	// no time for full cycle, check quick cycle
	if p.QuickCycle.Enabled {
		if !rep.Time().Before(s.NextQuickMaintenanceTime) {
			log(ctx).Debug("due for quick maintenance cycle")
			userLog(ctx).Debug("due for quick maintenance cycle")
			return ModeQuick, nil
		}

		log(ctx).Debugf("not due for quick maintenance cycle until %v", s.NextQuickMaintenanceTime)
		userLog(ctx).Debugf("not due for quick maintenance cycle until %v", s.NextQuickMaintenanceTime)
	} else {
		log(ctx).Debug("quick maintenance cycle not enabled")
		userLog(ctx).Debug("quick maintenance cycle not enabled")
	}

	return ModeNone, nil
@@ -106,13 +109,13 @@ func updateSchedule(ctx context.Context, runParams RunParameters) error {
		// on full cycle, also update the quick cycle
		s.NextFullMaintenanceTime = rep.Time().Add(p.FullCycle.Interval)
		s.NextQuickMaintenanceTime = rep.Time().Add(p.QuickCycle.Interval)
		log(ctx).Debugf("scheduling next full cycle at %v", s.NextFullMaintenanceTime)
		log(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime)
		userLog(ctx).Debugf("scheduling next full cycle at %v", s.NextFullMaintenanceTime)
		userLog(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime)

		return SetSchedule(ctx, rep, s)

	case ModeQuick:
		log(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime)
		userLog(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime)
		s.NextQuickMaintenanceTime = rep.Time().Add(p.QuickCycle.Interval)

		return SetSchedule(ctx, rep, s)
@@ -149,9 +152,10 @@ func (e NotOwnedError) Error() string {
// lock can be acquired. Lock is passed to the function, which ensures that every call to Run()
// is within the exclusive context.
func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mode, force bool, cb func(ctx context.Context, runParams RunParameters) error) error {
	rep.DisableIndexRefresh()
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:maintenance", contentlog.RandomSpanID()))

	ctx = rep.AlsoLogToContentLog(ctx)
	rep.DisableIndexRefresh()

	p, err := GetParams(ctx, rep)
	if err != nil {
@@ -170,12 +174,12 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod
	}

	if mode == ModeNone {
		log(ctx).Debug("not due for maintenance")
		userLog(ctx).Debug("not due for maintenance")
		return nil
	}

	lockFile := rep.ConfigFilename() + ".mlock"
	log(ctx).Debugf("Acquiring maintenance lock in file %v", lockFile)
	userLog(ctx).Debugf("Acquiring maintenance lock in file %v", lockFile)

	// acquire local lock on a config file
	l := flock.New(lockFile)
@@ -186,7 +190,7 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod
	}

	if !ok {
		log(ctx).Debug("maintenance is already in progress locally")
		userLog(ctx).Debug("maintenance is already in progress locally")
		return nil
	}

@@ -211,8 +215,8 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod
		return errors.Wrap(err, "error checking for clock skew")
	}

	log(ctx).Infof("Running %v maintenance...", runParams.Mode)
	defer log(ctx).Infof("Finished %v maintenance.", runParams.Mode)
	userLog(ctx).Infof("Running %v maintenance...", runParams.Mode)
	defer userLog(ctx).Infof("Finished %v maintenance.", runParams.Mode)

	if err := runParams.rep.Refresh(ctx); err != nil {
		return errors.Wrap(err, "error refreshing indexes before maintenance")
@@ -252,6 +256,8 @@ func Run(ctx context.Context, runParams RunParameters, safety SafetyParameters)
}

func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety SafetyParameters) error {
	log := runParams.rep.LogManager().NewLogger("maintenance-quick")

	s, err := GetSchedule(ctx, runParams.rep)
	if err != nil {
		return errors.Wrap(err, "unable to get schedule")
@@ -259,7 +265,7 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa

	em, ok, emerr := runParams.rep.ContentManager().EpochManager(ctx)
	if ok {
		log(ctx).Debug("running quick epoch maintenance only")
		userLog(ctx).Debug("running quick epoch maintenance only")

		return runTaskEpochMaintenanceQuick(ctx, em, runParams, s)
	}
@@ -275,7 +281,7 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa
			return errors.Wrap(err, "error rewriting metadata contents")
		}
	} else {
		notRewritingContents(ctx)
		notRewritingContents(ctx, log)
	}

	if shouldDeleteOrphanedPacks(runParams.rep.Time(), s, safety) {
@@ -286,10 +292,10 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa
		// running full orphaned blob deletion, otherwise next quick maintenance will start a quick rewrite
		// and we'd never delete blobs orphaned by full rewrite.
		if hadRecentFullRewrite(s) {
			log(ctx).Debug("Had recent full rewrite - performing full blob deletion.")
			userLog(ctx).Debug("Had recent full rewrite - performing full blob deletion.")
			err = runTaskDeleteOrphanedBlobsFull(ctx, runParams, s, safety)
		} else {
			log(ctx).Debug("Performing quick blob deletion.")
			userLog(ctx).Debug("Performing quick blob deletion.")
			err = runTaskDeleteOrphanedBlobsQuick(ctx, runParams, s, safety)
		}

@@ -297,37 +303,37 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa
			return errors.Wrap(err, "error deleting unreferenced metadata blobs")
		}
	} else {
		notDeletingOrphanedBlobs(ctx, s, safety)
		notDeletingOrphanedBlobs(ctx, log, s, safety)
	}

	// consolidate many smaller indexes into fewer larger ones.
	if err := runTaskIndexCompactionQuick(ctx, runParams, s, safety); err != nil {
	if err := runTaskIndexCompactionQuick(contentlog.WithParams(ctx, logparam.String("span:index-compaction", contentlog.RandomSpanID())), runParams, s, safety); err != nil {
		return errors.Wrap(err, "error performing index compaction")
	}

	// clean up logs last
	if err := runTaskCleanupLogs(ctx, runParams, s); err != nil {
	if err := runTaskCleanupLogs(contentlog.WithParams(ctx, logparam.String("span:cleanup-logs", contentlog.RandomSpanID())), runParams, s); err != nil {
		return errors.Wrap(err, "error cleaning up logs")
	}

	return nil
}

func notRewritingContents(ctx context.Context) {
	log(ctx).Info("Previous content rewrite has not been finalized yet, waiting until the next blob deletion.")
func notRewritingContents(ctx context.Context, log *contentlog.Logger) {
	contentlog.Log(ctx, log, "Previous content rewrite has not been finalized yet, waiting until the next blob deletion.")
}

func notDeletingOrphanedBlobs(ctx context.Context, s *Schedule, safety SafetyParameters) {
func notDeletingOrphanedBlobs(ctx context.Context, log *contentlog.Logger, s *Schedule, safety SafetyParameters) {
	left := nextBlobDeleteTime(s, safety).Sub(clock.Now()).Truncate(time.Second)

	log(ctx).Infof("Skipping blob deletion because not enough time has passed yet (%v left).", left)
	contentlog.Log1(ctx, log, "Skipping blob deletion because not enough time has passed yet", logparam.Duration("left", left))
}

func runTaskCleanupLogs(ctx context.Context, runParams RunParameters, s *Schedule) error {
	return ReportRun(ctx, runParams.rep, TaskCleanupLogs, s, func() error {
		deleted, err := CleanupLogs(ctx, runParams.rep, runParams.Params.LogRetention.OrDefault())

		log(ctx).Infof("Cleaned up %v logs.", len(deleted))
		userLog(ctx).Infof("Cleaned up %v logs.", len(deleted))

		return err
	})
@@ -335,14 +341,14 @@ func runTaskCleanupLogs(ctx context.Context, runParams RunParameters, s *Schedul

func runTaskEpochAdvance(ctx context.Context, em *epoch.Manager, runParams RunParameters, s *Schedule) error {
	return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochAdvance, s, func() error {
		log(ctx).Info("Cleaning up no-longer-needed epoch markers...")
		userLog(ctx).Info("Cleaning up no-longer-needed epoch markers...")
		return errors.Wrap(em.MaybeAdvanceWriteEpoch(ctx), "error advancing epoch marker")
	})
}

func runTaskEpochMaintenanceQuick(ctx context.Context, em *epoch.Manager, runParams RunParameters, s *Schedule) error {
	err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error {
		log(ctx).Info("Compacting an eligible uncompacted epoch...")
		userLog(ctx).Info("Compacting an eligible uncompacted epoch...")
		return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch")
	})
	if err != nil {
@@ -366,7 +372,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s

	// compact a single epoch
	if err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error {
		log(ctx).Info("Compacting an eligible uncompacted epoch...")
		userLog(ctx).Info("Compacting an eligible uncompacted epoch...")
		return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch")
	}); err != nil {
		return err
@@ -378,7 +384,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s

	// compact range
	if err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochGenerateRange, s, func() error {
		log(ctx).Info("Attempting to compact a range of epoch indexes ...")
		userLog(ctx).Info("Attempting to compact a range of epoch indexes ...")

		return errors.Wrap(em.MaybeGenerateRangeCheckpoint(ctx), "error creating epoch range indexes")
	}); err != nil {
@@ -387,7 +393,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s

	// clean up epoch markers
	err := ReportRun(ctx, runParams.rep, TaskEpochCleanupMarkers, s, func() error {
		log(ctx).Info("Cleaning up unneeded epoch markers...")
		userLog(ctx).Info("Cleaning up unneeded epoch markers...")

		return errors.Wrap(em.CleanupMarkers(ctx), "error removing epoch markers")
	})
@@ -396,7 +402,7 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
	}

	return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochDeleteSupersededIndexes, s, func() error {
		log(ctx).Info("Cleaning up old index blobs which have already been compacted...")
		userLog(ctx).Info("Cleaning up old index blobs which have already been compacted...")
		return errors.Wrap(em.CleanupSupersededIndexes(ctx), "error removing superseded epoch index blobs")
	})
}
@@ -404,6 +410,8 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s
func runTaskDropDeletedContentsFull(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error {
	var safeDropTime time.Time

	log := runParams.rep.LogManager().NewLogger("maintenance-drop-deleted-contents")

	if safety.RequireTwoGCCycles {
		safeDropTime = findSafeDropTime(s.Runs[TaskSnapshotGarbageCollection], safety)
	} else {
@@ -411,11 +419,12 @@ func runTaskDropDeletedContentsFull(ctx context.Context, runParams RunParameters
	}

	if safeDropTime.IsZero() {
		log(ctx).Info("Not forgetting deleted contents yet since not enough time has passed since previous successful Snapshot GC. Will try again next time.")
		contentlog.Log(ctx, log,
			"Not forgetting deleted contents yet since not enough time has passed since previous successful Snapshot GC. Will try again next time.")
		return nil
	}

	log(ctx).Infof("Found safe time to drop indexes: %v", safeDropTime)
	contentlog.Log1(ctx, log, "Found safe time to drop indexes", logparam.Time("safeDropTime", safeDropTime))

	return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskDropDeletedContentsFull, s, func() error {
		return dropDeletedContents(ctx, runParams.rep, safeDropTime, safety)
@@ -472,6 +481,8 @@ func runTaskExtendBlobRetentionTimeFull(ctx context.Context, runParams RunParame
}

func runFullMaintenance(ctx context.Context, runParams RunParameters, safety SafetyParameters) error {
	log := runParams.rep.LogManager().NewLogger("maintenance-full")

	s, err := GetSchedule(ctx, runParams.rep)
	if err != nil {
		return errors.Wrap(err, "unable to get schedule")
@@ -484,7 +495,7 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf
			return errors.Wrap(err, "error rewriting contents in short packs")
		}
	} else {
		notRewritingContents(ctx)
		notRewritingContents(ctx, log)
	}

	// rewrite indexes by dropping content entries that have been marked
@@ -499,7 +510,7 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf
			return errors.Wrap(err, "error deleting unreferenced blobs")
		}
	} else {
		notDeletingOrphanedBlobs(ctx, s, safety)
		notDeletingOrphanedBlobs(ctx, log, s, safety)
	}

	// extend retention-time on supported storage.
@@ -508,7 +519,7 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf
			return errors.Wrap(err, "error extending object lock retention time")
		}
	} else {
		log(ctx).Debug("Extending object lock retention-period is disabled.")
		userLog(ctx).Debug("Extending object lock retention-period is disabled.")
	}

	if err := runTaskEpochMaintenanceFull(ctx, runParams, s); err != nil {

@@ -212,7 +212,7 @@ func ReportRun(ctx context.Context, rep repo.DirectRepositoryWriter, taskType Ta
	s.ReportRun(taskType, ri)

	if err := SetSchedule(ctx, rep, s); err != nil {
		log(ctx).Errorf("unable to report run: %v", err)
		userLog(ctx).Errorf("unable to report run: %v", err)
	}

	return runErr
repo/open.go
@@ -3,6 +3,7 @@
import (
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"strings"
@@ -14,6 +15,8 @@

	"github.com/kopia/kopia/internal/cache"
	"github.com/kopia/kopia/internal/cacheprot"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/crypto"
	"github.com/kopia/kopia/internal/feature"
	"github.com/kopia/kopia/internal/metrics"
@@ -66,12 +69,13 @@

// Options provides configuration parameters for connection to a repository.
type Options struct {
	TraceStorage bool // Logs all storage access using provided Printf-style function
	TimeNowFunc func() time.Time // Time provider
	DisableInternalLog bool // Disable internal log
	UpgradeOwnerID string // Owner-ID of any upgrade in progress, when this is not set the access may be restricted
	DoNotWaitForUpgrade bool // Disable the exponential forever backoff on an upgrade lock.
	BeforeFlush []RepositoryWriterCallback // list of callbacks to invoke before every flush
	TraceStorage bool // Logs all storage access using provided Printf-style function
	ContentLogWriter io.Writer // Writer to which the content log is also written
	TimeNowFunc func() time.Time // Time provider
	DisableRepositoryLog bool // Disable repository log manager
	UpgradeOwnerID string // Owner-ID of any upgrade in progress, when this is not set the access may be restricted
	DoNotWaitForUpgrade bool // Disable the exponential forever backoff on an upgrade lock.
	BeforeFlush []RepositoryWriterCallback // list of callbacks to invoke before every flush

	OnFatalError func(err error) // function to invoke when repository encounters a fatal error, usually invokes os.Exit
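For anyone consuming Kopia as a library, the renamed and added fields above change how logging is configured at open time. A sketch of one plausible wiring (repo.Open is assumed to be the standard entry point; error handling kept minimal):

```go
package example

import (
	"context"
	"os"

	"github.com/kopia/kopia/repo"
)

// openWithContentLog opens a repository while mirroring content-log
// records to a local file and skipping the in-repository log blobs.
// ContentLogWriter and DisableRepositoryLog are the new fields above.
func openWithContentLog(ctx context.Context, configFile, password string) (repo.Repository, error) {
	f, err := os.Create("content.log")
	if err != nil {
		return nil, err
	}

	return repo.Open(ctx, configFile, password, &repo.Options{
		ContentLogWriter:     f,    // content-log records are also written here
		DisableRepositoryLog: true, // do not persist log blobs to the repository
	})
}
```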
@@ -217,10 +221,6 @@ func openDirect(ctx context.Context, configFile string, lc *LocalConfig, passwor
		return nil, errors.Wrap(err, "cannot open storage")
	}

	if options.TraceStorage {
		st = loggingwrapper.NewWrapper(st, log(ctx), "[STORAGE] ")
	}

	if lc.ReadOnly {
		st = readonly.NewWrapper(st)
	}
@@ -243,7 +243,6 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions,
	cacheOpts = cacheOpts.CloneOrDefault()
	cmOpts := &content.ManagerOptions{
		TimeNow: defaultTime(options.TimeNowFunc),
		DisableInternalLog: options.DisableInternalLog,
		PermissiveCacheLoading: cliOpts.PermissiveCacheLoading,
	}

@@ -323,7 +322,14 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions,
	}

	dw := repodiag.NewWriter(st, fmgr)
	logManager := repodiag.NewLogManager(ctx, dw)

	logManager := repodiag.NewLogManager(ctx, dw, options.DisableRepositoryLog, options.ContentLogWriter,
		logparam.String("span:client", contentlog.HashSpanID(cliOpts.UsernameAtHost())),
		logparam.String("span:repo", contentlog.RandomSpanID()))

	if options.TraceStorage {
		st = loggingwrapper.NewWrapper(st, log(ctx), logManager.NewLogger("storage"), "[STORAGE] ")
	}

	scm, ferr := content.NewSharedManager(ctx, st, fmgr, cacheOpts, cmOpts, logManager, mr)
	if ferr != nil {
@@ -369,6 +375,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions,
			metricsRegistry: mr,
			refCountedCloser: closer,
			beforeFlush: options.BeforeFlush,
			logManager: logManager,
		},
	}

@@ -13,6 +13,7 @@
	"github.com/kopia/kopia/internal/crypto"
	"github.com/kopia/kopia/internal/grpcapi"
	"github.com/kopia/kopia/internal/metrics"
	"github.com/kopia/kopia/internal/repodiag"
	"github.com/kopia/kopia/repo/blob"
	"github.com/kopia/kopia/repo/blob/throttling"
	"github.com/kopia/kopia/repo/compression"
@@ -81,13 +82,13 @@ type DirectRepository interface {
	ContentReader() content.Reader
	IndexBlobs(ctx context.Context, includeInactive bool) ([]indexblob.Metadata, error)
	NewDirectWriter(ctx context.Context, opt WriteSessionOptions) (context.Context, DirectRepositoryWriter, error)
	AlsoLogToContentLog(ctx context.Context) context.Context
	UniqueID() []byte
	ConfigFilename() string
	DeriveKey(purpose []byte, keyLength int) ([]byte, error)
	Token(password string) (string, error)
	Throttler() throttling.SettableThrottler
	DisableIndexRefresh()
	LogManager() *repodiag.LogManager
}

// DirectRepositoryWriter provides low-level write access to the repository.
@@ -108,6 +109,7 @@ type immutableDirectRepositoryParameters struct {
	throttler throttling.SettableThrottler
	metricsRegistry *metrics.Registry
	beforeFlush []RepositoryWriterCallback
	logManager *repodiag.LogManager

	*refCountedCloser
}
@@ -206,6 +208,11 @@ func (r *directRepository) DisableIndexRefresh() {
	r.cmgr.DisableIndexRefresh()
}

// LogManager returns the log manager.
func (r *directRepository) LogManager() *repodiag.LogManager {
	return r.logManager
}

// OpenObject opens the reader for a given object, returns object.ErrNotFound.
func (r *directRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) {
	//nolint:wrapcheck
@@ -275,11 +282,6 @@ func (r *directRepository) UpdateDescription(d string) {
	r.cliOpts.Description = d
}

// AlsoLogToContentLog returns a context that causes all logs to also be sent to content log.
func (r *directRepository) AlsoLogToContentLog(ctx context.Context) context.Context {
	return r.sm.AlsoLogToContentLog(ctx)
}

// NewWriter returns new RepositoryWriter session for repository.
func (r *directRepository) NewWriter(ctx context.Context, opt WriteSessionOptions) (context.Context, RepositoryWriter, error) {
	return r.NewDirectWriter(ctx, opt)

@@ -9,6 +9,9 @@

	"github.com/kopia/kopia/fs"
	"github.com/kopia/kopia/internal/bigmap"
	"github.com/kopia/kopia/internal/contentlog"
	"github.com/kopia/kopia/internal/contentlog/logparam"
	"github.com/kopia/kopia/internal/contentparam"
	"github.com/kopia/kopia/internal/stats"
	"github.com/kopia/kopia/internal/units"
	"github.com/kopia/kopia/repo"
@@ -21,9 +24,10 @@
	"github.com/kopia/kopia/snapshot/snapshotfs"
)

var log = logging.Module("snapshotgc")
// User-visible log output.
var userLog = logging.Module("snapshotgc")

func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap.Set) error {
func findInUseContentIDs(ctx context.Context, log *contentlog.Logger, rep repo.Repository, used *bigmap.Set) error {
	ids, err := snapshot.ListSnapshotManifests(ctx, rep, nil, nil)
	if err != nil {
		return errors.Wrap(err, "unable to list snapshot manifest IDs")
@@ -56,7 +60,7 @@ func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap.

	defer w.Close(ctx)

	log(ctx).Info("Looking for active contents...")
	contentlog.Log(ctx, log, "Looking for active contents...")

	for _, m := range manifests {
		root, err := snapshotfs.SnapshotRoot(rep, m)
@@ -82,21 +86,28 @@ func Run(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, sa
}

func runInternal(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, safety maintenance.SafetyParameters, maintenanceStartTime time.Time) error {
	ctx = contentlog.WithParams(ctx,
		logparam.String("span:snapshot-gc", contentlog.RandomSpanID()))

	log := rep.LogManager().NewLogger("maintenance-snapshot-gc")

	used, serr := bigmap.NewSet(ctx)
	if serr != nil {
		return errors.Wrap(serr, "unable to create new set")
	}
	defer used.Close(ctx)

	if err := findInUseContentIDs(ctx, rep, used); err != nil {
	if err := findInUseContentIDs(ctx, log, rep, used); err != nil {
		return errors.Wrap(err, "unable to find in-use content ID")
	}

	return findUnreferencedAndRepairRereferenced(ctx, rep, gcDelete, safety, maintenanceStartTime, used)
	return findUnreferencedAndRepairRereferenced(ctx, log, rep, gcDelete, safety, maintenanceStartTime, used)
}

//nolint:funlen
func findUnreferencedAndRepairRereferenced(
	ctx context.Context,
	log *contentlog.Logger,
	rep repo.DirectRepositoryWriter,
	gcDelete bool,
	safety maintenance.SafetyParameters,
@@ -105,7 +116,7 @@ func findUnreferencedAndRepairRereferenced(
) error {
	var unused, inUse, system, tooRecent, undeleted stats.CountSum

	log(ctx).Info("Looking for unreferenced contents...")
	contentlog.Log(ctx, log, "Looking for unreferenced contents...")

	// Ensure that the iteration includes deleted contents, so those can be
	// undeleted (recovered).
@@ -132,13 +143,22 @@ func findUnreferencedAndRepairRereferenced(
		}

		if maintenanceStartTime.Sub(ci.Timestamp()) < safety.MinContentAgeSubjectToGC {
			log(ctx).Debugf("recent unreferenced content %v (%v bytes, modified %v)", ci.ContentID, ci.PackedLength, ci.Timestamp())
			contentlog.Log3(ctx, log,
				"recent unreferenced content",
				contentparam.ContentID("contentID", ci.ContentID),
				logparam.Int64("bytes", int64(ci.PackedLength)),
				logparam.Time("modified", ci.Timestamp()))
			tooRecent.Add(int64(ci.PackedLength))

			return nil
		}

		log(ctx).Debugf("unreferenced %v (%v bytes, modified %v)", ci.ContentID, ci.PackedLength, ci.Timestamp())
		contentlog.Log3(ctx, log,
			"unreferenced content",
			contentparam.ContentID("contentID", ci.ContentID),
			logparam.Int64("bytes", int64(ci.PackedLength)),
			logparam.Time("modified", ci.Timestamp()))

		cnt, totalSize := unused.Add(int64(ci.PackedLength))

		if gcDelete {
@@ -148,7 +168,10 @@ func findUnreferencedAndRepairRereferenced(
		}

		if cnt%100000 == 0 {
			log(ctx).Infof("... found %v unused contents so far (%v bytes)", cnt, units.BytesString(totalSize))
			contentlog.Log2(ctx, log,
				"found unused contents so far",
				logparam.UInt32("count", cnt),
				logparam.Int64("bytes", totalSize))

			if gcDelete {
				if err := rep.Flush(ctx); err != nil {
@@ -160,17 +183,38 @@ func findUnreferencedAndRepairRereferenced(
		return nil
	})

	unusedCount, unusedBytes := toCountAndBytesString(&unused)
	inUseCount, inUseBytes := toCountAndBytesString(&inUse)
	systemCount, systemBytes := toCountAndBytesString(&system)
	tooRecentCount, tooRecentBytes := toCountAndBytesString(&tooRecent)
	undeletedCount, undeletedBytes := toCountAndBytesString(&undeleted)
	unusedCount, unusedBytes := unused.Approximate()
	inUseCount, inUseBytes := inUse.Approximate()
	systemCount, systemBytes := system.Approximate()
	tooRecentCount, tooRecentBytes := tooRecent.Approximate()
	undeletedCount, undeletedBytes := undeleted.Approximate()

	log(ctx).Infof("GC found %v unused contents (%v)", unusedCount, unusedBytes)
	log(ctx).Infof("GC found %v unused contents that are too recent to delete (%v)", tooRecentCount, tooRecentBytes)
	log(ctx).Infof("GC found %v in-use contents (%v)", inUseCount, inUseBytes)
	log(ctx).Infof("GC found %v in-use system-contents (%v)", systemCount, systemBytes)
	log(ctx).Infof("GC undeleted %v contents (%v)", undeletedCount, undeletedBytes)
	userLog(ctx).Infof("GC found %v unused contents (%v)", unusedCount, units.BytesString(unusedBytes))
	userLog(ctx).Infof("GC found %v unused contents that are too recent to delete (%v)", tooRecentCount, units.BytesString(tooRecentBytes))
	userLog(ctx).Infof("GC found %v in-use contents (%v)", inUseCount, units.BytesString(inUseBytes))
	userLog(ctx).Infof("GC found %v in-use system-contents (%v)", systemCount, units.BytesString(systemBytes))
	userLog(ctx).Infof("GC undeleted %v contents (%v)", undeletedCount, units.BytesString(undeletedBytes))

	contentlog.Log2(ctx, log,
		"GC found unused contents",
		logparam.UInt32("count", unusedCount),
		logparam.Int64("bytes", unusedBytes))
	contentlog.Log2(ctx, log,
		"GC found unused contents that are too recent to delete",
		logparam.UInt32("count", tooRecentCount),
		logparam.Int64("bytes", tooRecentBytes))
	contentlog.Log2(ctx, log,
		"GC found in-use contents",
		logparam.UInt32("count", inUseCount),
		logparam.Int64("bytes", inUseBytes))
	contentlog.Log2(ctx, log,
		"GC found in-use system-contents",
		logparam.UInt32("count", systemCount),
		logparam.Int64("bytes", systemBytes))
	contentlog.Log2(ctx, log,
		"GC undeleted contents",
		logparam.UInt32("count", undeletedCount),
		logparam.Int64("bytes", undeletedBytes))

	if err != nil {
		return errors.Wrap(err, "error iterating contents")
@@ -186,9 +230,3 @@ func findUnreferencedAndRepairRereferenced(

	return nil
}

func toCountAndBytesString(cs *stats.CountSum) (uint32, string) {
	count, sumBytes := cs.Approximate()

	return count, units.BytesString(sumBytes)
}
@@ -20,6 +20,8 @@ func Run(ctx context.Context, dr repo.DirectRepositoryWriter, mode maintenance.M
|
||||
return ErrReadonly
|
||||
}
|
||||
|
||||
dr.LogManager().Enable()
|
||||
|
||||
//nolint:wrapcheck
|
||||
return maintenance.RunExclusive(ctx, dr, mode, force,
|
||||
func(ctx context.Context, runParams maintenance.RunParameters) error {
|
||||
|
||||
@@ -22,6 +22,8 @@
|
||||
|
||||
"github.com/kopia/kopia/fs"
|
||||
"github.com/kopia/kopia/fs/ignorefs"
|
||||
"github.com/kopia/kopia/internal/contentlog"
|
||||
"github.com/kopia/kopia/internal/contentlog/logparam"
|
||||
"github.com/kopia/kopia/internal/iocopy"
|
||||
"github.com/kopia/kopia/internal/timetrack"
|
||||
"github.com/kopia/kopia/internal/workshare"
|
||||
@@ -509,6 +511,8 @@ func (u *Uploader) uploadFileWithCheckpointing(ctx context.Context, relativePath
|
||||
// checkpointRoot invokes checkpoints on the provided registry and if a checkpoint entry was generated,
|
||||
// saves it in an incomplete snapshot manifest.
|
||||
func (u *Uploader) checkpointRoot(ctx context.Context, cp *checkpointRegistry, prototypeManifest *snapshot.Manifest) error {
|
||||
ctx = contentlog.WithParams(ctx, logparam.String("span:checkpoint", contentlog.RandomSpanID()))
|
||||
|
||||
var dmbCheckpoint snapshotfs.DirManifestBuilder
|
||||
if err := cp.runCheckpoints(&dmbCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "running checkpointers")
|
||||
@@ -1260,6 +1264,15 @@ func (u *Uploader) Upload(
|
||||
ctx, span := uploadTracer.Start(ctx, "Upload")
|
||||
defer span.End()
|
||||
|
||||
ctx = contentlog.WithParams(ctx, logparam.String("span:upload", contentlog.HashSpanID(sourceInfo.String())))
|
||||
|
||||
if dr, ok := u.repo.(repo.DirectRepository); ok {
|
||||
log := dr.LogManager().NewLogger("uploader")
|
||||
|
||||
contentlog.Log(ctx, log, "uploading started")
|
||||
defer contentlog.Log(ctx, log, "uploading finished")
|
||||
}
|
||||
|
||||
u.traceEnabled = span.IsRecording()
|
||||
|
||||
u.Progress.UploadStarted()
|
||||
|
||||
@@ -78,7 +78,7 @@ func newUploadTestHarness(ctx context.Context, t *testing.T) *uploadTestHarness
|
||||
require.NoError(t, err, "cannot create storage directory")
|
||||
|
||||
faulty := blobtesting.NewFaultyStorage(storage)
|
||||
logged := bloblogging.NewWrapper(faulty, testlogging.Printf(t.Logf, "{STORAGE} "), "")
|
||||
logged := bloblogging.NewWrapper(faulty, testlogging.Printf(t.Logf, "{STORAGE} "), nil, "")
|
||||
rec := repotesting.NewReconnectableStorage(t, logged)
|
||||
|
||||
err = repo.Initialize(ctx, rec, &repo.NewRepositoryOptions{}, masterPassword)
|
||||
|
||||
@@ -2,6 +2,7 @@

import (
	"context"
	"encoding/json"
	"os"
	"path/filepath"
	"strings"

@@ -10,6 +11,7 @@

	"github.com/google/uuid"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/kopia/kopia/internal/apiclient"

@@ -252,6 +254,9 @@ func testAPIServerRepository(t *testing.T, allowRepositoryUsers bool) {

	wait2()

	// wait for the logs to be uploaded
	verifyServerJSONLogs(t, e.RunAndExpectSuccess(t, "logs", "show", "--younger-than=2h"))

	// open repository client to a dead server, this should fail quickly instead of retrying forever.
	timer := timetrack.StartTimer()
@@ -267,6 +272,30 @@ func testAPIServerRepository(t *testing.T, allowRepositoryUsers bool) {
	require.Less(t, timer.Elapsed(), 15*time.Second)
}

func verifyServerJSONLogs(t require.TestingT, s []string) {
	clientSpans := make(map[string]int)
	remoteSessionSpans := make(map[string]int)

	for _, l := range s {
		var logLine map[string]any

		json.Unmarshal([]byte(l), &logLine)

		if s, ok := logLine["span:client"].(string); ok {
			clientSpans[s]++
		}

		if s, ok := logLine["span:server-session"].(string); ok {
			remoteSessionSpans[s]++
		}
	}

	// there should be 2 client sessions (initial setup + server),
	// at least 3 remote sessions (GRPC clients) - may be more due to transparent retries
	assert.Len(t, clientSpans, 2)
	assert.GreaterOrEqual(t, len(remoteSessionSpans), 3)
}
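To make the verifier concrete, here is a self-contained sketch that runs the same span-counting logic over a couple of made-up log lines (the span values are fabricated for illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two fabricated records in the new one-JSON-object-per-line format.
	lines := []string{
		`{"m":"session started","span:client":"c1","span:server-session":"s1"}`,
		`{"m":"session started","span:client":"c1","span:server-session":"s2"}`,
	}

	clientSpans := map[string]int{}
	serverSessions := map[string]int{}

	for _, l := range lines {
		var rec map[string]any
		if err := json.Unmarshal([]byte(l), &rec); err != nil {
			continue // skip non-JSON lines
		}
		if s, ok := rec["span:client"].(string); ok {
			clientSpans[s]++
		}
		if s, ok := rec["span:server-session"].(string); ok {
			serverSessions[s]++
		}
	}

	// One distinct client span, two distinct server sessions.
	fmt.Println(len(clientSpans), len(serverSessions)) // 1 2
}
```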
func verifyFindManifestCount(ctx context.Context, t *testing.T, rep repo.Repository, pageSize int32, labels map[string]string, wantCount int) {
	t.Helper()

@@ -16,7 +16,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) {
	runner := testenv.NewInProcRunner(t)
	e := testenv.NewCLITest(t, s.formatFlags, runner)

	e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir, "--disable-internal-log")
	e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir, "--disable-repository-log")
	defer e.RunAndExpectSuccess(t, "repo", "disconnect")

	var (

@@ -35,11 +35,11 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) {
	beforeSnapshotBlobs := e.RunAndExpectSuccess(t, "blob", "list", "--data-only")
	_ = beforeSnapshotBlobs

	testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir1, "--json", "--disable-internal-log"), &snap)
	testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir1, "--json", "--disable-repository-log"), &snap)

	// avoid create and delete in the same second.
	time.Sleep(2 * time.Second)
	e.RunAndExpectSuccess(t, "snapshot", "delete", string(snap.ID), "--delete", "--disable-internal-log")
	e.RunAndExpectSuccess(t, "snapshot", "delete", string(snap.ID), "--delete", "--disable-repository-log")

	e.RunAndVerifyOutputLineCount(t, 0, "snapshot", "list")

@@ -47,7 +47,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) {

	e.RunAndExpectSuccess(t, "maintenance", "info")
	testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)
	e.RunAndVerifyOutputLineCount(t, 0, "maintenance", "run", "--full", "--disable-internal-log")
	e.RunAndVerifyOutputLineCount(t, 0, "maintenance", "run", "--full", "--disable-repository-log")
	e.RunAndExpectSuccess(t, "maintenance", "info")
	testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)

@@ -56,7 +56,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) {
	}

	// now rerun with --safety=none
	e.RunAndExpectSuccess(t, "maintenance", "run", "--full", "--safety=none", "--disable-internal-log")
	e.RunAndExpectSuccess(t, "maintenance", "run", "--full", "--safety=none", "--disable-repository-log")
	e.RunAndExpectSuccess(t, "maintenance", "info")
	testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi)