mirror of
https://github.com/kopia/kopia.git
synced 2026-01-24 06:18:02 -05:00
The dual time measurement is described in https://go.googlesource.com/proposal/+/master/design/12914-monotonic.md The fix is to discard the hidden monotonic time component of time.Time by converting to Unix time and back. Reviewed usage of clock.Now() and replaced it with timetrack.StartTimer() when measuring elapsed time. The problem in #1402 was that the passage of time was measured using monotonic time rather than wall-clock time. When the computer goes to sleep, monotonic time is still monotonic, while the wall clock makes a leap forward when the computer wakes up. This is the behavior that the epoch manager (and most other components in Kopia) rely upon. Fixes #1402 Co-authored-by: Julio Lopez <julio+gh@kasten.io>
106 lines
2.6 KiB
Go
106 lines
2.6 KiB
Go
package cli
|
|
|
|
import (
	"context"
	"sort"
	"time"

	"github.com/pkg/errors"

	"github.com/kopia/kopia/internal/clock"
	"github.com/kopia/kopia/internal/units"
	"github.com/kopia/kopia/repo"
	"github.com/kopia/kopia/repo/maintenance"
)
|
|
|
|
// commandMaintenanceInfo implements the "maintenance info" CLI command
// (aliased as "status"), which displays the repository's maintenance
// parameters, schedule and recent run history.
type commandMaintenanceInfo struct {
	// jo provides JSON output support (--json flag and serialization).
	jo jsonOutput
	// out provides plain-text output helpers.
	out textOutput
}
|
|
|
|
// MaintenanceInfo is used to display the maintenance info in JSON format.
// maintenance.Params is embedded without a JSON tag, so its fields are
// inlined at the top level of the emitted object, while the schedule is
// nested under the "schedule" key.
type MaintenanceInfo struct {
	maintenance.Params
	maintenance.Schedule `json:"schedule"`
}
|
|
|
|
func (c *commandMaintenanceInfo) setup(svc appServices, parent commandParent) {
|
|
cmd := parent.Command("info", "Display maintenance information").Alias("status")
|
|
c.jo.setup(svc, cmd)
|
|
cmd.Action(svc.directRepositoryReadAction(c.run))
|
|
c.out.setup(svc)
|
|
}
|
|
|
|
func (c *commandMaintenanceInfo) run(ctx context.Context, rep repo.DirectRepository) error {
|
|
p, err := maintenance.GetParams(ctx, rep)
|
|
if err != nil {
|
|
return errors.Wrap(err, "unable to get maintenance params")
|
|
}
|
|
|
|
s, err := maintenance.GetSchedule(ctx, rep)
|
|
if err != nil {
|
|
return errors.Wrap(err, "unable to get maintenance schedule")
|
|
}
|
|
|
|
if c.jo.jsonOutput {
|
|
mi := MaintenanceInfo{
|
|
Params: *p,
|
|
Schedule: *s,
|
|
}
|
|
|
|
c.out.printStdout("%s\n", c.jo.jsonBytes(mi))
|
|
|
|
return nil
|
|
}
|
|
|
|
c.out.printStdout("Owner: %v\n", p.Owner)
|
|
c.out.printStdout("Quick Cycle:\n")
|
|
c.displayCycleInfo(&p.QuickCycle, s.NextQuickMaintenanceTime, rep)
|
|
|
|
c.out.printStdout("Full Cycle:\n")
|
|
c.displayCycleInfo(&p.FullCycle, s.NextFullMaintenanceTime, rep)
|
|
|
|
cl := p.LogRetention.OrDefault()
|
|
|
|
c.out.printStdout("Log Retention:\n")
|
|
c.out.printStdout(" max count: %v\n", cl.MaxCount)
|
|
c.out.printStdout(" max age of logs: %v\n", cl.MaxAge)
|
|
c.out.printStdout(" max total size: %v\n", units.BytesStringBase2(cl.MaxTotalSize))
|
|
|
|
c.out.printStdout("Recent Maintenance Runs:\n")
|
|
|
|
for run, timings := range s.Runs {
|
|
c.out.printStdout(" %v:\n", run)
|
|
|
|
for _, t := range timings {
|
|
var errInfo string
|
|
if t.Success {
|
|
errInfo = "SUCCESS"
|
|
} else {
|
|
errInfo = "ERROR: " + t.Error
|
|
}
|
|
|
|
c.out.printStdout(
|
|
" %v (%v) %v\n",
|
|
formatTimestamp(t.Start),
|
|
t.End.Sub(t.Start).Truncate(time.Second),
|
|
errInfo)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// displayCycleInfo prints the schedule details for a single maintenance
// cycle (quick or full): whether it is enabled, its interval, and when it
// is next due to run. t is the next scheduled run time for the cycle.
func (c *commandMaintenanceInfo) displayCycleInfo(cp *maintenance.CycleParams, t time.Time, rep repo.DirectRepository) {
	c.out.printStdout(" scheduled: %v\n", cp.Enabled)

	if cp.Enabled {
		c.out.printStdout(" interval: %v\n", cp.Interval)

		if rep.Time().Before(t) {
			// NOTE(review): the Before() check uses rep.Time() while the
			// countdown below uses clock.Now(). These are presumably the same
			// wall clock, but if the repository uses a different time source
			// the printed "in ..." duration could disagree with the branch
			// taken — confirm whether both should use rep.Time().
			c.out.printStdout(" next run: %v (in %v)\n", formatTimestamp(t), t.Sub(clock.Now()).Truncate(time.Second))
		} else {
			// Already past (or exactly at) the scheduled time.
			c.out.printStdout(" next run: now\n")
		}
	}
}
|