removed legacy oid format

This commit is contained in:
Jarek Kowalski
2018-03-19 14:43:43 -07:00
parent 2e61cc5688
commit 1c4e0e1e47
19 changed files with 589 additions and 508 deletions

View File

@@ -661,33 +661,41 @@ func (bm *Manager) Repackage(maxLength uint64) error {
log.Printf("%v blocks to re-package (%v total bytes)", len(toRepackage), totalBytes)
for _, m := range toRepackage {
data, err := bm.getBlockInternalLocked(m.PackBlockId)
if err != nil {
return fmt.Errorf("can't fetch block %q for repackaging: %v", m.PackBlockId, err)
if err := bm.repackageBlock(m, done); err != nil {
return err
}
}
for blockID, os := range m.Items {
if done[blockID] {
continue
}
done[blockID] = true
log.Printf("re-packaging: %v %v", blockID, os)
return nil
}
offset, size := unpackOffsetAndSize(os)
blockData := data[offset : offset+size]
if err := bm.addToPackLocked(blockID, blockData, true); err != nil {
return fmt.Errorf("unable to re-package %q: %v", blockID, err)
}
func (bm *Manager) repackageBlock(m *blockmgrpb.Index, done map[string]bool) error {
data, err := bm.getBlockInternalLocked(m.PackBlockId)
if err != nil {
return fmt.Errorf("can't fetch block %q for repackaging: %v", m.PackBlockId, err)
}
for blockID, os := range m.Items {
if done[blockID] {
continue
}
done[blockID] = true
log.Printf("re-packaging: %v %v", blockID, os)
for blockID, blockData := range m.InlineItems {
if done[blockID] {
continue
}
done[blockID] = true
if err := bm.addToPackLocked(blockID, blockData, true); err != nil {
return fmt.Errorf("unable to re-package %q: %v", blockID, err)
}
offset, size := unpackOffsetAndSize(os)
blockData := data[offset : offset+size]
if err := bm.addToPackLocked(blockID, blockData, true); err != nil {
return fmt.Errorf("unable to re-package %q: %v", blockID, err)
}
}
for blockID, blockData := range m.InlineItems {
if done[blockID] {
continue
}
done[blockID] = true
if err := bm.addToPackLocked(blockID, blockData, true); err != nil {
return fmt.Errorf("unable to re-package %q: %v", blockID, err)
}
}

View File

@@ -17,6 +17,13 @@
blockIndexShowIDs = blockIndexShowCommand.Arg("id", "IDs of index blocks to show").Required().Strings()
)
// blockIndexEntryInfo is one row of the "block index show" listing: a
// block referenced by a pack index, stored either at an offset/size
// within the pack or inline inside the index itself.
type blockIndexEntryInfo struct {
	blockID string
	offset uint32 // offset within the pack; 0 for inline entries
	size uint32
	inline bool // true when the payload is stored inline in the index
}
func runShowBlockIndexesAction(context *kingpin.ParseContext) error {
rep := mustOpenRepository(nil)
defer rep.Close() //nolint: errcheck
@@ -50,51 +57,53 @@ func runShowBlockIndexesAction(context *kingpin.ParseContext) error {
}
for _, ndx := range d.Indexes {
fmt.Printf("pack:%v len:%v created:%v\n", ndx.PackBlockId, ndx.PackLength, time.Unix(0, int64(ndx.CreateTimeNanos)).Local())
type unpacked struct {
blockID string
offset uint32
size uint32
inline bool
}
var lines []unpacked
for blk, os := range ndx.Items {
lines = append(lines, unpacked{blk, uint32(os >> 32), uint32(os), false})
}
for blk, d := range ndx.InlineItems {
lines = append(lines, unpacked{blk, 0, uint32(len(d)), true})
}
switch *blockIndexShowSort {
case "offset":
sort.Slice(lines, func(i, j int) bool {
return lines[i].offset < lines[j].offset
})
case "blockID":
sort.Slice(lines, func(i, j int) bool {
return lines[i].blockID < lines[j].blockID
})
case "size":
sort.Slice(lines, func(i, j int) bool {
return lines[i].size < lines[j].size
})
}
for _, l := range lines {
if l.inline {
fmt.Printf(" added %-40v size:%v (inline)\n", l.blockID, l.size)
} else {
fmt.Printf(" added %-40v offset:%-10v size:%v\n", l.blockID, l.offset, l.size)
}
}
for _, del := range ndx.DeletedItems {
fmt.Printf(" deleted %v\n", del)
}
printIndex(ndx)
}
}
return nil
}
// printIndex dumps a single pack index: a header line for the pack
// followed by one line per added block (ordered per the --sort flag)
// and one line per deleted block.
func printIndex(ndx *blockmgrpb.Index) {
	fmt.Printf("pack:%v len:%v created:%v\n", ndx.PackBlockId, ndx.PackLength, time.Unix(0, int64(ndx.CreateTimeNanos)).Local())

	var entries []blockIndexEntryInfo
	for blockID, packed := range ndx.Items {
		entries = append(entries, blockIndexEntryInfo{blockID, uint32(packed >> 32), uint32(packed), false})
	}
	for blockID, payload := range ndx.InlineItems {
		entries = append(entries, blockIndexEntryInfo{blockID, 0, uint32(len(payload)), true})
	}
	sortIndexBlocks(entries)

	for _, e := range entries {
		if e.inline {
			fmt.Printf("  added %-40v size:%v (inline)\n", e.blockID, e.size)
		} else {
			fmt.Printf("  added %-40v offset:%-10v size:%v\n", e.blockID, e.offset, e.size)
		}
	}
	for _, deleted := range ndx.DeletedItems {
		fmt.Printf("  deleted %v\n", deleted)
	}
}
// sortIndexBlocks orders the listing rows in place according to the
// --sort flag ("offset", "blockID" or "size"); any other value leaves
// the order untouched.
func sortIndexBlocks(lines []blockIndexEntryInfo) {
	var less func(a, b blockIndexEntryInfo) bool
	switch *blockIndexShowSort {
	case "offset":
		less = func(a, b blockIndexEntryInfo) bool { return a.offset < b.offset }
	case "blockID":
		less = func(a, b blockIndexEntryInfo) bool { return a.blockID < b.blockID }
	case "size":
		less = func(a, b blockIndexEntryInfo) bool { return a.size < b.size }
	default:
		return
	}
	sort.Slice(lines, func(i, j int) bool { return less(lines[i], lines[j]) })
}
// init wires the action handler for the "block index show" subcommand.
func init() {
	blockIndexShowCommand.Action(runShowBlockIndexesAction)
}

View File

@@ -26,22 +26,7 @@ func runListBlocksAction(context *kingpin.ParseContext) error {
return err
}
maybeReverse := func(b bool) bool { return b }
if *blockListReverse {
maybeReverse = func(b bool) bool { return !b }
}
switch *blockListSort {
case "name":
sort.Slice(blocks, func(i, j int) bool { return maybeReverse(blocks[i].BlockID < blocks[j].BlockID) })
case "size":
sort.Slice(blocks, func(i, j int) bool { return maybeReverse(blocks[i].Length < blocks[j].Length) })
case "time":
sort.Slice(blocks, func(i, j int) bool { return maybeReverse(blocks[i].Timestamp.Before(blocks[j].Timestamp)) })
case "pack":
sort.Slice(blocks, func(i, j int) bool { return maybeReverse(comparePacks(blocks[i], blocks[j])) })
}
sortBlocks(blocks)
var count int
var totalSize int64
@@ -70,6 +55,25 @@ func runListBlocksAction(context *kingpin.ParseContext) error {
return nil
}
// sortBlocks orders blocks in place according to the --sort flag
// ("name", "size", "time" or "pack"), honoring --reverse; unknown sort
// keys leave the order unchanged.
func sortBlocks(blocks []block.Info) {
	reversed := *blockListReverse
	maybeReverse := func(b bool) bool {
		if reversed {
			return !b
		}
		return b
	}

	var less func(i, j int) bool
	switch *blockListSort {
	case "name":
		less = func(i, j int) bool { return maybeReverse(blocks[i].BlockID < blocks[j].BlockID) }
	case "size":
		less = func(i, j int) bool { return maybeReverse(blocks[i].Length < blocks[j].Length) }
	case "time":
		less = func(i, j int) bool { return maybeReverse(blocks[i].Timestamp.Before(blocks[j].Timestamp)) }
	case "pack":
		less = func(i, j int) bool { return maybeReverse(comparePacks(blocks[i], blocks[j])) }
	default:
		return
	}
	sort.Slice(blocks, less)
}
func comparePacks(a, b block.Info) bool {
if a, b := a.PackBlockID, b.PackBlockID; a != b {
return a < b

View File

@@ -6,6 +6,7 @@
"github.com/rs/zerolog/log"
"github.com/kopia/kopia/fs"
"github.com/kopia/kopia/object"
"github.com/kopia/kopia/snapshot"
@@ -50,26 +51,33 @@ func (v *verifier) verifyDirectory(oid object.ID, path string) error {
}
for _, e := range entries {
m := e.Metadata()
objectID := e.(object.HasObjectID).ObjectID()
childPath := path + "/" + m.Name
if m.FileMode().IsDir() {
if *verifyCommandRecursive {
if err := v.verifyDirectory(objectID, childPath); err != nil {
if v.reportError(childPath, err) {
return err
}
}
}
if err = v.verifyDirectoryEntry(path, e); err != nil {
return err
}
}
if err := v.verifyObject(objectID, childPath, m.FileSize); err != nil {
if v.reportError(childPath, err) {
return err
return nil
}
// verifyDirectoryEntry verifies one directory entry: directories are
// recursed into when --recursive is set, and the entry's own object is
// always verified. Errors pass through v.reportError, which decides
// whether they abort the walk (return non-nil) or are merely recorded.
func (v *verifier) verifyDirectoryEntry(path string, e fs.Entry) error {
	m := e.Metadata()
	// NOTE(review): assumes every entry implements object.HasObjectID -
	// the type assertion panics otherwise; confirm with entry providers.
	objectID := e.(object.HasObjectID).ObjectID()
	childPath := path + "/" + m.Name
	if m.FileMode().IsDir() {
		if *verifyCommandRecursive {
			if err := v.verifyDirectory(objectID, childPath); err != nil {
				if v.reportError(childPath, err) {
					return err
				}
			}
		}
	}
	if err := v.verifyObject(objectID, childPath, m.FileSize); err != nil {
		if v.reportError(childPath, err) {
			return err
		}
	}
	return nil
}

View File

@@ -62,63 +62,10 @@ func setPolicy(context *kingpin.ParseContext) error {
p = &snapshot.Policy{}
}
if err := applyPolicyNumber(target, "number of annual backups to keep", &p.RetentionPolicy.KeepAnnual, *policySetKeepAnnual); err != nil {
if err := setPolicyFromFlags(target, p); err != nil {
return err
}
if err := applyPolicyNumber(target, "number of monthly backups to keep", &p.RetentionPolicy.KeepMonthly, *policySetKeepMonthly); err != nil {
return err
}
if err := applyPolicyNumber(target, "number of weekly backups to keep", &p.RetentionPolicy.KeepWeekly, *policySetKeepWeekly); err != nil {
return err
}
if err := applyPolicyNumber(target, "number of daily backups to keep", &p.RetentionPolicy.KeepDaily, *policySetKeepDaily); err != nil {
return err
}
if err := applyPolicyNumber(target, "number of hourly backups to keep", &p.RetentionPolicy.KeepHourly, *policySetKeepHourly); err != nil {
return err
}
if err := applyPolicyNumber(target, "number of latest backups to keep", &p.RetentionPolicy.KeepLatest, *policySetKeepLatest); err != nil {
return err
}
// It's not really a list, just optional boolean.
for _, inherit := range *policySetInherit {
p.NoParent = !inherit
}
for _, path := range *policySetAddExclude {
p.FilesPolicy.Exclude = addString(p.FilesPolicy.Exclude, path)
}
for _, path := range *policySetRemoveExclude {
p.FilesPolicy.Exclude = removeString(p.FilesPolicy.Exclude, path)
}
if *policySetClearExclude {
p.FilesPolicy.Exclude = nil
}
for _, path := range *policySetAddInclude {
p.FilesPolicy.Include = addString(p.FilesPolicy.Include, path)
}
for _, path := range *policySetRemoveInclude {
p.FilesPolicy.Include = removeString(p.FilesPolicy.Include, path)
}
if *policySetClearInclude {
p.FilesPolicy.Include = nil
}
for _, freq := range *policySetFrequency {
p.SchedulingPolicy.Frequency = freq
}
if err := mgr.SetPolicy(target, p); err != nil {
return fmt.Errorf("can't save policy for %v: %v", target, err)
}
@@ -127,22 +74,67 @@ func setPolicy(context *kingpin.ParseContext) error {
return nil
}
func addString(p []string, s string) []string {
p = append(removeString(p, s), s)
sort.Strings(p)
return p
// setPolicyFromFlags mutates p in place based on the policy-set command
// line flags: retention counters, the inherit flag, include/exclude list
// edits and snapshot frequency. target is used only for error reporting
// by applyPolicyNumber.
func setPolicyFromFlags(target snapshot.SourceInfo, p *snapshot.Policy) error {
	// All retention counters share identical handling; drive them from a table.
	cases := []struct {
		desc string
		max **int
		flagValue *string
	}{
		{"number of annual backups to keep", &p.RetentionPolicy.KeepAnnual, policySetKeepAnnual},
		{"number of monthly backups to keep", &p.RetentionPolicy.KeepMonthly, policySetKeepMonthly},
		{"number of weekly backups to keep", &p.RetentionPolicy.KeepWeekly, policySetKeepWeekly},
		{"number of daily backups to keep", &p.RetentionPolicy.KeepDaily, policySetKeepDaily},
		{"number of hourly backups to keep", &p.RetentionPolicy.KeepHourly, policySetKeepHourly},
		{"number of latest backups to keep", &p.RetentionPolicy.KeepLatest, policySetKeepLatest},
	}
	for _, c := range cases {
		if err := applyPolicyNumber(target, c.desc, c.max, *c.flagValue); err != nil {
			return err
		}
	}
	// It's not really a list, just optional boolean.
	for _, inherit := range *policySetInherit {
		p.NoParent = !inherit
	}
	// --clear-* wins over add/remove edits on the same list.
	if *policySetClearExclude {
		p.FilesPolicy.Exclude = nil
	} else {
		p.FilesPolicy.Exclude = addRemoveDedupeAndSort(p.FilesPolicy.Exclude, *policySetAddExclude, *policySetRemoveExclude)
	}
	if *policySetClearInclude {
		p.FilesPolicy.Include = nil
	} else {
		p.FilesPolicy.Include = addRemoveDedupeAndSort(p.FilesPolicy.Include, *policySetAddInclude, *policySetRemoveInclude)
	}
	// Repeatable flag used as an optional scalar: the last value wins.
	for _, freq := range *policySetFrequency {
		p.SchedulingPolicy.Frequency = freq
	}
	return nil
}
func removeString(p []string, s string) []string {
var result []string
for _, item := range p {
if item == s {
continue
}
result = append(result, item)
// addRemoveDedupeAndSort returns base with all of add merged in and all
// of remove taken out, deduplicated and sorted lexicographically.
// Removal wins over addition of the same string. The inputs are not
// modified; an all-empty input yields a nil slice.
//
// Fix: the diff-residue lines from the deleted removeString helper that
// were interleaved inside this body (stray "}" / "return result") have
// been removed to restore a syntactically valid function.
func addRemoveDedupeAndSort(base, add, remove []string) []string {
	entries := map[string]bool{}
	for _, b := range base {
		entries[b] = true
	}
	for _, b := range add {
		entries[b] = true
	}
	for _, b := range remove {
		delete(entries, b)
	}
	var s []string
	for k := range entries {
		s = append(s, k)
	}
	sort.Strings(s)
	return s
}
func applyPolicyNumber(src snapshot.SourceInfo, desc string, val **int, str string) error {

View File

@@ -47,29 +47,8 @@ func runMigrateCommand(context *kingpin.ParseContext) error {
break
}
log.Printf("migrating source %v", s)
manifests := sourceSM.ListSnapshotManifests(&s)
snapshots, err := sourceSM.LoadSnapshots(manifests)
if err != nil {
return fmt.Errorf("unable to load snapshot manifests for %v: %v", s, err)
}
for _, m := range filterSnapshotsToMigrate(snapshots) {
d := sourceSM.DirectoryEntry(m.RootObjectID)
newm, err := uploader.Upload(d, m.Source, nil)
if err != nil {
return fmt.Errorf("error migrating shapshot %v @ %v: %v", m.Source, m.StartTime, err)
}
m.RootObjectID = newm.RootObjectID
m.HashCacheID = newm.HashCacheID
m.Stats = newm.Stats
m.IncompleteReason = newm.IncompleteReason
if _, err := destSM.SaveSnapshot(m); err != nil {
return fmt.Errorf("cannot save manifest: %v", err)
}
if err := migrateSingleSource(uploader, sourceSM, destSM, s); err != nil {
return err
}
}
@@ -90,6 +69,35 @@ func runMigrateCommand(context *kingpin.ParseContext) error {
return nil
}
// migrateSingleSource re-uploads all (filtered) snapshots of a single
// source from sourceSM into destSM. Each migrated manifest keeps its
// original metadata but is re-pointed at the newly uploaded root object,
// hash cache, stats and incomplete-reason.
func migrateSingleSource(uploader *snapshot.Uploader, sourceSM, destSM *snapshot.Manager, s snapshot.SourceInfo) error {
	log.Printf("migrating source %v", s)
	manifests := sourceSM.ListSnapshotManifests(&s)
	snapshots, err := sourceSM.LoadSnapshots(manifests)
	if err != nil {
		return fmt.Errorf("unable to load snapshot manifests for %v: %v", s, err)
	}

	for _, m := range filterSnapshotsToMigrate(snapshots) {
		d := sourceSM.DirectoryEntry(m.RootObjectID)
		newm, err := uploader.Upload(d, m.Source, nil)
		if err != nil {
			// fixed typo in error message: "shapshot" -> "snapshot"
			return fmt.Errorf("error migrating snapshot %v @ %v: %v", m.Source, m.StartTime, err)
		}
		// Carry the freshly uploaded identifiers and stats over to the copy.
		m.RootObjectID = newm.RootObjectID
		m.HashCacheID = newm.HashCacheID
		m.Stats = newm.Stats
		m.IncompleteReason = newm.IncompleteReason
		if _, err := destSM.SaveSnapshot(m); err != nil {
			return fmt.Errorf("cannot save manifest: %v", err)
		}
	}
	return nil
}
func filterSnapshotsToMigrate(s []*snapshot.Manifest) []*snapshot.Manifest {
if *migrateLatestOnly && len(s) > 0 {
s = s[0:1]

View File

@@ -3,6 +3,7 @@
import (
"fmt"
"os"
"sort"
"strings"
"time"
@@ -23,87 +24,121 @@
snapshotExpireDelete = snapshotExpireCommand.Flag("delete", "Whether to actually delete snapshots").Default("no").String()
)
// cutoffTimes holds, per retention bucket, the earliest snapshot start
// time still eligible to be kept by that bucket; snapshots older than a
// bucket's cutoff are not counted toward it.
type cutoffTimes struct {
	annual time.Time
	monthly time.Time
	daily time.Time
	hourly time.Time
	weekly time.Time
}
func yearsAgo(base time.Time, n int) time.Time {
return base.AddDate(-n, 0, 0)
}
func monthsAgo(base time.Time, n int) time.Time {
return base.AddDate(0, -n, 0)
}
func daysAgo(base time.Time, n int) time.Time {
return base.AddDate(0, 0, -n)
}
func weeksAgo(base time.Time, n int) time.Time {
return base.AddDate(0, 0, -n*7)
}
func hoursAgo(base time.Time, n int) time.Time {
return base.Add(time.Duration(-n) * time.Hour)
}
// expireSnapshotsForSingleSource walks the snapshots of one source,
// prints a keep/delete decision for each, and returns the manifest IDs
// of the snapshots to delete. Retention decisions are delegated to
// getReasonsToKeep using cutoff times derived from the policy; a bucket
// whose policy counter is nil gets the far-future maxTime cutoff, which
// effectively disables it (getReasonsToKeep also skips nil counters).
// snapshotNames is retained for interface compatibility; deletion now
// keys off s.ID.
//
// Fixes: (1) the weekly cutoff was computed from KeepHourly instead of
// KeepWeekly — copy/paste bug; (2) removed the stale pre-refactoring
// lines (per-bucket cutoff variables and the inline registerSnapshot
// closure) that were interleaved into this body.
func expireSnapshotsForSingleSource(snapshots []*snapshot.Manifest, src snapshot.SourceInfo, pol *snapshot.Policy, snapshotNames []string) []string {
	var toDelete []string

	now := time.Now()
	maxTime := now.Add(365 * 24 * time.Hour)
	cutoffTime := func(setting *int, add func(time.Time, int) time.Time) time.Time {
		if setting != nil {
			return add(now, *setting)
		}
		return maxTime
	}
	cutoff := cutoffTimes{
		annual:  cutoffTime(pol.RetentionPolicy.KeepAnnual, yearsAgo),
		monthly: cutoffTime(pol.RetentionPolicy.KeepMonthly, monthsAgo),
		daily:   cutoffTime(pol.RetentionPolicy.KeepDaily, daysAgo),
		hourly:  cutoffTime(pol.RetentionPolicy.KeepHourly, hoursAgo),
		// was: cutoffTime(pol.RetentionPolicy.KeepHourly, weeksAgo)
		weekly: cutoffTime(pol.RetentionPolicy.KeepWeekly, weeksAgo),
	}

	fmt.Printf("\nProcessing %v\n", src)
	ids := make(map[string]bool)
	idCounters := make(map[string]int)

	for i, s := range snapshots {
		keep := getReasonsToKeep(i, s, cutoff, pol, ids, idCounters)
		tm := s.StartTime.Local().Format("2006-01-02 15:04:05 MST")
		if len(keep) > 0 {
			fmt.Printf("  keeping %v (%v) %v\n", tm, s.ID, strings.Join(keep, ","))
		} else {
			fmt.Printf("  deleting %v (%v)\n", tm, s.ID)
			toDelete = append(toDelete, s.ID)
		}
	}
	return toDelete
}
// getReasonsToKeep decides which retention buckets ("latest", "annual",
// "monthly", "weekly", "daily", "hourly") cause snapshot s (the i-th of
// its source) to be retained. ids records time-period identifiers already
// claimed by previously-processed snapshots and idCounters records how
// many snapshots each bucket has kept; both are shared across calls for
// one source. Returns nil for incomplete snapshots or when no bucket
// claims s (i.e. a deletion candidate).
func getReasonsToKeep(i int, s *snapshot.Manifest, cutoff cutoffTimes, pol *snapshot.Policy, ids map[string]bool, idCounters map[string]int) []string {
	if s.IncompleteReason != "" {
		return nil
	}
	var keepReasons []string
	var zeroTime time.Time
	yyyy, wk := s.StartTime.ISOWeek()
	cases := []struct {
		cutoffTime time.Time
		timePeriodID string
		timePeriodType string
		max *int
	}{
		// zeroTime means "latest" has no time cutoff - every snapshot qualifies.
		{zeroTime, fmt.Sprintf("%v", i), "latest", pol.RetentionPolicy.KeepLatest},
		{cutoff.annual, s.StartTime.Format("2006"), "annual", pol.RetentionPolicy.KeepAnnual},
		{cutoff.monthly, s.StartTime.Format("2006-01"), "monthly", pol.RetentionPolicy.KeepMonthly},
		{cutoff.weekly, fmt.Sprintf("%04v-%02v", yyyy, wk), "weekly", pol.RetentionPolicy.KeepWeekly},
		{cutoff.daily, s.StartTime.Format("2006-01-02"), "daily", pol.RetentionPolicy.KeepDaily},
		{cutoff.hourly, s.StartTime.Format("2006-01-02 15"), "hourly", pol.RetentionPolicy.KeepHourly},
	}
	for _, c := range cases {
		if c.max == nil {
			// bucket disabled by policy
			continue
		}
		if s.StartTime.Before(c.cutoffTime) {
			// snapshot is older than this bucket's retention window
			continue
		}
		if _, exists := ids[c.timePeriodID]; exists {
			// another snapshot already represents this time period
			continue
		}
		if idCounters[c.timePeriodType] < *c.max {
			ids[c.timePeriodID] = true
			idCounters[c.timePeriodType]++
			keepReasons = append(keepReasons, c.timePeriodType)
		}
	}
	return keepReasons
}
func getSnapshotNamesToExpire(mgr *snapshot.Manager) ([]string, error) {
if !*snapshotExpireAll && len(*snapshotExpirePaths) == 0 {
return nil, fmt.Errorf("Must specify paths to expire or --all")
@@ -163,6 +198,16 @@ func expireSnapshots(pmgr *snapshot.PolicyManager, snapshots []*snapshot.Manifes
return nil
}
sort.Slice(snapshots, func(i, j int) bool {
s1, s2 := snapshots[i].Source, snapshots[j].Source
if s1.String() != s2.String() {
return s1.String() < s2.String()
}
return snapshots[i].StartTime.Before(snapshots[j].StartTime)
})
for i, s := range snapshots {
if s.Source != lastSource {
lastSource = s.Source

View File

@@ -23,7 +23,7 @@
maxResultsPerPath = snapshotListCommand.Flag("max-results", "Maximum number of results.").Default("1000").Int()
)
func findBackups(mgr *snapshot.Manager, sourceInfo snapshot.SourceInfo) (manifestIDs []string, relPath string, err error) {
func findSnapshotsForSource(mgr *snapshot.Manager, sourceInfo snapshot.SourceInfo) (manifestIDs []string, relPath string, err error) {
for len(sourceInfo.Path) > 0 {
list := mgr.ListSnapshotManifests(&sourceInfo)
@@ -49,52 +49,51 @@ func findBackups(mgr *snapshot.Manager, sourceInfo snapshot.SourceInfo) (manifes
return nil, "", nil
}
// findManifestIDs resolves a source string to the list of snapshot
// manifest IDs for that source plus the relative path inside the
// snapshot root ("/"-prefixed when non-empty). An empty source lists
// every manifest in the repository.
func findManifestIDs(mgr *snapshot.Manager, source string) ([]string, string, error) {
	if source == "" {
		return mgr.ListSnapshotManifests(nil), "", nil
	}

	sourceInfo, err := snapshot.ParseSourceInfo(source, getHostName(), getUserName())
	if err != nil {
		return nil, "", fmt.Errorf("invalid directory: '%s': %s", source, err)
	}

	manifestIDs, relPath, err := findSnapshotsForSource(mgr, sourceInfo)
	if relPath != "" {
		relPath = "/" + relPath
	}
	return manifestIDs, relPath, err
}
func runBackupsCommand(context *kingpin.ParseContext) error {
rep := mustOpenRepository(nil)
defer rep.Close() //nolint: errcheck
mgr := snapshot.NewManager(rep)
var previous []string
var relPath string
var err error
if *snapshotListPath != "" {
var si snapshot.SourceInfo
si, err = snapshot.ParseSourceInfo(*snapshotListPath, getHostName(), getUserName())
if err != nil {
return fmt.Errorf("invalid directory: '%s': %s", *snapshotListPath, err)
}
previous, relPath, err = findBackups(mgr, si)
if relPath != "" {
relPath = "/" + relPath
}
if err != nil {
return fmt.Errorf("cannot list snapshots: %v", err)
}
} else {
previous = mgr.ListSnapshotManifests(nil)
}
var lastSource snapshot.SourceInfo
var count int
manifestToItemID := map[*snapshot.Manifest]string{}
manifests, err := mgr.LoadSnapshots(previous)
manifestIDs, relPath, err := findManifestIDs(mgr, *snapshotListPath)
if err != nil {
return err
}
for i, m := range manifests {
manifestToItemID[m] = previous[i]
manifests, err := mgr.LoadSnapshots(manifestIDs)
if err != nil {
return err
}
sort.Sort(manifestSorter(manifests))
outputManifests(manifests, relPath)
return nil
}
func outputManifests(manifests []*snapshot.Manifest, relPath string) {
var lastSource snapshot.SourceInfo
var count int
var lastTotalFileSize int64
for _, m := range manifests {
maybeIncomplete := ""
if m.IncompleteReason != "" {
@@ -123,9 +122,7 @@ func runBackupsCommand(context *kingpin.ParseContext) error {
maybeIncomplete,
)
if *snapshotListShowItemID {
if i, ok := manifestToItemID[m]; ok {
fmt.Printf(" metadata: %v\n", i)
}
fmt.Printf(" metadata: %v\n", m.ID)
}
if *snapshotListShowHashCache {
fmt.Printf(" hashcache: %v\n", m.HashCacheID)
@@ -137,8 +134,6 @@ func runBackupsCommand(context *kingpin.ParseContext) error {
lastTotalFileSize = m.Stats.TotalFileSize
}
}
return nil
}
type manifestSorter []*snapshot.Manifest

View File

@@ -60,11 +60,7 @@ func sweepBlock(rep *repo.Repository, bm storage.BlockMetadata, inUseIndexBlocks
}
fmt.Printf("deleting unused block %v age %v\n", bm.BlockID, age)
if err := rep.Storage.DeleteBlock(bm.BlockID); err != nil {
return err
}
return nil
return rep.Storage.DeleteBlock(bm.BlockID)
}
func init() {

View File

@@ -110,60 +110,74 @@ func defaultConfigFileName() string {
return filepath.Join(ospath.ConfigDir(), "repository.config")
}
func getRepositoryCredentials(isNew bool) (auth.Credentials, error) {
if *key != "" {
k, err := hex.DecodeString(*key)
if err != nil {
return nil, fmt.Errorf("invalid key format: %v", err)
}
return auth.MasterKey(k)
func getRepositoryCredentialsFromMasterKey() (auth.Credentials, error) {
k, err := hex.DecodeString(*key)
if err != nil {
return nil, fmt.Errorf("invalid key format: %v", err)
}
if *password != "" {
return auth.Password(strings.TrimSpace(*password))
return auth.MasterKey(k)
}
func getRepositoryCredentialsFromKeyFile() (auth.Credentials, error) {
key, err := ioutil.ReadFile(*keyFile)
if err != nil {
return nil, fmt.Errorf("unable to read key file: %v", err)
}
if *keyFile != "" {
key, err := ioutil.ReadFile(*keyFile)
if err != nil {
return nil, fmt.Errorf("unable to read key file: %v", err)
}
return auth.MasterKey(key)
}
return auth.MasterKey(key)
// getRepositoryCredentialsFromPasswordFile reads the password from the
// file named by the --passwordfile flag (trimming surrounding
// whitespace) and returns password-derived credentials.
//
// Fix: removed the duplicated pre-refactoring lines (the
// "if *passwordFile" guard and its nested body) interleaved here.
func getRepositoryCredentialsFromPasswordFile() (auth.Credentials, error) {
	f, err := ioutil.ReadFile(*passwordFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read password file: %v", err)
	}
	return auth.Password(strings.TrimSpace(string(f)))
}
if isNew {
for {
p1, err := AskPass("Enter password to create new repository: ")
if err != nil {
return nil, err
}
p2, err := AskPass("Re-enter password for verification: ")
if err != nil {
return nil, err
}
if p1 != p2 {
fmt.Println("Passwords don't match!")
} else {
return auth.Password(p1)
}
}
} else {
p1, err := AskPass("Enter password to open repository: ")
// askForNewRepositoryPassword interactively prompts for a new repository
// password, re-prompting until the confirmation entry matches, then
// returns password-derived credentials. Prompt read errors abort.
//
// Fix: removed the two stale lines from the old "open repository" path
// (fmt.Println / early return) that were interleaved between the two
// prompts and made the function invalid.
func askForNewRepositoryPassword() (auth.Credentials, error) {
	for {
		p1, err := AskPass("Enter password to create new repository: ")
		if err != nil {
			return nil, err
		}
		p2, err := AskPass("Re-enter password for verification: ")
		if err != nil {
			return nil, err
		}
		if p1 != p2 {
			fmt.Println("Passwords don't match!")
		} else {
			return auth.Password(p1)
		}
	}
}
// askForExistingRepositoryPassword prompts once for the password of an
// existing repository and returns password-derived credentials.
func askForExistingRepositoryPassword() (auth.Credentials, error) {
	p1, err := AskPass("Enter password to open repository: ")
	if err != nil {
		return nil, err
	}
	// terminate the prompt line (AskPass presumably suppresses the echo/newline - confirm)
	fmt.Println()
	return auth.Password(p1)
}
// getRepositoryCredentials resolves repository credentials from, in
// order of precedence: --key, --password, --keyfile, --passwordfile,
// and finally an interactive prompt whose flavor (create vs. open) is
// chosen by isNew.
func getRepositoryCredentials(isNew bool) (auth.Credentials, error) {
	switch {
	case *key != "":
		return getRepositoryCredentialsFromMasterKey()
	case *password != "":
		return auth.Password(strings.TrimSpace(*password))
	case *keyFile != "":
		return getRepositoryCredentialsFromKeyFile()
	case *passwordFile != "":
		return getRepositoryCredentialsFromPasswordFile()
	case isNew:
		return askForNewRepositoryPassword()
	default:
		return askForExistingRepositoryPassword()
	}
}

View File

@@ -87,6 +87,27 @@ func (c *Cache) Readdir(d fs.Directory) (fs.Entries, error) {
return d.Readdir()
}
// getEntriesFromCache returns the cached directory listing for id, or
// nil on a miss. A hit refreshes the entry's LRU position; an expired
// entry is evicted and treated as a miss. An empty id never matches.
//
// NOTE(review): on the cache-hit path this calls c.mu.Unlock() before
// returning, implying the caller must hold c.mu and must NOT unlock
// again when a non-nil result is returned, while the miss path leaves
// the lock held - confirm this asymmetric locking contract with the
// caller (getEntries).
func (c *Cache) getEntriesFromCache(id string) fs.Entries {
	if v, ok := c.data[id]; id != "" && ok {
		if time.Now().Before(v.expireAfter) {
			c.moveToHead(v)
			c.mu.Unlock()
			if c.debug {
				log.Printf("cache hit for %q (valid until %v)", id, v.expireAfter)
			}
			return v.entries
		}
		// time expired
		if c.debug {
			log.Printf("removing expired cache entry %q after %v", id, v.expireAfter)
		}
		c.removeEntryLocked(v)
	}
	return nil
}
// getEntries consults the cache and either retrieves the contents of directory listing from the cache
// or invokes the provides callback and adds the results to cache.
func (c *Cache) getEntries(id string, expirationTime time.Duration, cb Loader) (fs.Entries, error) {
@@ -95,21 +116,8 @@ func (c *Cache) getEntries(id string, expirationTime time.Duration, cb Loader) (
}
c.mu.Lock()
if v, ok := c.data[id]; id != "" && ok {
if time.Now().Before(v.expireAfter) {
c.moveToHead(v)
c.mu.Unlock()
if c.debug {
log.Printf("cache hit for %q (valid until %v)", id, v.expireAfter)
}
return v.entries, nil
}
// time expired
if c.debug {
log.Printf("removing expired cache entry %q after %v", id, v.expireAfter)
}
c.removeEntryLocked(v)
if entries := c.getEntriesFromCache(id); entries != nil {
return entries, nil
}
if c.debug {

View File

@@ -36,18 +36,9 @@ func VerifyStorage(t *testing.T, r storage.Storage) {
// List
ch, cancel := r.ListBlocks(string("ab"))
defer cancel()
e1, ok := <-ch
if !ok || e1.BlockID != blocks[0].blk {
t.Errorf("missing result 0")
}
e2, ok := <-ch
if !ok || e2.BlockID != blocks[2].blk {
t.Errorf("missing result 2")
}
e3, ok := <-ch
if !ok || e3.BlockID != blocks[3].blk {
t.Errorf("missing result 3")
}
e1 := verifyNextBlock(t, blocks[0].blk, ch)
e2 := verifyNextBlock(t, blocks[2].blk, ch)
e3 := verifyNextBlock(t, blocks[3].blk, ch)
e4, ok := <-ch
if ok {
t.Errorf("unexpected item: %v", e4)
@@ -57,3 +48,11 @@ func VerifyStorage(t *testing.T, r storage.Storage) {
t.Errorf("timings are not sorted: %v %v %v", e1.TimeStamp, e2.TimeStamp, e3.TimeStamp)
}
}
// verifyNextBlock receives the next item from ch and reports a test
// error unless the channel yielded a value whose BlockID matches
// expectedBlockID. The received metadata is returned (zero value if the
// channel was closed) so callers can make further assertions.
func verifyNextBlock(t *testing.T, expectedBlockID string, ch <-chan storage.BlockMetadata) storage.BlockMetadata {
	bm, ok := <-ch
	if !ok || bm.BlockID != expectedBlockID {
		t.Errorf("missing result: %v", expectedBlockID)
	}
	return bm
}

View File

@@ -248,6 +248,34 @@ func (m *Manager) load() error {
func (m *Manager) loadManifestBlocks(blocks []block.Info) error {
t0 := time.Now()
log.Debug().Dur("duration_ms", time.Since(t0)).Msgf("finished loading manifest blocks.")
for _, b := range blocks {
m.blockIDs = append(m.blockIDs, b.BlockID)
}
manifests, err := m.loadBlocksInParallel(blocks)
if err != nil {
return err
}
for _, man := range manifests {
for _, e := range man.Entries {
m.mergeEntry(e)
}
}
// after merging, remove blocks marked as deleted.
for k, e := range m.entries {
if e.Deleted {
delete(m.entries, k)
}
}
return nil
}
func (m *Manager) loadBlocksInParallel(blocks []block.Info) ([]manifest, error) {
errors := make(chan error, len(blocks))
manifests := make(chan manifest, len(blocks))
blockIDs := make(chan string, len(blocks))
@@ -281,30 +309,18 @@ func (m *Manager) loadManifestBlocks(blocks []block.Info) error {
wg.Wait()
close(errors)
close(manifests)
log.Debug().Dur("duration_ms", time.Since(t0)).Msgf("finished loading manifest blocks.")
// if there was any error, forward it
if err := <-errors; err != nil {
return err
return nil, err
}
for _, b := range blocks {
m.blockIDs = append(m.blockIDs, b.BlockID)
var man []manifest
for m := range manifests {
man = append(man, m)
}
for man := range manifests {
for _, e := range man.Entries {
m.mergeEntry(e)
}
}
// after merging, remove blocks marked as deleted.
for k, e := range m.entries {
if e.Deleted {
delete(m.entries, k)
}
}
return nil
return man, nil
}
func (m *Manager) loadManifestBlock(blockID string) (manifest, error) {

View File

@@ -112,35 +112,39 @@ func (om *Manager) VerifyObject(oid ID) (int64, []string, error) {
return l, blocks.blockIDs(), nil
}
func (om *Manager) verifyIndirectObjectInternal(oid ID, blocks *blockTracker) (int64, error) {
if _, err := om.verifyObjectInternal(*oid.Indirect, blocks); err != nil {
return 0, fmt.Errorf("unable to read index: %v", err)
}
rd, err := om.Open(*oid.Indirect)
if err != nil {
return 0, err
}
defer rd.Close() //nolint:errcheck
seekTable, err := om.flattenListChunk(rd)
if err != nil {
return 0, err
}
for i, m := range seekTable {
l, err := om.verifyObjectInternal(m.Object, blocks)
if err != nil {
return 0, err
}
if l != m.Length {
return 0, fmt.Errorf("unexpected length of part %#v of indirect object %q: %v %v, expected %v", i, oid, m.Object, l, m.Length)
}
}
totalLength := seekTable[len(seekTable)-1].endOffset()
return totalLength, nil
}
func (om *Manager) verifyObjectInternal(oid ID, blocks *blockTracker) (int64, error) {
if oid.Indirect != nil {
if _, err := om.verifyObjectInternal(*oid.Indirect, blocks); err != nil {
return 0, fmt.Errorf("unable to read index: %v", err)
}
rd, err := om.Open(*oid.Indirect)
if err != nil {
return 0, err
}
defer rd.Close() //nolint:errcheck
seekTable, err := om.flattenListChunk(rd)
if err != nil {
return 0, err
}
for i, m := range seekTable {
l, err := om.verifyObjectInternal(m.Object, blocks)
if err != nil {
return 0, err
}
if l != m.Length {
return 0, fmt.Errorf("unexpected length of part %#v of indirect object %q: %v %v, expected %v", i, oid, m.Object, l, m.Length)
}
}
totalLength := seekTable[len(seekTable)-1].endOffset()
return totalLength, nil
return om.verifyIndirectObjectInternal(oid, blocks)
}
p, err := om.blockMgr.BlockInfo(oid.StorageBlock)

View File

@@ -2,8 +2,6 @@
import (
"encoding/json"
"strconv"
"strings"
"fmt"
)
@@ -103,50 +101,14 @@ func ParseID(s string) (ID, error) {
content := s[1:]
switch chunkType {
case 'P':
// legacy
parts := strings.Split(content, "@")
if len(parts) == 2 && len(parts[0]) > 0 && len(parts[1]) > 0 {
return ID{
StorageBlock: parts[0],
}, nil
}
case 'I', 'D':
if chunkType == 'I' {
if len(content) < 2 || content[1] != ',' {
base, err := ParseID(content)
if err != nil {
return NullID, err
}
return ID{Indirect: &base}, nil
}
// legacy
comma := strings.Index(content, ",")
if comma < 0 {
// malformed
break
}
indirectLevel, err := strconv.Atoi(content[0:comma])
base, err := ParseID(content)
if err != nil {
break
}
if indirectLevel <= 0 {
break
}
content = content[comma+1:]
if content == "" {
break
return NullID, err
}
o := &ID{StorageBlock: content}
for i := 0; i < indirectLevel; i++ {
o = &ID{Indirect: o}
}
return *o, nil
return ID{Indirect: &base}, nil
}
return ID{StorageBlock: content}, nil

View File

@@ -15,11 +15,8 @@ func TestParseObjectID(t *testing.T) {
}{
{"Dfoo", ID{StorageBlock: "foo"}},
{"IDfoo", ID{Indirect: &ID{StorageBlock: "foo"}}},
{"I1,foo", ID{Indirect: &ID{StorageBlock: "foo"}}},
{"I2,foo", ID{Indirect: &ID{Indirect: &ID{StorageBlock: "foo"}}}},
{"IDfoo", ID{Indirect: &ID{StorageBlock: "foo"}}},
{"IIDfoo", ID{Indirect: &ID{Indirect: &ID{StorageBlock: "foo"}}}},
{"Pfoo@bar", ID{StorageBlock: "foo"}}, // legacy
}
for _, tc := range cases {

View File

@@ -57,17 +57,7 @@ func Open(ctx context.Context, configFile string, options *Options) (rep *Reposi
return nil, err
}
var creds auth.Credentials
if len(lc.Connection.Key) > 0 {
log.Debug().Msg("getting credentials from master key")
creds, err = auth.MasterKey(lc.Connection.Key)
} else {
if options.CredentialsCallback == nil {
return nil, errors.New("key not persisted and no credentials specified")
}
log.Debug().Msg("getting credentials using callback")
creds, err = options.CredentialsCallback()
}
creds, err := getCredentials(lc, options)
if err != nil {
return nil, fmt.Errorf("invalid credentials: %v", err)
@@ -99,6 +89,20 @@ func Open(ctx context.Context, configFile string, options *Options) (rep *Reposi
return r, nil
}
// getCredentials resolves the credentials used to open the repository.
// It prefers the master key persisted in the local connection config and
// falls back to the caller-supplied CredentialsCallback when no key is
// stored; it fails when neither source is available.
func getCredentials(lc *config.LocalConfig, options *Options) (auth.Credentials, error) {
	switch {
	case len(lc.Connection.Key) > 0:
		log.Debug().Msg("getting credentials from master key")
		return auth.MasterKey(lc.Connection.Key)
	case options.CredentialsCallback != nil:
		log.Debug().Msg("getting credentials using callback")
		return options.CredentialsCallback()
	default:
		return nil, errors.New("key not persisted and no credentials specified")
	}
}
// SetCachingConfig changes caching configuration for a given repository config file.
func SetCachingConfig(ctx context.Context, configFile string, opt block.CachingOptions) error {
configFile, err := filepath.Abs(configFile)

View File

@@ -428,7 +428,7 @@ func toChannel(items []*uploadWorkItem) <-chan *uploadWorkItem {
return ch
}
func (u *Uploader) processUploadWorkItems(workItems []*uploadWorkItem, dw *dir.Writer) error {
func (u *Uploader) launchWorkItems(workItems []*uploadWorkItem, wg *sync.WaitGroup) {
// allocate result channel for each work item.
for _, it := range workItems {
it.resultChan = make(chan entryResult, 1)
@@ -440,7 +440,6 @@ func (u *Uploader) processUploadWorkItems(workItems []*uploadWorkItem, dw *dir.W
}
ch := toChannel(workItems)
var wg sync.WaitGroup
for i := 0; i < workerCount; i++ {
wg.Add(1)
go func(workerID int) {
@@ -454,6 +453,11 @@ func (u *Uploader) processUploadWorkItems(workItems []*uploadWorkItem, dw *dir.W
}
}(i)
}
}
func (u *Uploader) processUploadWorkItems(workItems []*uploadWorkItem, dw *dir.Writer) error {
var wg sync.WaitGroup
u.launchWorkItems(workItems, &wg)
// Read result channels in order.
for _, it := range workItems {

View File

@@ -69,55 +69,8 @@ func (d *davStorage) executeRequestInternal(req *http.Request, body []byte) (*ht
req.SetBasicAuth(d.Username, d.Password)
case "Digest":
var ha1, ha2 string
nonce := params["nonce"]
realm := params["realm"]
algo := params["algorithm"]
opaque := params["opaque"]
if algo == "" {
algo = "MD5"
}
qop := params["qop"]
switch algo {
case "MD5":
ha1 = h(fmt.Sprintf("%s:%s:%s", d.Username, realm, d.Password))
default:
// TODO - implement me
return nil, fmt.Errorf("unsupported digest algorithm: %q", algo)
}
switch qop {
case "auth", "":
ha2 = h(fmt.Sprintf("%s:%s", req.Method, req.URL.RequestURI()))
default:
// TODO - implement me
return nil, fmt.Errorf("unsupported digest qop: %q", qop)
}
switch qop {
case "auth":
cnonce := makeClientNonce()
nonceCount := atomic.AddInt32(&d.clientNonceCount, 1)
response := h(fmt.Sprintf("%s:%s:%08x:%s:%s:%s", ha1, nonce, nonceCount, cnonce, qop, ha2))
authHeader := fmt.Sprintf(`Digest username="%s", realm="%s", nonce="%s", uri="%s", cnonce="%s", nc=%08x, qop=%s, response="%s", algorithm=%s`,
d.Username, realm, nonce, req.URL.RequestURI(), cnonce, nonceCount, qop, response, algo)
if opaque != "" {
authHeader += fmt.Sprintf(`, opaque="%s"`, opaque)
}
req.Header.Add("Authorization", authHeader)
case "":
response := h(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
authHeader := fmt.Sprintf(`Digest username="%s", realm="%s", nonce="%s", uri="%s", qop=%s, response="%s", algorithm=%s`,
d.Username, realm, nonce, req.URL.RequestURI(), qop, response, algo)
if opaque != "" {
authHeader += fmt.Sprintf(`, opaque="%s"`, opaque)
}
req.Header.Add("Authorization", authHeader)
if err = d.setDigestAuth(params, req); err != nil {
return nil, err
}
default:
@@ -131,6 +84,61 @@ func (d *davStorage) executeRequestInternal(req *http.Request, body []byte) (*ht
return d.Client.Do(req)
}
// setDigestAuth computes an HTTP Digest (RFC 2617) Authorization header
// for req from the server's challenge parameters (parsed out of the
// WWW-Authenticate header) and attaches it to the request.
//
// Only the MD5 algorithm and qop of "auth" (or absent, the legacy
// RFC 2069 form) are supported; anything else returns an error.
func (d *davStorage) setDigestAuth(params map[string]string, req *http.Request) error {
	var ha1, ha2 string
	nonce := params["nonce"]
	realm := params["realm"]
	algo := params["algorithm"]
	opaque := params["opaque"]
	// RFC 2617: MD5 is implied when the server omits "algorithm".
	if algo == "" {
		algo = "MD5"
	}
	qop := params["qop"]
	switch algo {
	case "MD5":
		// HA1 = H(username:realm:password)
		ha1 = h(fmt.Sprintf("%s:%s:%s", d.Username, realm, d.Password))
	default:
		// TODO - implement me
		return fmt.Errorf("unsupported digest algorithm: %q", algo)
	}
	switch qop {
	case "auth", "":
		// HA2 = H(method:digest-URI)
		ha2 = h(fmt.Sprintf("%s:%s", req.Method, req.URL.RequestURI()))
	default:
		// TODO - implement me
		return fmt.Errorf("unsupported digest qop: %q", qop)
	}
	switch qop {
	case "auth":
		// qop=auth: response = H(HA1:nonce:nc:cnonce:qop:HA2) with a
		// fresh client nonce and a monotonically increasing nonce count.
		cnonce := makeClientNonce()
		nonceCount := atomic.AddInt32(&d.clientNonceCount, 1)
		response := h(fmt.Sprintf("%s:%s:%08x:%s:%s:%s", ha1, nonce, nonceCount, cnonce, qop, ha2))
		authHeader := fmt.Sprintf(`Digest username="%s", realm="%s", nonce="%s", uri="%s", cnonce="%s", nc=%08x, qop=%s, response="%s", algorithm=%s`,
			d.Username, realm, nonce, req.URL.RequestURI(), cnonce, nonceCount, qop, response, algo)
		if opaque != "" {
			authHeader += fmt.Sprintf(`, opaque="%s"`, opaque)
		}
		req.Header.Add("Authorization", authHeader)
	case "":
		// Legacy RFC 2069 form: response = H(HA1:nonce:HA2), no cnonce/nc.
		// NOTE(review): this still emits an empty `qop=` attribute in the
		// header (qop is "") — verify servers tolerate this.
		response := h(fmt.Sprintf("%s:%s:%s", ha1, nonce, ha2))
		authHeader := fmt.Sprintf(`Digest username="%s", realm="%s", nonce="%s", uri="%s", qop=%s, response="%s", algorithm=%s`,
			d.Username, realm, nonce, req.URL.RequestURI(), qop, response, algo)
		if opaque != "" {
			authHeader += fmt.Sprintf(`, opaque="%s"`, opaque)
		}
		req.Header.Add("Authorization", authHeader)
	}
	return nil
}
func makeClientNonce() string {
tmp := make([]byte, 8)
io.ReadFull(rand.Reader, tmp) //nolint:errcheck