Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2026-02-07 21:01:43 -05:00)
Merge pull request #213 from opencloud-eu/bump-reva-9db89fb
Bump reva 9db89fb
go.mod (2 lines changed)
@@ -65,7 +65,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.22.2
 	github.com/onsi/gomega v1.36.2
 	github.com/open-policy-agent/opa v1.1.0
-	github.com/opencloud-eu/reva/v2 v2.27.3-0.20250218085216-6d8d9c5e692c
+	github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220084142-9db89fb25fdf
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
 	github.com/pkg/errors v0.9.1
go.sum (4 lines changed)
@@ -860,8 +860,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
 github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
 github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s=
 github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs=
-github.com/opencloud-eu/reva/v2 v2.27.3-0.20250218085216-6d8d9c5e692c h1:i9SLvfA1lOjMYPa8yx1UX7wP+t/e7AuulZ0T9IeDonc=
-github.com/opencloud-eu/reva/v2 v2.27.3-0.20250218085216-6d8d9c5e692c/go.mod h1:CxSyCOgUD/IJV2YdUhunkVrsrMDhT/84I9uwhk//XxM=
+github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220084142-9db89fb25fdf h1:HaakY/eHk2SeU6q/mPW8yvtBk3TEVHSCSfKoTVg2AQU=
+github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220084142-9db89fb25fdf/go.mod h1:CxSyCOgUD/IJV2YdUhunkVrsrMDhT/84I9uwhk//XxM=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
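Both hunks swap a Go pseudo-version so the module now pins the commit named in the PR title. A bump like this is typically produced with `go get github.com/opencloud-eu/reva/v2@9db89fb25fdf` followed by `go mod tidy`, which rewrites both go.mod and go.sum. As a standalone sketch (not part of this PR), the version string decomposes like this:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	v := "v2.27.3-0.20250220084142-9db89fb25fdf"
	parts := strings.Split(v, "-")
	// parts[0]: base version Go derives from the latest tag before the commit
	// parts[1]: "0." plus the UTC commit timestamp (yyyymmddhhmmss)
	// parts[2]: 12-character commit hash prefix
	fmt.Println("commit prefix:", parts[2]) // 9db89fb25fdf, matching "9db89fb" in the PR title
}
```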
@@ -134,7 +134,7 @@ func (dp *DataProvider) getBlobPath(path string) (string, Inconsistency) {
 
 	if bid := m["user.oc.blobid"]; string(bid) != "" {
 		spaceID, _ := getIDsFromPath(filepath.Join(dp.discpath, path))
-		return dp.lbs.Path(&node.Node{BlobID: string(bid), SpaceID: spaceID}), ""
+		return dp.lbs.Path(&node.Node{BaseNode: node.BaseNode{SpaceID: spaceID}, BlobID: string(bid)}), ""
 	}
 
 	return "", ""
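This is the first of several call sites in this PR adapted to a reva refactor that moves a node's SpaceID (and ID) into an embedded BaseNode struct. Field promotion keeps reads unchanged, but composite literals must name the embedded struct explicitly, which is exactly what changes here. A self-contained sketch with simplified stand-in types:

```go
package main

import "fmt"

// BaseNode and Node are simplified stand-ins for the reva types.
type BaseNode struct {
	SpaceID string
	ID      string
}

// Node embeds BaseNode, so n.SpaceID is promoted and existing reads keep compiling.
type Node struct {
	BaseNode
	BlobID string
}

func main() {
	// Composite literals do NOT promote fields: the embedded struct has to be
	// spelled out, which is what every updated call site in this diff does.
	n := &Node{BaseNode: BaseNode{SpaceID: "space-1"}, BlobID: "blob-1"}
	fmt.Println(n.SpaceID, n.BlobID) // promoted read still works: space-1 blob-1
}
```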
@@ -213,7 +213,7 @@ func dumpCmd(cfg *config.Config) *cli.Command {
 		Usage: `print the metadata of the given node. String attributes will be enclosed in quotes. Binary attributes will be returned encoded as base64 with their value being prefixed with '0s'.`,
 		Action: func(c *cli.Context) error {
 			lu, backend := getBackend(c)
-			path, err := getPath(c, lu)
+			path, err := getNode(c, lu)
 			if err != nil {
 				return err
 			}
@@ -242,7 +242,7 @@ func getCmd(cfg *config.Config) *cli.Command {
 		},
 		Action: func(c *cli.Context) error {
 			lu, backend := getBackend(c)
-			path, err := getPath(c, lu)
+			path, err := getNode(c, lu)
 			if err != nil {
 				return err
 			}
@@ -278,7 +278,7 @@ func setCmd(cfg *config.Config) *cli.Command {
 		},
 		Action: func(c *cli.Context) error {
 			lu, backend := getBackend(c)
-			path, err := getPath(c, lu)
+			n, err := getNode(c, lu)
 			if err != nil {
 				return err
 			}
@@ -300,7 +300,7 @@ func setCmd(cfg *config.Config) *cli.Command {
 				}
 			}
 
-			err = backend.Set(c.Context, path, c.String("attribute"), []byte(v))
+			err = backend.Set(c.Context, n, c.String("attribute"), []byte(v))
 			if err != nil {
 				fmt.Println("Error setting attribute")
 				return err
@@ -332,27 +332,15 @@ func getBackend(c *cli.Context) (*lookup.Lookup, metadata.Backend) {
 	return lu, backend
 }
 
-func getPath(c *cli.Context, lu *lookup.Lookup) (string, error) {
+func getNode(c *cli.Context, lu *lookup.Lookup) (*node.Node, error) {
 	nodeFlag := c.String("node")
 
-	path := ""
-	if strings.HasPrefix(nodeFlag, "/") {
-		path = nodeFlag
-	} else {
-		nId := c.String("node")
-		id, err := storagespace.ParseID(nId)
-		if err != nil {
-			fmt.Println("Invalid node id.")
-			return "", err
-		}
-		n, err := lu.NodeFromID(context.Background(), &id)
-		if err != nil || !n.Exists {
-			fmt.Println("Can not find node '" + nId + "'")
-			return "", err
-		}
-		path = n.InternalPath()
+	id, err := storagespace.ParseID(nodeFlag)
+	if err != nil {
+		fmt.Println("Invalid node id.")
+		return nil, err
 	}
-	return path, nil
+	return lu.NodeFromID(context.Background(), &id)
 }
 
 func printAttribs(attribs map[string][]byte, onlyAttribute string) {
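getNode replaces getPath: the helper no longer accepts absolute paths (the strings.HasPrefix(nodeFlag, "/") branch is gone) and always parses the -node flag as a storage-space ID, returning a *node.Node instead of an internal path. reva's storagespace IDs concatenate provider, space, and node IDs; the sketch below shows the assumed format (the real parser is storagespace.ParseID, and the delimiters here mirror its "$" and "!" convention in simplified form):

```go
package main

import (
	"fmt"
	"strings"
)

type ResourceID struct{ StorageID, SpaceID, OpaqueID string }

// parseID splits "storage$space!node" into its components. This is a
// simplified stand-in for reva's storagespace.ParseID, not its actual code.
func parseID(raw string) (ResourceID, error) {
	storage, rest, ok := strings.Cut(raw, "$")
	if !ok {
		return ResourceID{}, fmt.Errorf("invalid node id: %q", raw)
	}
	space, opaque, ok := strings.Cut(rest, "!")
	if !ok {
		// a bare "storage$space" identifies the space root
		return ResourceID{StorageID: storage, SpaceID: rest, OpaqueID: rest}, nil
	}
	return ResourceID{StorageID: storage, SpaceID: space, OpaqueID: opaque}, nil
}

func main() {
	id, _ := parseID("provider-1$space-a!node-42")
	fmt.Printf("%+v\n", id) // {StorageID:provider-1 SpaceID:space-a OpaqueID:node-42}
}
```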
@@ -1,571 +0,0 @@
-package command
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-
-	collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
-	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
-	"github.com/mitchellh/mapstructure"
-	tw "github.com/olekukonko/tablewriter"
-	"github.com/opencloud-eu/reva/v2/pkg/publicshare"
-	publicregistry "github.com/opencloud-eu/reva/v2/pkg/publicshare/manager/registry"
-	"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
-	"github.com/opencloud-eu/reva/v2/pkg/share"
-	"github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3"
-	"github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3/providercache"
-	"github.com/opencloud-eu/reva/v2/pkg/share/manager/jsoncs3/shareid"
-	"github.com/opencloud-eu/reva/v2/pkg/share/manager/registry"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/timemanager"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/migrator"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/utils/metadata"
-	"github.com/pkg/errors"
-	"github.com/rs/zerolog"
-
-	"github.com/opencloud-eu/opencloud/opencloud/pkg/register"
-	"github.com/opencloud-eu/opencloud/pkg/config"
-	"github.com/opencloud-eu/opencloud/pkg/config/configlog"
-	"github.com/opencloud-eu/opencloud/pkg/config/parser"
-	oclog "github.com/opencloud-eu/opencloud/pkg/log"
-	mregistry "github.com/opencloud-eu/opencloud/pkg/registry"
-	sharing "github.com/opencloud-eu/opencloud/services/sharing/pkg/config"
-	sharingparser "github.com/opencloud-eu/opencloud/services/sharing/pkg/config/parser"
-	"github.com/urfave/cli/v2"
-)
-
-// Migrate is the entrypoint for the Migrate command.
-func Migrate(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:     "migrate",
-		Usage:    "migrate data from an existing to another instance",
-		Category: "migration",
-		Subcommands: []*cli.Command{
-			MigrateDecomposedfs(cfg),
-			MigrateShares(cfg),
-			MigratePublicShares(cfg),
-			RebuildJSONCS3Indexes(cfg),
-		},
-	}
-}
-
-func init() {
-	register.AddCommand(Migrate)
-}
-
-// RebuildJSONCS3Indexes rebuilds the share indexes from the shares json
-func RebuildJSONCS3Indexes(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:        "rebuild-jsoncs3-indexes",
-		Usage:       "rebuild the share indexes from the shares json",
-		Subcommands: []*cli.Command{},
-		Flags:       []cli.Flag{},
-		Before: func(c *cli.Context) error {
-			// Parse base config
-			if err := parser.ParseConfig(cfg, true); err != nil {
-				return configlog.ReturnError(err)
-			}
-
-			// Parse sharing config
-			cfg.Sharing.Commons = cfg.Commons
-			return configlog.ReturnError(sharingparser.ParseConfig(cfg.Sharing))
-		},
-		Action: func(c *cli.Context) error {
-			log := logger()
-			ctx := log.WithContext(context.Background())
-			rcfg := revaShareConfig(cfg.Sharing)
-
-			// Initialize registry to make service lookup work
-			_ = mregistry.GetRegistry()
-
-			// Get a jsoncs3 manager to operate its caches
-			type config struct {
-				GatewayAddr       string `mapstructure:"gateway_addr"`
-				MaxConcurrency    int    `mapstructure:"max_concurrency"`
-				ProviderAddr      string `mapstructure:"provider_addr"`
-				ServiceUserID     string `mapstructure:"service_user_id"`
-				ServiceUserIdp    string `mapstructure:"service_user_idp"`
-				MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"`
-			}
-			conf := &config{}
-			if err := mapstructure.Decode(rcfg["jsoncs3"], conf); err != nil {
-				err = errors.Wrap(err, "error creating a new manager")
-				return err
-			}
-			s, err := metadata.NewCS3Storage(conf.GatewayAddr, conf.ProviderAddr, conf.ServiceUserID, conf.ServiceUserIdp, conf.MachineAuthAPIKey)
-			if err != nil {
-				return err
-			}
-			err = s.Init(ctx, "jsoncs3-share-manager-metadata")
-			if err != nil {
-				return err
-			}
-			gatewaySelector, err := pool.GatewaySelector(conf.GatewayAddr)
-			if err != nil {
-				return err
-			}
-			mgr, err := jsoncs3.New(s, gatewaySelector, 0, nil, 1)
-			if err != nil {
-				return err
-			}
-
-			// Rebuild indexes
-			errorsOccured := false
-			storages, err := s.ReadDir(ctx, "storages")
-			if err != nil {
-				return err
-			}
-			for iStorage, storage := range storages {
-				fmt.Printf("Scanning storage %s (%d/%d)\n", storage, iStorage+1, len(storages))
-				spaces, err := s.ReadDir(ctx, filepath.Join("storages", storage))
-				if err != nil {
-					fmt.Printf("failed! (%s)\n", err.Error())
-					errorsOccured = true
-					continue
-				}
-
-				for iSpace, space := range spaces {
-					fmt.Printf("  Rebuilding space '%s' %d/%d...", strings.TrimSuffix(space, ".json"), iSpace+1, len(spaces))
-
-					spaceBlob, err := s.SimpleDownload(ctx, filepath.Join("storages", storage, space))
-					if err != nil {
-						fmt.Printf(" failed! (%s)\n", err.Error())
-						errorsOccured = true
-						continue
-					}
-					shares := &providercache.Shares{}
-					err = json.Unmarshal(spaceBlob, shares)
-					if err != nil {
-						fmt.Printf(" failed! (%s)\n", err.Error())
-						errorsOccured = true
-						continue
-					}
-					for _, share := range shares.Shares {
-						err = mgr.Cache.Add(ctx, share.ResourceId.StorageId, share.ResourceId.SpaceId, share.Id.OpaqueId, share)
-						if err != nil {
-							fmt.Printf(" adding share '%s' to the cache failed! (%s)\n", share.Id.OpaqueId, err.Error())
-							errorsOccured = true
-						}
-						err = mgr.CreatedCache.Add(ctx, share.Creator.OpaqueId, share.Id.OpaqueId)
-						if err != nil {
-							fmt.Printf(" adding share '%s' to the created cache failed! (%s)\n", share.Id.OpaqueId, err.Error())
-							errorsOccured = true
-						}
-
-						spaceId := share.ResourceId.StorageId + shareid.IDDelimiter + share.ResourceId.SpaceId
-						switch share.Grantee.Type {
-						case provider.GranteeType_GRANTEE_TYPE_USER:
-							userid := share.Grantee.GetUserId().GetOpaqueId()
-							existingState, err := mgr.UserReceivedStates.Get(ctx, userid, spaceId, share.Id.OpaqueId)
-							if err != nil {
-								fmt.Printf(" retrieving current state of received share '%s' from the user cache failed! (%s)\n", share.Id.OpaqueId, err.Error())
-								errorsOccured = true
-							} else if existingState == nil {
-								rs := &collaboration.ReceivedShare{
-									Share: share,
-									State: collaboration.ShareState_SHARE_STATE_PENDING,
-								}
-								err := mgr.UserReceivedStates.Add(ctx, userid, spaceId, rs)
-								if err != nil {
-									fmt.Printf(" adding share '%s' to the user cache failed! (%s)\n", share.Id.OpaqueId, err.Error())
-									errorsOccured = true
-								}
-							}
-						case provider.GranteeType_GRANTEE_TYPE_GROUP:
-							groupid := share.Grantee.GetGroupId().GetOpaqueId()
-							err := mgr.GroupReceivedCache.Add(ctx, groupid, spaceId)
-							if err != nil {
-								fmt.Printf(" adding share '%s' to the group cache failed! (%s)\n", share.Id.OpaqueId, err.Error())
-								errorsOccured = true
-							}
-						}
-					}
-					fmt.Printf(" done\n")
-				}
-				fmt.Printf("done\n")
-			}
-			if errorsOccured {
-				return errors.New("There were errors. Please review the logs or try again.")
-			}
-
-			return nil
-		},
-	}
-}
-
-// MigrateDecomposedfs is the entrypoint for the decomposedfs migrate command
-func MigrateDecomposedfs(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:  "decomposedfs",
-		Usage: "run a decomposedfs migration",
-		Subcommands: []*cli.Command{
-			ListDecomposedfsMigrations(cfg),
-		},
-		Flags: []cli.Flag{
-			&cli.StringFlag{
-				Name:    "direction",
-				Aliases: []string{"d"},
-				Value:   "migrate",
-				Usage:   "direction of the migration to run ('migrate' or 'rollback')",
-			},
-			&cli.StringFlag{
-				Name:    "migration",
-				Aliases: []string{"m"},
-				Value:   "",
-				Usage:   "ID of the migration to run",
-			},
-			&cli.StringFlag{
-				Name:     "root",
-				Aliases:  []string{"r"},
-				Required: true,
-				Usage:    "Path to the root directory of the decomposedfs",
-			},
-		},
-		Before: func(c *cli.Context) error {
-			// Parse base config
-			if err := parser.ParseConfig(cfg, true); err != nil {
-				return configlog.ReturnError(err)
-			}
-			return nil
-		},
-		Action: func(c *cli.Context) error {
-			log := logger()
-			rootFlag := c.String("root")
-			bod := lookup.DetectBackendOnDisk(rootFlag)
-			backend := backend(rootFlag, bod)
-			lu := lookup.New(backend, &options.Options{
-				Root:            rootFlag,
-				MetadataBackend: bod,
-			}, &timemanager.Manager{})
-
-			m := migrator.New(lu, log)
-
-			err := m.RunMigration(c.String("migration"), c.String("direction") == "down")
-			if err != nil {
-				log.Error().Err(err).Msg("failed")
-				return err
-			}
-
-			return nil
-		},
-	}
-}
-
-// ListDecomposedfsMigrations is the entrypoint for the decomposedfs list migrations command
-func ListDecomposedfsMigrations(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:  "list",
-		Usage: "list decomposedfs migrations",
-		Action: func(c *cli.Context) error {
-			rootFlag := c.String("root")
-			bod := lookup.DetectBackendOnDisk(rootFlag)
-			backend := backend(rootFlag, bod)
-			lu := lookup.New(backend, &options.Options{
-				Root:            rootFlag,
-				MetadataBackend: bod,
-			}, &timemanager.Manager{})
-
-			m := migrator.New(lu, logger())
-			migrationStates, err := m.Migrations()
-			if err != nil {
-				return err
-			}
-
-			migrations := []string{}
-			for m := range migrationStates {
-				migrations = append(migrations, m)
-			}
-			sort.Strings(migrations)
-
-			table := tw.NewWriter(os.Stdout)
-			table.SetHeader([]string{"Migration", "State", "Message"})
-			table.SetAutoFormatHeaders(false)
-			for _, migration := range migrations {
-				table.Append([]string{migration, migrationStates[migration].State, migrationStates[migration].Message})
-			}
-			table.Render()
-
-			return nil
-		},
-	}
-}
-
-func MigrateShares(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:  "shares",
-		Usage: "migrates shares from the previous to the new share manager",
-		Flags: []cli.Flag{
-			&cli.StringFlag{
-				Name:  "from",
-				Value: "json",
-				Usage: "Share manager to export the data from",
-			},
-			&cli.StringFlag{
-				Name:  "to",
-				Value: "jsoncs3",
-				Usage: "Share manager to import the data into",
-			},
-		},
-		Before: func(c *cli.Context) error {
-			// Parse base config
-			if err := parser.ParseConfig(cfg, true); err != nil {
-				return configlog.ReturnError(err)
-			}
-
-			// Parse sharing config
-			cfg.Sharing.Commons = cfg.Commons
-			return configlog.ReturnError(sharingparser.ParseConfig(cfg.Sharing))
-		},
-		Action: func(c *cli.Context) error {
-			log := logger()
-			ctx := log.WithContext(context.Background())
-			rcfg := revaShareConfig(cfg.Sharing)
-			oldDriver := c.String("from")
-			newDriver := c.String("to")
-			shareChan := make(chan *collaboration.Share)
-			receivedShareChan := make(chan share.ReceivedShareWithUser)
-
-			f, ok := registry.NewFuncs[oldDriver]
-			if !ok {
-				log.Error().Msg("Unknown share manager type '" + oldDriver + "'")
-				os.Exit(1)
-			}
-			oldMgr, err := f(rcfg[oldDriver].(map[string]interface{}))
-			if err != nil {
-				log.Error().Err(err).Msg("failed to initiate source share manager")
-				os.Exit(1)
-			}
-			dumpMgr, ok := oldMgr.(share.DumpableManager)
-			if !ok {
-				log.Error().Msg("Share manager type '" + oldDriver + "' does not support dumping its shares.")
-				os.Exit(1)
-			}
-
-			f, ok = registry.NewFuncs[newDriver]
-			if !ok {
-				log.Error().Msg("Unknown share manager type '" + newDriver + "'")
-				os.Exit(1)
-			}
-			newMgr, err := f(rcfg[newDriver].(map[string]interface{}))
-			if err != nil {
-				log.Error().Err(err).Msg("failed to initiate destination share manager")
-				os.Exit(1)
-			}
-			loadMgr, ok := newMgr.(share.LoadableManager)
-			if !ok {
-				log.Error().Msg("Share manager type '" + newDriver + "' does not support loading a shares dump.")
-				os.Exit(1)
-			}
-
-			var wg sync.WaitGroup
-			wg.Add(2)
-			go func() {
-				log.Info().Msg("Migrating shares...")
-				err = loadMgr.Load(ctx, shareChan, receivedShareChan)
-				log.Info().Msg("Finished migrating shares.")
-				if err != nil {
-					log.Error().Err(err).Msg("Error while loading shares")
-					os.Exit(1)
-				}
-				wg.Done()
-			}()
-			go func() {
-				err = dumpMgr.Dump(ctx, shareChan, receivedShareChan)
-				if err != nil {
-					log.Error().Err(err).Msg("Error while dumping shares")
-					os.Exit(1)
-				}
-				close(shareChan)
-				close(receivedShareChan)
-				wg.Done()
-			}()
-			wg.Wait()
-			return nil
-		},
-	}
-}
-
-func MigratePublicShares(cfg *config.Config) *cli.Command {
-	return &cli.Command{
-		Name:  "publicshares",
-		Usage: "migrates public shares from the previous to the new public share manager",
-		Flags: []cli.Flag{
-			&cli.StringFlag{
-				Name:  "from",
-				Value: "json",
-				Usage: "Public share manager to export the data from",
-			},
-			&cli.StringFlag{
-				Name:  "to",
-				Value: "jsoncs3",
-				Usage: "Public share manager to import the data into",
-			},
-		},
-		Before: func(c *cli.Context) error {
-			// Parse base config
-			if err := parser.ParseConfig(cfg, true); err != nil {
-				return configlog.ReturnError(err)
-			}
-
-			// Parse sharing config
-			cfg.Sharing.Commons = cfg.Commons
-			return configlog.ReturnError(sharingparser.ParseConfig(cfg.Sharing))
-		},
-		Action: func(c *cli.Context) error {
-			log := logger()
-			ctx := log.WithContext(context.Background())
-
-			rcfg := revaPublicShareConfig(cfg.Sharing)
-			oldDriver := c.String("from")
-			newDriver := c.String("to")
-			shareChan := make(chan *publicshare.WithPassword)
-
-			f, ok := publicregistry.NewFuncs[oldDriver]
-			if !ok {
-				log.Error().Msg("Unknown public share manager type '" + oldDriver + "'")
-				os.Exit(1)
-			}
-			oldMgr, err := f(rcfg[oldDriver].(map[string]interface{}))
-			if err != nil {
-				log.Error().Err(err).Msg("failed to initiate source public share manager")
-				os.Exit(1)
-			}
-			dumpMgr, ok := oldMgr.(publicshare.DumpableManager)
-			if !ok {
-				log.Error().Msg("Public share manager type '" + oldDriver + "' does not support dumping its public shares.")
-				os.Exit(1)
-			}
-
-			f, ok = publicregistry.NewFuncs[newDriver]
-			if !ok {
-				log.Error().Msg("Unknown public share manager type '" + newDriver + "'")
-				os.Exit(1)
-			}
-			newMgr, err := f(rcfg[newDriver].(map[string]interface{}))
-			if err != nil {
-				log.Error().Err(err).Msg("failed to initiate destination public share manager")
-				os.Exit(1)
-			}
-			loadMgr, ok := newMgr.(publicshare.LoadableManager)
-			if !ok {
-				log.Error().Msg("Public share manager type '" + newDriver + "' does not support loading a public shares dump.")
-				os.Exit(1)
-			}
-
-			var wg sync.WaitGroup
-			wg.Add(2)
-			go func() {
-				log.Info().Msg("Migrating public shares...")
-				err = loadMgr.Load(ctx, shareChan)
-				log.Info().Msg("Finished migrating public shares.")
-				if err != nil {
-					log.Error().Err(err).Msg("Error while loading public shares")
-					os.Exit(1)
-				}
-				wg.Done()
-			}()
-			go func() {
-				err = dumpMgr.Dump(ctx, shareChan)
-				if err != nil {
-					log.Error().Err(err).Msg("Error while dumping public shares")
-					os.Exit(1)
-				}
-				close(shareChan)
-				wg.Done()
-			}()
-			wg.Wait()
-			return nil
-		},
-	}
-}
-
-func revaShareConfig(cfg *sharing.Config) map[string]interface{} {
-	return map[string]interface{}{
-		"json": map[string]interface{}{
-			"file":         cfg.UserSharingDrivers.JSON.File,
-			"gateway_addr": cfg.Reva.Address,
-		},
-		"sql": map[string]interface{}{ // cernbox sql
-			"db_username":                   cfg.UserSharingDrivers.SQL.DBUsername,
-			"db_password":                   cfg.UserSharingDrivers.SQL.DBPassword,
-			"db_host":                       cfg.UserSharingDrivers.SQL.DBHost,
-			"db_port":                       cfg.UserSharingDrivers.SQL.DBPort,
-			"db_name":                       cfg.UserSharingDrivers.SQL.DBName,
-			"password_hash_cost":            cfg.UserSharingDrivers.SQL.PasswordHashCost,
-			"enable_expired_shares_cleanup": cfg.UserSharingDrivers.SQL.EnableExpiredSharesCleanup,
-			"janitor_run_interval":          cfg.UserSharingDrivers.SQL.JanitorRunInterval,
-		},
-		"owncloudsql": map[string]interface{}{
-			"gateway_addr":     cfg.Reva.Address,
-			"storage_mount_id": cfg.UserSharingDrivers.OwnCloudSQL.UserStorageMountID,
-			"db_username":      cfg.UserSharingDrivers.OwnCloudSQL.DBUsername,
-			"db_password":      cfg.UserSharingDrivers.OwnCloudSQL.DBPassword,
-			"db_host":          cfg.UserSharingDrivers.OwnCloudSQL.DBHost,
-			"db_port":          cfg.UserSharingDrivers.OwnCloudSQL.DBPort,
-			"db_name":          cfg.UserSharingDrivers.OwnCloudSQL.DBName,
-		},
-		"cs3": map[string]interface{}{
-			"gateway_addr":        cfg.UserSharingDrivers.CS3.ProviderAddr,
-			"provider_addr":       cfg.UserSharingDrivers.CS3.ProviderAddr,
-			"service_user_id":     cfg.UserSharingDrivers.CS3.SystemUserID,
-			"service_user_idp":    cfg.UserSharingDrivers.CS3.SystemUserIDP,
-			"machine_auth_apikey": cfg.UserSharingDrivers.CS3.SystemUserAPIKey,
-		},
-		"jsoncs3": map[string]interface{}{
-			"gateway_addr":        cfg.Reva.Address,
-			"provider_addr":       cfg.UserSharingDrivers.JSONCS3.ProviderAddr,
-			"service_user_id":     cfg.UserSharingDrivers.JSONCS3.SystemUserID,
-			"service_user_idp":    cfg.UserSharingDrivers.JSONCS3.SystemUserIDP,
-			"machine_auth_apikey": cfg.UserSharingDrivers.JSONCS3.SystemUserAPIKey,
-		},
-	}
-}
-
-func revaPublicShareConfig(cfg *sharing.Config) map[string]interface{} {
-	return map[string]interface{}{
-		"json": map[string]interface{}{
-			"file":         cfg.PublicSharingDrivers.JSON.File,
-			"gateway_addr": cfg.Reva.Address,
-		},
-		"jsoncs3": map[string]interface{}{
-			"gateway_addr":        cfg.Reva.Address,
-			"provider_addr":       cfg.PublicSharingDrivers.JSONCS3.ProviderAddr,
-			"service_user_id":     cfg.PublicSharingDrivers.JSONCS3.SystemUserID,
-			"service_user_idp":    cfg.PublicSharingDrivers.JSONCS3.SystemUserIDP,
-			"machine_auth_apikey": cfg.PublicSharingDrivers.JSONCS3.SystemUserAPIKey,
-		},
-		"sql": map[string]interface{}{
-			"db_username":                   cfg.PublicSharingDrivers.SQL.DBUsername,
-			"db_password":                   cfg.PublicSharingDrivers.SQL.DBPassword,
-			"db_host":                       cfg.PublicSharingDrivers.SQL.DBHost,
-			"db_port":                       cfg.PublicSharingDrivers.SQL.DBPort,
-			"db_name":                       cfg.PublicSharingDrivers.SQL.DBName,
-			"password_hash_cost":            cfg.PublicSharingDrivers.SQL.PasswordHashCost,
-			"enable_expired_shares_cleanup": cfg.PublicSharingDrivers.SQL.EnableExpiredSharesCleanup,
-			"janitor_run_interval":          cfg.PublicSharingDrivers.SQL.JanitorRunInterval,
-		},
-		"cs3": map[string]interface{}{
-			"gateway_addr":        cfg.PublicSharingDrivers.CS3.ProviderAddr,
-			"provider_addr":       cfg.PublicSharingDrivers.CS3.ProviderAddr,
-			"service_user_id":     cfg.PublicSharingDrivers.CS3.SystemUserID,
-			"service_user_idp":    cfg.PublicSharingDrivers.CS3.SystemUserIDP,
-			"machine_auth_apikey": cfg.PublicSharingDrivers.CS3.SystemUserAPIKey,
-		},
-	}
-}
-
-func logger() *zerolog.Logger {
-	log := oclog.NewLogger(
-		oclog.Name("migrate"),
-		oclog.Level("info"),
-		oclog.Pretty(true),
-		oclog.Color(true)).Logger
-	return &log
-}
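Note that this deletion is only partly a removal: the revaShareConfig, revaPublicShareConfig, and logger helpers reappear verbatim in the hunks below, added to the remaining migrate command file (the one containing cleanup). Where the migrate subcommands themselves ended up is not visible in this excerpt.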
@@ -15,7 +15,9 @@ import (
 	"github.com/opencloud-eu/opencloud/pkg/config"
 	"github.com/opencloud-eu/opencloud/pkg/config/configlog"
 	"github.com/opencloud-eu/opencloud/pkg/config/parser"
+	oclog "github.com/opencloud-eu/opencloud/pkg/log"
 	mregistry "github.com/opencloud-eu/opencloud/pkg/registry"
+	sharing "github.com/opencloud-eu/opencloud/services/sharing/pkg/config"
 	sharingparser "github.com/opencloud-eu/opencloud/services/sharing/pkg/config/parser"
 )
@@ -126,3 +128,87 @@ func cleanup(c *cli.Context, cfg *config.Config) error {
 
 	return nil
 }
+
+func revaShareConfig(cfg *sharing.Config) map[string]interface{} {
+	return map[string]interface{}{
+		"json": map[string]interface{}{
+			"file":         cfg.UserSharingDrivers.JSON.File,
+			"gateway_addr": cfg.Reva.Address,
+		},
+		"sql": map[string]interface{}{ // cernbox sql
+			"db_username":                   cfg.UserSharingDrivers.SQL.DBUsername,
+			"db_password":                   cfg.UserSharingDrivers.SQL.DBPassword,
+			"db_host":                       cfg.UserSharingDrivers.SQL.DBHost,
+			"db_port":                       cfg.UserSharingDrivers.SQL.DBPort,
+			"db_name":                       cfg.UserSharingDrivers.SQL.DBName,
+			"password_hash_cost":            cfg.UserSharingDrivers.SQL.PasswordHashCost,
+			"enable_expired_shares_cleanup": cfg.UserSharingDrivers.SQL.EnableExpiredSharesCleanup,
+			"janitor_run_interval":          cfg.UserSharingDrivers.SQL.JanitorRunInterval,
+		},
+		"owncloudsql": map[string]interface{}{
+			"gateway_addr":     cfg.Reva.Address,
+			"storage_mount_id": cfg.UserSharingDrivers.OwnCloudSQL.UserStorageMountID,
+			"db_username":      cfg.UserSharingDrivers.OwnCloudSQL.DBUsername,
+			"db_password":      cfg.UserSharingDrivers.OwnCloudSQL.DBPassword,
+			"db_host":          cfg.UserSharingDrivers.OwnCloudSQL.DBHost,
+			"db_port":          cfg.UserSharingDrivers.OwnCloudSQL.DBPort,
+			"db_name":          cfg.UserSharingDrivers.OwnCloudSQL.DBName,
+		},
+		"cs3": map[string]interface{}{
+			"gateway_addr":        cfg.UserSharingDrivers.CS3.ProviderAddr,
+			"provider_addr":       cfg.UserSharingDrivers.CS3.ProviderAddr,
+			"service_user_id":     cfg.UserSharingDrivers.CS3.SystemUserID,
+			"service_user_idp":    cfg.UserSharingDrivers.CS3.SystemUserIDP,
+			"machine_auth_apikey": cfg.UserSharingDrivers.CS3.SystemUserAPIKey,
+		},
+		"jsoncs3": map[string]interface{}{
+			"gateway_addr":        cfg.Reva.Address,
+			"provider_addr":       cfg.UserSharingDrivers.JSONCS3.ProviderAddr,
+			"service_user_id":     cfg.UserSharingDrivers.JSONCS3.SystemUserID,
+			"service_user_idp":    cfg.UserSharingDrivers.JSONCS3.SystemUserIDP,
+			"machine_auth_apikey": cfg.UserSharingDrivers.JSONCS3.SystemUserAPIKey,
+		},
+	}
+}
+
+func revaPublicShareConfig(cfg *sharing.Config) map[string]interface{} {
+	return map[string]interface{}{
+		"json": map[string]interface{}{
+			"file":         cfg.PublicSharingDrivers.JSON.File,
+			"gateway_addr": cfg.Reva.Address,
+		},
+		"jsoncs3": map[string]interface{}{
+			"gateway_addr":        cfg.Reva.Address,
+			"provider_addr":       cfg.PublicSharingDrivers.JSONCS3.ProviderAddr,
+			"service_user_id":     cfg.PublicSharingDrivers.JSONCS3.SystemUserID,
+			"service_user_idp":    cfg.PublicSharingDrivers.JSONCS3.SystemUserIDP,
+			"machine_auth_apikey": cfg.PublicSharingDrivers.JSONCS3.SystemUserAPIKey,
+		},
+		"sql": map[string]interface{}{
+			"db_username":                   cfg.PublicSharingDrivers.SQL.DBUsername,
+			"db_password":                   cfg.PublicSharingDrivers.SQL.DBPassword,
+			"db_host":                       cfg.PublicSharingDrivers.SQL.DBHost,
+			"db_port":                       cfg.PublicSharingDrivers.SQL.DBPort,
+			"db_name":                       cfg.PublicSharingDrivers.SQL.DBName,
+			"password_hash_cost":            cfg.PublicSharingDrivers.SQL.PasswordHashCost,
+			"enable_expired_shares_cleanup": cfg.PublicSharingDrivers.SQL.EnableExpiredSharesCleanup,
+			"janitor_run_interval":          cfg.PublicSharingDrivers.SQL.JanitorRunInterval,
+		},
+		"cs3": map[string]interface{}{
+			"gateway_addr":        cfg.PublicSharingDrivers.CS3.ProviderAddr,
+			"provider_addr":       cfg.PublicSharingDrivers.CS3.ProviderAddr,
+			"service_user_id":     cfg.PublicSharingDrivers.CS3.SystemUserID,
+			"service_user_idp":    cfg.PublicSharingDrivers.CS3.SystemUserIDP,
+			"machine_auth_apikey": cfg.PublicSharingDrivers.CS3.SystemUserAPIKey,
+		},
+	}
+}
+
+func logger() *zerolog.Logger {
+	log := oclog.NewLogger(
+		oclog.Name("migrate"),
+		oclog.Level("info"),
+		oclog.Pretty(true),
+		oclog.Color(true)).Logger
+	return &log
+}
@@ -127,8 +127,10 @@ func (bs *Blobstore) List() ([]*node.Node, error) {
 		_, s, _ := strings.Cut(d, "spaces")
 		spaceraw, blobraw, _ := strings.Cut(s, "blobs")
 		blobids = append(blobids, &node.Node{
-			SpaceID: strings.ReplaceAll(spaceraw, "/", ""),
-			BlobID:  strings.ReplaceAll(blobraw, "/", ""),
+			BaseNode: node.BaseNode{
+				SpaceID: strings.ReplaceAll(spaceraw, "/", ""),
+			},
+			BlobID: strings.ReplaceAll(blobraw, "/", ""),
 		})
 	}
 	return blobids, nil
@@ -141,8 +141,10 @@ func (bs *Blobstore) List() ([]*node.Node, error) {
 		}
 		spaceid, blobid, _ := strings.Cut(oi.Key, "/")
 		ids = append(ids, &node.Node{
-			SpaceID: strings.ReplaceAll(spaceid, "/", ""),
-			BlobID:  strings.ReplaceAll(blobid, "/", ""),
+			BaseNode: node.BaseNode{
+				SpaceID: strings.ReplaceAll(spaceid, "/", ""),
+			},
+			BlobID: strings.ReplaceAll(blobid, "/", ""),
 		})
 	}
 	return ids, err
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/decomposed_s3/decomposed_s3.go (generated, vendored; 2 lines changed)
@@ -30,7 +30,7 @@ import (
 )
 
 func init() {
-	registry.Register("decomposed-s3", New)
+	registry.Register("decomposed_s3", New)
 }
 
 // New returns an implementation to of the storage.FS interface that talk to
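The driver is now registered under "decomposed_s3" instead of "decomposed-s3", matching the package name. Presumably any configuration that selects this storage driver by its registry name has to switch to the underscore spelling once this vendored change lands.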
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go (generated, vendored; 104 lines changed)
@@ -37,7 +37,6 @@ import (
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/usermapper"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/utils/templates"
-	"github.com/opencloud-eu/reva/v2/pkg/storagespace"
 	"github.com/pkg/errors"
 	"github.com/rogpeppe/go-internal/lockedfile"
 	"go.opentelemetry.io/otel"
@@ -102,30 +101,28 @@ func (lu *Lookup) GetCachedID(ctx context.Context, spaceID, nodeID string) (stri
 	return lu.IDCache.Get(ctx, spaceID, nodeID)
 }
 
 // IDsForPath returns the space and opaque id for the given path
 func (lu *Lookup) IDsForPath(ctx context.Context, path string) (string, string, error) {
-	// IDsForPath returns the space and opaque id for the given path
 	spaceID, nodeID, ok := lu.IDCache.GetByPath(ctx, path)
 	if !ok {
-		return "", "", fmt.Errorf("path %s not found in cache", path)
+		return "", "", errtypes.NotFound("path not found in cache:" + path)
 	}
 	return spaceID, nodeID, nil
 }
 
 // NodeFromPath returns the node for the given path
 func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, parent *node.Node, name string) (string, error) {
-	id, err := lu.metadataBackend.Get(ctx, filepath.Join(parent.InternalPath(), name), prefixes.IDAttr)
-	if err != nil {
-		if metadata.IsNotExist(err) {
-			return "", errtypes.NotFound(name)
-		}
-		return "", err
-	}
-	_, err = os.Stat(filepath.Join(parent.InternalPath(), name))
-	if err != nil {
-		return "", err
+	parentPath, ok := lu.GetCachedID(ctx, parent.SpaceID, parent.ID)
+	if !ok {
+		return "", errtypes.NotFound(parent.ID)
 	}
 
-	return string(id), nil
+	childPath := filepath.Join(parentPath, name)
+	_, childID, err := lu.IDsForPath(ctx, childPath)
+	if err != nil {
+		return "", err
+	}
+	return childID, nil
 }
 
 // MetadataBackend returns the metadata backend
@@ -133,46 +130,14 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
 	return lu.metadataBackend
 }
 
-func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, _ node.Attributes) (string, int64, error) {
-	fi, err := os.Stat(path)
+func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, n metadata.MetadataNode, _ node.Attributes) (string, int64, error) {
+	fi, err := os.Stat(n.InternalPath())
 	if err != nil {
 		return "", 0, errors.Wrap(err, "error stating file")
 	}
 	return "", fi.Size(), nil
 }
 
-// TypeFromPath returns the type of the node at the given path
-func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.ResourceType {
-	// Try to read from xattrs
-	typeAttr, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.TypeAttr)
-	if err == nil {
-		return provider.ResourceType(int32(typeAttr))
-	}
-
-	t := provider.ResourceType_RESOURCE_TYPE_INVALID
-	// Fall back to checking on disk
-	fi, err := os.Lstat(path)
-	if err != nil {
-		return t
-	}
-
-	switch {
-	case fi.IsDir():
-		if _, err = lu.metadataBackend.Get(ctx, path, prefixes.ReferenceAttr); err == nil {
-			t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
-		} else {
-			t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
-		}
-	case fi.Mode().IsRegular():
-		t = provider.ResourceType_RESOURCE_TYPE_FILE
-	case fi.Mode()&os.ModeSymlink != 0:
-		t = provider.ResourceType_RESOURCE_TYPE_SYMLINK
-		// TODO reference using ext attr on a symlink
-		// nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE
-	}
-	return t
-}
-
 // NodeFromResource takes in a request path or request id and converts it to a Node
 func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) {
 	ctx, span := tracer.Start(ctx, "NodeFromResource")
@@ -320,6 +285,12 @@ func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
 			return ""
 		}
 		return filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2))
+	} else if strings.HasSuffix(nodeID, node.CurrentIDDelimiter) {
+		spaceRoot, _ := lu.IDCache.Get(context.Background(), spaceID, spaceID)
+		if len(spaceRoot) == 0 {
+			return ""
+		}
+		filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2)+_currentSuffix)
 	}
 
 	path, _ := lu.IDCache.Get(context.Background(), spaceID, nodeID)
@@ -347,12 +318,6 @@ func (lu *Lookup) CurrentPath(spaceID, nodeID string) string {
 	return filepath.Join(spaceRoot, RevisionsDir, Pathify(nodeID, 4, 2)+_currentSuffix)
 }
 
-// // ReferenceFromAttr returns a CS3 reference from xattr of a node.
-// // Supported formats are: "cs3:storageid/nodeid"
-// func ReferenceFromAttr(b []byte) (*provider.Reference, error) {
-// 	return refFromCS3(b)
-// }
-
 // refFromCS3 creates a CS3 reference from a set of bytes. This method should remain private
 // and only be called after validation because it can potentially panic.
 func refFromCS3(b []byte) (*provider.Reference, error) {
@@ -369,7 +334,7 @@ func refFromCS3(b []byte) (*provider.Reference, error) {
 // The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
 // For the source file, a shared lock is acquired.
 // NOTE: target resource will be write locked!
-func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) {
+func (lu *Lookup) CopyMetadata(ctx context.Context, src, target metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) {
 	// Acquire a read log on the source node
 	// write lock existing node before reading treesize or tree time
 	lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(src), os.O_RDONLY|os.O_CREATE, 0600)
@@ -396,15 +361,15 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta
 // The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
 // For the source file, a matching lockedfile is required.
 // NOTE: target resource will be write locked!
-func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) {
+func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, src, target metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) {
 	switch {
 	case lockedSource == nil:
 		return errors.New("no lock provided")
-	case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(sourcePath):
+	case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(src):
 		return errors.New("lockpath does not match filepath")
 	}
 
-	attrs, err := lu.metadataBackend.All(ctx, sourcePath)
+	attrs, err := lu.metadataBackend.All(ctx, src)
 	if err != nil {
 		return err
 	}
@@ -420,7 +385,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta
 		newAttrs[attrName] = val
 	}
 
-	return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, acquireTargetLock)
+	return lu.MetadataBackend().SetMultiple(ctx, target, newAttrs, acquireTargetLock)
 }
 
 // GenerateSpaceID generates a space id for the given space type and owner
@@ -429,21 +394,20 @@ func (lu *Lookup) GenerateSpaceID(spaceType string, owner *user.User) (string, e
 	case _spaceTypeProject:
 		return uuid.New().String(), nil
 	case _spaceTypePersonal:
-		path := templates.WithUser(owner, lu.Options.UserLayout)
+		path := templates.WithUser(owner, lu.Options.PersonalSpacePathTemplate)
 
-		spaceID, err := lu.metadataBackend.Get(context.Background(), filepath.Join(lu.Options.Root, path), prefixes.IDAttr)
+		spaceID, _, err := lu.IDsForPath(context.TODO(), filepath.Join(lu.Options.Root, path))
 		if err != nil {
-			if metadata.IsNotExist(err) || metadata.IsAttrUnset(err) {
-				return uuid.New().String(), nil
-			} else {
-				return "", err
+			_, err := os.Stat(filepath.Join(lu.Options.Root, path))
+			if err != nil {
+				if metadata.IsNotExist(err) || metadata.IsAttrUnset(err) {
+					return uuid.New().String(), nil
+				} else {
+					return "", err
+				}
 			}
 		}
-		resID, err := storagespace.ParseID(string(spaceID))
-		if err != nil {
-			return "", err
-		}
-		return resID.SpaceId, nil
+		return spaceID, nil
 	default:
 		return "", fmt.Errorf("unsupported space type: %s", spaceType)
 	}
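The common thread in these lookup.go hunks: resolving IDs by reading xattrs from a path (metadataBackend.Get/GetInt64) gives way to consulting the bidirectional ID cache (GetCachedID, IDsForPath), and path-typed parameters give way to metadata.MetadataNode values. A minimal in-memory sketch of such a two-way cache (reva's actual IDCache is a shared, persistent store; names here are illustrative):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// IDCache is a toy stand-in for the (spaceID, nodeID) <-> path cache that the
// refactored lookup code consults instead of reading ID xattrs from disk.
type IDCache struct {
	mu       sync.RWMutex
	idToPath map[string]string
	pathToID map[string]string
}

func NewIDCache() *IDCache {
	return &IDCache{idToPath: map[string]string{}, pathToID: map[string]string{}}
}

func key(spaceID, nodeID string) string { return spaceID + "!" + nodeID }

// Set records the pair in both directions.
func (c *IDCache) Set(spaceID, nodeID, path string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.idToPath[key(spaceID, nodeID)] = path
	c.pathToID[path] = key(spaceID, nodeID)
}

// Get resolves IDs to a path, as GetCachedID/InternalPath do.
func (c *IDCache) Get(spaceID, nodeID string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	p, ok := c.idToPath[key(spaceID, nodeID)]
	return p, ok
}

// GetByPath is the reverse lookup that IDsForPath builds on.
func (c *IDCache) GetByPath(path string) (spaceID, nodeID string, ok bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	k, found := c.pathToID[path]
	if !found {
		return "", "", false
	}
	spaceID, nodeID, _ = strings.Cut(k, "!")
	return spaceID, nodeID, true
}

func main() {
	c := NewIDCache()
	c.Set("space-a", "node-1", "/spaces/a/nodes/1")
	fmt.Println(c.Get("space-a", "node-1"))       // /spaces/a/nodes/1 true
	fmt.Println(c.GetByPath("/spaces/a/nodes/1")) // space-a node-1 true
}
```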
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go (generated, vendored; 12 lines changed)
@@ -133,13 +133,12 @@ func (tb *Trashbin) MoveToTrash(ctx context.Context, n *node.Node, path string)
 	if err = tb.lu.IDCache.DeleteByPath(ctx, path); err != nil {
 		return err
 	}
 
-	itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem")
-	err = tb.lu.MetadataBackend().Rename(path, itemTrashPath)
+	err = tb.lu.MetadataBackend().Purge(ctx, n)
 	if err != nil {
 		return err
 	}
 
+	itemTrashPath := filepath.Join(trashPath, "files", key+".trashitem")
+	return os.Rename(path, itemTrashPath)
 }
@@ -241,13 +240,13 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, ref *provider.Refere
 	}
 	restorePath := filepath.Join(restoreBaseNode.InternalPath(), restoreRef.GetPath())
 
-	id, err := tb.lu.MetadataBackend().Get(ctx, trashPath, prefixes.IDAttr)
+	spaceID, id, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, trashPath)
 	if err != nil {
 		return err
 	}
 
 	// update parent id in case it was restored to a different location
-	parentID, err := tb.lu.MetadataBackend().Get(ctx, filepath.Dir(restorePath), prefixes.IDAttr)
+	_, parentID, _, err := tb.lu.MetadataBackend().IdentifyPath(ctx, filepath.Dir(restorePath))
 	if err != nil {
 		return err
 	}
@@ -255,7 +254,8 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, ref *provider.Refere
 		return fmt.Errorf("trashbin: parent id not found for %s", restorePath)
 	}
 
-	err = tb.lu.MetadataBackend().Set(ctx, trashPath, prefixes.ParentidAttr, parentID)
+	trashNode := node.NewBaseNode(spaceID, id, tb.lu)
+	err = tb.lu.MetadataBackend().Set(ctx, trashNode, prefixes.ParentidAttr, []byte(parentID))
 	if err != nil {
 		return err
 	}
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/assimilation.go (generated, vendored; 249 lines changed)
@@ -33,6 +33,7 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
|
||||
@@ -71,6 +72,24 @@ type queueItem struct {
|
||||
|
||||
const dirtyFlag = "user.oc.dirty"
|
||||
|
||||
type assimilationNode struct {
|
||||
path string
|
||||
nodeId string
|
||||
spaceID string
|
||||
}
|
||||
|
||||
func (d assimilationNode) GetID() string {
|
||||
return d.nodeId
|
||||
}
|
||||
|
||||
func (d assimilationNode) GetSpaceID() string {
|
||||
return d.spaceID
|
||||
}
|
||||
|
||||
func (d assimilationNode) InternalPath() string {
|
||||
return d.path
|
||||
}
|
||||
|
||||
// NewScanDebouncer returns a new SpaceDebouncer instance
|
||||
func NewScanDebouncer(d time.Duration, f func(item scanItem)) *ScanDebouncer {
|
||||
return &ScanDebouncer{
|
||||
@@ -254,35 +273,34 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool) error {
|
||||
}
|
||||
|
||||
func (t *Tree) HandleFileDelete(path string) error {
|
||||
// purge metadata
|
||||
if err := t.lookup.(*lookup.Lookup).IDCache.DeleteByPath(context.Background(), path); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not delete id cache entry by path")
|
||||
}
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), path); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not purge metadata")
|
||||
}
|
||||
|
||||
// send event
|
||||
owner, spaceID, nodeID, parentID, err := t.getOwnerAndIDs(filepath.Dir(path))
|
||||
n, err := t.getNodeForPath(filepath.Dir(path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// purge metadata
|
||||
if err := t.lookup.(*lookup.Lookup).IDCache.DeleteByPath(context.Background(), path); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not delete id cache entry by path")
|
||||
}
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), n); err != nil {
|
||||
t.log.Error().Err(err).Str("path", path).Msg("could not purge metadata")
|
||||
}
|
||||
|
||||
t.PublishEvent(events.ItemTrashed{
|
||||
Owner: owner,
|
||||
Executant: owner,
|
||||
Owner: n.Owner(),
|
||||
Executant: n.Owner(),
|
||||
Ref: &provider.Reference{
|
||||
ResourceId: &provider.ResourceId{
|
||||
StorageId: t.options.MountID,
|
||||
SpaceId: spaceID,
|
||||
OpaqueId: parentID,
|
||||
SpaceId: n.SpaceID,
|
||||
OpaqueId: n.ParentID,
|
||||
},
|
||||
Path: filepath.Base(path),
|
||||
},
|
||||
ID: &provider.ResourceId{
|
||||
StorageId: t.options.MountID,
|
||||
SpaceId: spaceID,
|
||||
OpaqueId: nodeID,
|
||||
SpaceId: n.SpaceID,
|
||||
OpaqueId: n.ID,
|
||||
},
|
||||
Timestamp: utils.TSNow(),
|
||||
})
|
||||
@@ -290,37 +308,15 @@ func (t *Tree) HandleFileDelete(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Tree) getOwnerAndIDs(path string) (*userv1beta1.UserId, string, string, string, error) {
|
||||
func (t *Tree) getNodeForPath(path string) (*node.Node, error) {
|
||||
lu := t.lookup.(*lookup.Lookup)
|
||||
|
||||
spaceID, nodeID, err := lu.IDsForPath(context.Background(), path)
|
||||
if err != nil {
|
||||
return nil, "", "", "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrs, err := t.lookup.MetadataBackend().All(context.Background(), path)
|
||||
if err != nil {
|
||||
return nil, "", "", "", err
|
||||
}
|
||||
|
||||
parentID := string(attrs[prefixes.ParentidAttr])
|
||||
|
||||
spacePath, ok := lu.GetCachedID(context.Background(), spaceID, spaceID)
|
||||
if !ok {
|
||||
return nil, "", "", "", fmt.Errorf("could not find space root for path %s", path)
|
||||
}
|
||||
|
||||
spaceAttrs, err := t.lookup.MetadataBackend().All(context.Background(), spacePath)
|
||||
if err != nil {
|
||||
return nil, "", "", "", err
|
||||
}
|
||||
|
||||
owner := &userv1beta1.UserId{
|
||||
Idp: string(spaceAttrs[prefixes.OwnerIDPAttr]),
|
||||
OpaqueId: string(spaceAttrs[prefixes.OwnerIDAttr]),
|
||||
}
|
||||
|
||||
return owner, nodeID, spaceID, parentID, nil
|
||||
return node.ReadNode(context.Background(), lu, spaceID, nodeID, false, nil, false)
|
||||
}
|
||||
|
||||
func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
|
||||
@@ -328,8 +324,7 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
|
||||
spaceCandidate := path
|
||||
spaceAttrs := node.Attributes{}
|
||||
for strings.HasPrefix(spaceCandidate, t.options.Root) {
|
||||
spaceAttrs, err := t.lookup.MetadataBackend().All(context.Background(), spaceCandidate)
|
||||
spaceID := spaceAttrs[prefixes.SpaceIDAttr]
|
||||
spaceID, _, err := t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), spaceCandidate)
|
||||
if err == nil && len(spaceID) > 0 {
|
||||
if t.options.UseSpaceGroups {
|
||||
// set the uid and gid for the space
|
||||
@@ -353,7 +348,6 @@ func (t *Tree) findSpaceId(path string) (string, node.Attributes, error) {
|
||||
}
|
||||
|
||||
func (t *Tree) assimilate(item scanItem) error {
|
||||
var id []byte
|
||||
var err error
|
||||
|
||||
// First find the space id
|
||||
@@ -362,8 +356,13 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
return err
|
||||
}
|
||||
|
||||
assimilationNode := &assimilationNode{
|
||||
spaceID: spaceID,
|
||||
path: item.Path,
|
||||
}
|
||||
|
||||
// lock the file for assimilation
|
||||
unlock, err := t.lookup.MetadataBackend().Lock(item.Path)
|
||||
unlock, err := t.lookup.MetadataBackend().Lock(assimilationNode)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to lock item for assimilation")
|
||||
}
|
||||
@@ -377,27 +376,28 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
}
|
||||
|
||||
// check for the id attribute again after grabbing the lock, maybe the file was assimilated/created by us in the meantime
|
||||
md, err := t.lookup.MetadataBackend().All(context.Background(), item.Path)
|
||||
_, id, mtime, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), item.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
attrs := node.Attributes(md)
|
||||
|
||||
// compare metadata mtime with actual mtime. if it matches we can skip the assimilation because the file was handled by us
|
||||
mtime, err := attrs.Time(prefixes.MTimeAttr)
|
||||
fi, err := os.Stat(item.Path)
|
||||
if err == nil {
|
||||
fi, err := os.Stat(item.Path)
|
||||
if err == nil {
|
||||
if mtime.Equal(fi.ModTime()) {
|
||||
return nil
|
||||
}
|
||||
// FIXME the mtime does not change on a move, so we have to compare ctime
|
||||
stat := fi.Sys().(*syscall.Stat_t)
|
||||
ctime := time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec))
|
||||
if mtime.Equal(ctime) && !item.ForceRescan {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
id = attrs[prefixes.IDAttr]
|
||||
if err == nil {
|
||||
if id != "" {
|
||||
// the file has an id set, we already know it from the past
|
||||
n := node.NewBaseNode(spaceID, id, t.lookup)
|
||||
|
||||
previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), spaceID, string(id))
|
||||
previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.ParentidAttr)
|
||||
previousParentID, _ := t.lookup.MetadataBackend().Get(context.Background(), n, prefixes.ParentidAttr)
|
||||
|
||||
// was it moved or copied/restored with a clashing id?
|
||||
if ok && len(previousParentID) > 0 && previousPath != item.Path {
|
||||
@@ -406,7 +406,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
// this id clashes with an existing item -> clear metadata and re-assimilate
|
||||
t.log.Debug().Str("path", item.Path).Msg("ID clash detected, purging metadata and re-assimilating")
|
||||
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), item.Path); err != nil {
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), assimilationNode); err != nil {
|
||||
t.log.Error().Err(err).Str("path", item.Path).Msg("could not purge metadata")
|
||||
}
|
||||
go func() {
|
||||
@@ -421,23 +421,16 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, string(id), item.Path); err != nil {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
|
||||
}
|
||||
_, err := t.updateFile(item.Path, string(id), spaceID)
|
||||
_, attrs, err := t.updateFile(item.Path, string(id), spaceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// purge original metadata. Only delete the path entry using DeletePath(reverse lookup), not the whole entry pair.
|
||||
// Delete the path entry using DeletePath(reverse lookup), not the whole entry pair.
|
||||
if err := t.lookup.(*lookup.Lookup).IDCache.DeletePath(context.Background(), previousPath); err != nil {
|
||||
t.log.Error().Err(err).Str("path", previousPath).Msg("could not delete id cache entry by path")
|
||||
}
|
||||
if err := t.lookup.MetadataBackend().Purge(context.Background(), previousPath); err != nil {
|
||||
t.log.Error().Err(err).Str("path", previousPath).Msg("could not purge metadata")
|
||||
}
|
||||
|
||||
fi, err := os.Stat(item.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
// if it was moved and it is a directory we need to propagate the move
|
||||
go func() {
|
||||
@@ -447,13 +440,13 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
}()
|
||||
}
|
||||
|
||||
parentID, err := t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.ParentidAttr)
|
||||
if err == nil && len(parentID) > 0 {
|
||||
parentID := attrs.String(prefixes.ParentidAttr)
|
||||
if len(parentID) > 0 {
|
||||
ref := &provider.Reference{
|
||||
ResourceId: &provider.ResourceId{
|
||||
StorageId: t.options.MountID,
|
||||
SpaceId: spaceID,
|
||||
OpaqueId: string(parentID),
|
||||
OpaqueId: parentID,
|
||||
},
|
||||
Path: filepath.Base(item.Path),
|
||||
}
|
||||
@@ -482,7 +475,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", string(id)).Str("path", item.Path).Msg("could not cache id")
|
||||
}
|
||||
|
||||
_, err := t.updateFile(item.Path, string(id), spaceID)
|
||||
_, _, err := t.updateFile(item.Path, string(id), spaceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -491,7 +484,7 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
t.log.Debug().Str("path", item.Path).Msg("new item detected")
|
||||
// assimilate new file
|
||||
newId := uuid.New().String()
|
||||
fi, err := t.updateFile(item.Path, newId, spaceID)
|
||||
fi, _, err := t.updateFile(item.Path, newId, spaceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -531,26 +524,32 @@ func (t *Tree) assimilate(item scanItem) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Tree) updateFile(path, id, spaceID string) (fs.FileInfo, error) {
func (t *Tree) updateFile(path, id, spaceID string) (fs.FileInfo, node.Attributes, error) {
retries := 1
parentID := ""
bn := assimilationNode{spaceID: spaceID, nodeId: id, path: path}
assimilate:
if id != spaceID {
// read parent
parentAttribs, err := t.lookup.MetadataBackend().All(context.Background(), filepath.Dir(path))
var err error
_, parentID, err = t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), filepath.Dir(path))
if err != nil {
return nil, fmt.Errorf("failed to read parent item attributes")
return nil, nil, fmt.Errorf("failed to read parent id")
}
parentAttribs, err := t.lookup.MetadataBackend().All(context.Background(), node.NewBaseNode(spaceID, parentID, t.lookup))
if err != nil {
return nil, nil, fmt.Errorf("failed to read parent item attributes")
}

if len(parentAttribs) == 0 || len(parentAttribs[prefixes.IDAttr]) == 0 {
if retries == 0 {
return nil, fmt.Errorf("got empty parent attribs even after assimilating")
return nil, nil, fmt.Errorf("got empty parent attribs even after assimilating")
}

// assimilate parent first
err = t.assimilate(scanItem{Path: filepath.Dir(path), ForceRescan: false})
if err != nil {
return nil, err
return nil, nil, err
}

// retry
@@ -563,12 +562,12 @@ assimilate:
// assimilate file
fi, err := os.Stat(path)
if err != nil {
return nil, errors.Wrap(err, "failed to stat item")
return nil, nil, errors.Wrap(err, "failed to stat item")
}

attrs, err := t.lookup.MetadataBackend().All(context.Background(), path)
attrs, err := t.lookup.MetadataBackend().All(context.Background(), bn)
if err != nil && !metadata.IsAttrUnset(err) {
return nil, errors.Wrap(err, "failed to get item attribs")
return nil, nil, errors.Wrap(err, "failed to get item attribs")
}
previousAttribs := node.Attributes(attrs)

@@ -587,6 +586,7 @@ assimilate:
attributes[prefixes.ChecksumPrefix+"adler32"] = adler32h.Sum(nil)
}

var n *node.Node
if fi.IsDir() {
attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_CONTAINER))
attributes.SetInt64(prefixes.TreesizeAttr, 0)
@@ -594,19 +594,26 @@ assimilate:
attributes[prefixes.TreesizeAttr] = previousAttribs[prefixes.TreesizeAttr]
}
attributes[prefixes.PropagationAttr] = []byte("1")
treeSize, err := attributes.Int64(prefixes.TreesizeAttr)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse treesize")
}
n = node.New(spaceID, id, parentID, filepath.Base(path), treeSize, "", provider.ResourceType_RESOURCE_TYPE_CONTAINER, nil, t.lookup)
} else {
attributes.SetString(prefixes.BlobIDAttr, uuid.NewString())
blobID := uuid.NewString()
attributes.SetString(prefixes.BlobIDAttr, blobID)
attributes.SetInt64(prefixes.BlobsizeAttr, fi.Size())
attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE))
n = node.New(spaceID, id, parentID, filepath.Base(path), fi.Size(), blobID, provider.ResourceType_RESOURCE_TYPE_FILE, nil, t.lookup)
}
attributes.SetTime(prefixes.MTimeAttr, fi.ModTime())

n := node.New(spaceID, id, parentID, filepath.Base(path), fi.Size(), "", provider.ResourceType_RESOURCE_TYPE_FILE, nil, t.lookup)
n.SpaceRoot = &node.Node{SpaceID: spaceID, ID: spaceID}
n.SpaceRoot = &node.Node{BaseNode: node.BaseNode{SpaceID: spaceID, ID: spaceID}}

go func() {
// Copy the previous current version to a revision
currentPath := t.lookup.(*lookup.Lookup).CurrentPath(n.SpaceID, n.ID)
currentNode := node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup)
currentPath := currentNode.InternalPath()
stat, err := os.Stat(currentPath)
if err != nil {
t.log.Error().Err(err).Str("path", path).Str("currentPath", currentPath).Msg("could not stat current path")
@@ -640,7 +647,7 @@ assimilate:
return
}

err = t.lookup.CopyMetadata(context.Background(), n.InternalPath(), currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
err = t.lookup.CopyMetadata(context.Background(), n, currentNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -654,26 +661,26 @@ assimilate:

err = t.Propagate(context.Background(), n, 0)
if err != nil {
return nil, errors.Wrap(err, "failed to propagate")
return nil, nil, errors.Wrap(err, "failed to propagate")
}

t.log.Debug().Str("path", path).Interface("attributes", attributes).Msg("setting attributes")
err = t.lookup.MetadataBackend().SetMultiple(context.Background(), path, attributes, false)
err = t.lookup.MetadataBackend().SetMultiple(context.Background(), bn, attributes, false)
if err != nil {
return nil, errors.Wrap(err, "failed to set attributes")
return nil, nil, errors.Wrap(err, "failed to set attributes")
}

if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, id, path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
}

return fi, nil
return fi, attributes, nil
}

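`updateFile` now returns the attributes it wrote alongside the `fs.FileInfo`, so callers such as `assimilate` can reuse them without another backend read. A hedged, self-contained sketch of the new shape, using toy types rather than the real `Tree` method:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// Attributes is a toy stand-in for node.Attributes.
type Attributes map[string][]byte

// updateFile is a hypothetical stand-in for Tree.updateFile: the real method
// now returns the attributes it wrote alongside the fs.FileInfo.
func updateFile(path string) (fs.FileInfo, Attributes, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return nil, nil, err
	}
	attrs := Attributes{"user.oc.id": []byte("toy-id")} // illustrative key
	return fi, attrs, nil
}

func main() {
	fi, attrs, err := updateFile(".")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(fi.Name(), "attrs:", len(attrs))
}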
// WarmupIDCache warms up the id cache
func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
root = filepath.Clean(root)
spaceID := []byte("")
spaceID := ""

scopeSpace := func(spaceCandidate string) error {
if !t.options.UseSpaceGroups {
@@ -723,9 +730,8 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
sizes[path] += 0 // Make sure to set the size to 0 for empty directories
}

attribs, err := t.lookup.MetadataBackend().All(context.Background(), path)
if err == nil && len(attribs[prefixes.IDAttr]) > 0 {
nodeSpaceID := attribs[prefixes.SpaceIDAttr]
nodeSpaceID, id, _, err := t.lookup.MetadataBackend().IdentifyPath(context.Background(), path)
if err == nil && len(id) > 0 {
if len(nodeSpaceID) > 0 {
spaceID = nodeSpaceID

@@ -737,7 +743,16 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
// try to find space
spaceCandidate := path
for strings.HasPrefix(spaceCandidate, t.options.Root) {
spaceID, err = t.lookup.MetadataBackend().Get(context.Background(), spaceCandidate, prefixes.SpaceIDAttr)
spaceID, _, err = t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), spaceCandidate)
if err == nil && len(spaceID) > 0 {
err = scopeSpace(path)
if err != nil {
return err
}
break
}

spaceID, _, _, err = t.lookup.MetadataBackend().IdentifyPath(context.Background(), spaceCandidate)
if err == nil {
err = scopeSpace(path)
if err != nil {
@@ -752,20 +767,18 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
return nil // no space found
}

id, ok := attribs[prefixes.IDAttr]
if ok {
if id != "" {
// Check if the item on the previous still exists. In this case it might have been a copy with extended attributes -> set new ID
previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), string(spaceID), string(id))
previousPath, ok := t.lookup.(*lookup.Lookup).GetCachedID(context.Background(), spaceID, id)
if ok && previousPath != path {
// this id clashes with an existing id -> clear metadata and re-assimilate
// this id clashes with an existing id -> re-assimilate
_, err := os.Stat(previousPath)
if err == nil {
_ = t.lookup.MetadataBackend().Purge(context.Background(), path)
_ = t.assimilate(scanItem{Path: path, ForceRescan: true})
}
}
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), string(spaceID), string(id), path); err != nil {
t.log.Error().Err(err).Str("spaceID", string(spaceID)).Str("id", string(id)).Str("path", path).Msg("could not cache id")
if err := t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, id, path); err != nil {
t.log.Error().Err(err).Str("spaceID", spaceID).Str("id", id).Str("path", path).Msg("could not cache id")
}
}
} else if assimilate {
@@ -777,13 +790,23 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
})

for dir, size := range sizes {
spaceID, id, err := t.lookup.(*lookup.Lookup).IDsForPath(context.Background(), dir)
if err != nil {
t.log.Error().Err(err).Str("path", dir).Msg("could not get ids for path")
continue
}
n, err := node.ReadNode(context.Background(), t.lookup, spaceID, id, true, nil, false)
if err != nil {
t.log.Error().Err(err).Str("path", dir).Msg("could not read directory node")
continue
}
if dir == root {
// Propagate the size diff further up the tree
if err := t.propagateSizeDiff(dir, size); err != nil {
if err := t.propagateSizeDiff(n, size); err != nil {
t.log.Error().Err(err).Str("path", dir).Msg("could not propagate size diff")
}
}
if err := t.lookup.MetadataBackend().Set(context.Background(), dir, prefixes.TreesizeAttr, []byte(fmt.Sprintf("%d", size))); err != nil {
if err := t.lookup.MetadataBackend().Set(context.Background(), n, prefixes.TreesizeAttr, []byte(fmt.Sprintf("%d", size))); err != nil {
t.log.Error().Err(err).Str("path", dir).Int64("size", size).Msg("could not set tree size")
}
}
@@ -795,24 +818,12 @@ func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
return nil
}

func (t *Tree) propagateSizeDiff(dir string, size int64) error {
// First find the space id
spaceID, _, err := t.findSpaceId(dir)
if err != nil {
return err
}
attrs, err := t.lookup.MetadataBackend().All(context.Background(), dir)
if err != nil {
return err
}
n, err := t.lookup.NodeFromID(context.Background(), &provider.ResourceId{
StorageId: t.options.MountID,
SpaceId: spaceID,
OpaqueId: string(attrs[prefixes.IDAttr]),
})
func (t *Tree) propagateSizeDiff(n *node.Node, size int64) error {
attrs, err := t.lookup.MetadataBackend().All(context.Background(), n)
if err != nil {
return err
}

oldSize, err := node.Attributes(attrs).Int64(prefixes.TreesizeAttr)
if err != nil {
return err
@@ -821,11 +832,11 @@ func (t *Tree) propagateSizeDiff(dir string, size int64) error {
}

func (t *Tree) setDirty(path string, dirty bool) error {
return t.lookup.MetadataBackend().Set(context.Background(), path, dirtyFlag, []byte(fmt.Sprintf("%t", dirty)))
return xattr.Set(path, dirtyFlag, []byte(fmt.Sprintf("%t", dirty)))
}

func (t *Tree) isDirty(path string) (bool, error) {
dirtyAttr, err := t.lookup.MetadataBackend().Get(context.Background(), path, dirtyFlag)
dirtyAttr, err := xattr.Get(path, dirtyFlag)
if err != nil {
return false, err
}

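The dirty flag now bypasses the metadata backend entirely and is stored as a plain extended attribute via github.com/pkg/xattr. A minimal sketch of that mechanism; the attribute name is illustrative (the real code uses the package-level `dirtyFlag` constant, which is not shown in this diff), and xattrs must be supported by the filesystem:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.CreateTemp("", "dirty-demo")
	if err != nil {
		panic(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	// illustrative attribute name, assumed for this sketch
	const dirtyFlag = "user.oc.dirty"
	if err := xattr.Set(f.Name(), dirtyFlag, []byte(fmt.Sprintf("%t", true))); err != nil {
		fmt.Println("set failed (xattrs unsupported?):", err)
		return
	}
	val, err := xattr.Get(f.Name(), dirtyFlag)
	fmt.Println(string(val), err)
}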
28
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/revisions.go
generated
vendored
@@ -33,7 +33,7 @@ import (

"github.com/opencloud-eu/reva/v2/pkg/appctx"
"github.com/opencloud-eu/reva/v2/pkg/errtypes"
"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storagespace"
@@ -46,7 +46,8 @@ import (

// CreateRevision creates a new version of the node
func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string, f *lockedfile.File) (string, error) {
versionPath := tp.lookup.VersionPath(n.SpaceID, n.ID, version)
revNode := node.NewBaseNode(n.SpaceID, n.ID+node.RevisionIDDelimiter+version, tp.lookup)
versionPath := revNode.InternalPath()

err := os.MkdirAll(filepath.Dir(versionPath), 0700)
if err != nil {
@@ -69,7 +70,7 @@ func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string
}

// copy blob metadata to version node
if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n.InternalPath(), versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n, revNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -125,7 +126,7 @@ func (tp *Tree) ListRevisions(ctx context.Context, ref *provider.Reference) (rev
Key: n.ID + node.RevisionIDDelimiter + parts[1],
Mtime: uint64(mtime.Unix()),
}
_, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, items[i], nil)
_, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, node.NewBaseNode(n.SpaceID, rev.Key, tp.lookup), nil)
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
}
@@ -187,9 +188,8 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
return nil, nil, errtypes.NotFound(f)
}

contentPath := tp.lookup.InternalPath(spaceID, revisionKey)

_, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, contentPath, nil)
revNode := node.NewBaseNode(spaceID, revisionKey, tp.lookup)
_, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, revNode, nil)
if err != nil {
return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID)
}
@@ -261,8 +261,9 @@ func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, re
return n, nil
}

func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source string) error {
target := tp.lookup.InternalPath(spaceID, nodeID)
func (tp *Tree) RestoreRevision(ctx context.Context, srcNode, targetNode metadata.MetadataNode) error {
source := srcNode.InternalPath()
target := targetNode.InternalPath()
rf, err := os.Open(source)
if err != nil {
return err
@@ -280,7 +281,7 @@ func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source str
return err
}

err = tp.lookup.CopyMetadata(ctx, source, target, func(attributeName string, value []byte) (newValue []byte, copy bool) {
err = tp.lookup.CopyMetadata(ctx, srcNode, targetNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -293,7 +294,7 @@ func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source str
// always set the node mtime to the current time
mtime := time.Now()
os.Chtimes(target, mtime, mtime)
err = tp.lookup.MetadataBackend().SetMultiple(ctx, target,
err = tp.lookup.MetadataBackend().SetMultiple(ctx, targetNode,
map[string][]byte{
prefixes.MTimeAttr: []byte(mtime.UTC().Format(time.RFC3339Nano)),
},
@@ -304,7 +305,8 @@ func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source str

// update "current" revision
if tp.options.EnableFSRevisions {
currentPath := tp.lookup.(*lookup.Lookup).CurrentPath(spaceID, nodeID)
currenNode := node.NewBaseNode(targetNode.GetSpaceID(), targetNode.GetID()+node.CurrentIDDelimiter, tp.lookup)
currentPath := currenNode.InternalPath()
w, err := os.OpenFile(currentPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
tp.log.Error().Err(err).Str("currentPath", currentPath).Str("source", source).Msg("could not open current path for writing")
@@ -323,7 +325,7 @@ func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source str
tp.log.Error().Err(err).Str("currentPath", currentPath).Str("source", source).Msg("could not copy new version to current version")
return err
}
err = tp.lookup.CopyMetadata(ctx, source, currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
err = tp.lookup.CopyMetadata(ctx, srcNode, currenNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||

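Revision and "current" files are no longer located via dedicated path helpers (`VersionPath`, `CurrentPath`); instead a `BaseNode` is built whose id embeds the revision key, and its `InternalPath()` resolves to the right file. A sketch of the id scheme; the delimiter values are assumptions, since the `node` package constants are not shown in this diff:

package main

import "fmt"

// assumed stand-ins for node.RevisionIDDelimiter and node.CurrentIDDelimiter;
// the exact values live in the node package, not in this diff
const (
	revisionIDDelimiter = ".REV."
	currentIDDelimiter  = ".CURRENT"
)

func main() {
	nodeID := "af69cbf4-2346-4b8e-962b-0b57417c0af3"
	version := "1740040902"

	// a BaseNode built with this id resolves to the version file
	fmt.Println("revision id:", nodeID+revisionIDDelimiter+version)
	// and this one to the "current" revision used when EnableFSRevisions is on
	fmt.Println("current id:", nodeID+currentIDDelimiter)
}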
245
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go
generated
vendored
@@ -206,7 +206,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool,
nodePath := filepath.Join(parentPath, n.Name)

// lock the meta file
unlock, err := t.lookup.MetadataBackend().Lock(nodePath)
unlock, err := t.lookup.MetadataBackend().Lock(n)
if err != nil {
return err
}
@@ -436,15 +436,13 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro

for name := range work {
path := filepath.Join(dir, name)
nodeID, err := t.lookup.MetadataBackend().Get(ctx, path, prefixes.IDAttr)

_, nodeID, err := t.lookup.(*lookup.Lookup).IDsForPath(ctx, path)
if err != nil {
if metadata.IsAttrUnset(err) {
continue
}
return err
}

child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, string(nodeID), false, n.SpaceRoot, true)
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
if err != nil {
return err
}
@@ -526,159 +524,6 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) error {
return t.Propagate(ctx, n, sizeDiff)
}

// RestoreRecycleItemFunc returns a node and a function to restore it from the trash.
func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, targetNode *node.Node) (*node.Node, *node.Node, func() error, error) {
recycleNode, trashItem, deletedNodePath, origin, err := t.readRecycleItem(ctx, spaceid, key, trashPath)
if err != nil {
return nil, nil, nil, err
}

targetRef := &provider.Reference{
ResourceId: &provider.ResourceId{SpaceId: spaceid, OpaqueId: spaceid},
Path: utils.MakeRelativePath(origin),
}

if targetNode == nil {
targetNode, err = t.lookup.NodeFromResource(ctx, targetRef)
if err != nil {
return nil, nil, nil, err
}
}

if err := targetNode.CheckLock(ctx); err != nil {
return nil, nil, nil, err
}

parent, err := targetNode.Parent(ctx)
if err != nil {
return nil, nil, nil, err
}

fn := func() error {
if targetNode.Exists {
return errtypes.AlreadyExists("origin already exists")
}

// add the entry for the parent dir
err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
if err != nil {
return err
}

// rename to node only name, so it is picked up by id
nodePath := recycleNode.InternalPath()

// attempt to rename only if we're not in a subfolder
if deletedNodePath != nodePath {
err = os.Rename(deletedNodePath, nodePath)
if err != nil {
return err
}
err = t.lookup.MetadataBackend().Rename(deletedNodePath, nodePath)
if err != nil {
return err
}
}

targetNode.Exists = true

attrs := node.Attributes{}
attrs.SetString(prefixes.NameAttr, targetNode.Name)
if trashPath != "" {
// set ParentidAttr to restorePath's node parent id
attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
}

if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
return errors.Wrap(err, "Decomposedfs: could not update recycle node")
}

// delete item link in trash
deletePath := trashItem
if trashPath != "" && trashPath != "/" {
resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
if err != nil {
return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
}
deletePath = filepath.Join(resolvedTrashRoot, trashPath)
}
if err = os.Remove(deletePath); err != nil {
log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item")
}

var sizeDiff int64
if recycleNode.IsDir(ctx) {
treeSize, err := recycleNode.GetTreeSize(ctx)
if err != nil {
return err
}
sizeDiff = int64(treeSize)
} else {
sizeDiff = recycleNode.Blobsize
}
return t.Propagate(ctx, targetNode, sizeDiff)
}
return recycleNode, parent, fn, nil
}

// PurgeRecycleItemFunc returns a node and a function to purge it from the trash
func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, path string) (*node.Node, func() error, error) {
rn, trashItem, deletedNodePath, _, err := t.readRecycleItem(ctx, spaceid, key, path)
if err != nil {
return nil, nil, err
}

fn := func() error {
if err := t.removeNode(ctx, deletedNodePath, rn); err != nil {
return err
}

// delete item link in trash
deletePath := trashItem
if path != "" && path != "/" {
resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
if err != nil {
return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
}
deletePath = filepath.Join(resolvedTrashRoot, path)
}
if err = os.Remove(deletePath); err != nil {
log.Error().Err(err).Str("deletePath", deletePath).Msg("error deleting trash item")
return err
}

return nil
}

return rn, fn, nil
}

func (t *Tree) removeNode(ctx context.Context, path string, n *node.Node) error {
// delete the actual node
if err := utils.RemoveItem(path); err != nil {
log.Error().Err(err).Str("path", path).Msg("error purging node")
return err
}

if err := t.lookup.MetadataBackend().Purge(ctx, path); err != nil {
log.Error().Err(err).Str("path", t.lookup.MetadataBackend().MetadataPath(path)).Msg("error purging node metadata")
return err
}

// delete blob from blobstore
if n.BlobID != "" {
if err := t.DeleteBlob(n); err != nil {
log.Error().Err(err).Str("blobID", n.BlobID).Msg("error purging nodes blob")
return err
}
}

// delete revisions
// posixfs doesn't do revisions yet

return nil
}

// Propagate propagates changes to the root of the tree
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
// We do not propagate size diffs here but rely on the assimilation to take care of the tree sizes instead
@@ -686,15 +531,15 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}

// WriteBlob writes a blob to the blobstore
func (t *Tree) WriteBlob(node *node.Node, source string) error {
func (t *Tree) WriteBlob(n *node.Node, source string) error {
var currentPath string
var err error

if t.options.EnableFSRevisions {
currentPath = t.lookup.(*lookup.Lookup).CurrentPath(node.SpaceID, node.ID)
currentPath = t.lookup.(*lookup.Lookup).CurrentPath(n.SpaceID, n.ID)

defer func() {
_ = t.lookup.CopyMetadata(context.Background(), node.InternalPath(), currentPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
_ = t.lookup.CopyMetadata(context.Background(), n, node.NewBaseNode(n.SpaceID, n.ID+node.CurrentIDDelimiter, t.lookup), func(attributeName string, value []byte) (newValue []byte, copy bool) {
return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
attributeName == prefixes.TypeAttr ||
attributeName == prefixes.BlobIDAttr ||
@@ -703,7 +548,7 @@ func (t *Tree) WriteBlob(node *node.Node, source string) error {
}()
}

err = t.blobstore.Upload(node, source, currentPath)
err = t.blobstore.Upload(n, source, currentPath)
return err
}

@@ -740,7 +585,7 @@ func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (met
}

// create and write lock new node metadata
unlock, err := t.lookup.MetadataBackend().Lock(n.InternalPath())
unlock, err := t.lookup.MetadataBackend().Lock(n)
if err != nil {
return nil, err
}
@@ -776,7 +621,7 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
path := filepath.Join(parentPath, n.Name)

// lock the meta file
unlock, err := t.lookup.MetadataBackend().Lock(path)
unlock, err := t.lookup.MetadataBackend().Lock(n)
if err != nil {
return err
}
@@ -803,76 +648,6 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {

var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)

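`nodeIDRegep` recovers a node id from a resolved trash path, where the id is "pathified" into nested directories under `nodes/`. A small self-contained check of that extraction; the example path is made up:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	re := regexp.MustCompile(`.*/nodes/([^.]*).*`)

	// hypothetical resolved trash path: the id is split into four levels
	// of two characters followed by the remainder
	deleted := "/var/lib/storage/spaces/af/69cbf4/nodes/af/69/cb/f4/-2346-4b8e-962b-0b57417c0af3"

	// capture everything after nodes/, then strip the path separators
	id := re.ReplaceAllString(deleted, "$1")
	id = strings.ReplaceAll(id, "/", "")
	fmt.Println(id) // af69cbf4-2346-4b8e-962b-0b57417c0af3
}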
// TODO refactor the returned params into Node properties? would make all the path transformations go away...
func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) {
if key == "" {
return nil, "", "", "", errtypes.InternalError("key is empty")
}

backend := t.lookup.MetadataBackend()
var nodeID string

trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2))
resolvedTrashItem, err := filepath.EvalSymlinks(trashItem)
if err != nil {
return
}
deletedNodePath, err = filepath.EvalSymlinks(filepath.Join(resolvedTrashItem, path))
if err != nil {
return
}
nodeID = nodeIDRegep.ReplaceAllString(deletedNodePath, "$1")
nodeID = strings.ReplaceAll(nodeID, "/", "")

recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false, nil, false)
if err != nil {
return
}
recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))

var attrBytes []byte
if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
// lookup blobID in extended attributes
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
recycleNode.BlobID = string(attrBytes)
} else {
return
}

// lookup blobSize in extended attributes
if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
return
}
}

// lookup parent id in extended attributes
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
recycleNode.ParentID = string(attrBytes)
} else {
return
}

// lookup name in extended attributes
if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
recycleNode.Name = string(attrBytes)
} else {
return
}

// get origin node, is relative to space root
origin = "/"

// lookup origin path in extended attributes
if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
origin = filepath.Join(string(attrBytes), path)
} else {
log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
}

return
}

func (t *Tree) isIgnored(path string) bool {
return isLockFile(path) || isTrash(path) || t.isUpload(path) || isInternal(path)
}

11
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/decomposedfs.go
generated
vendored
@@ -52,7 +52,6 @@ import (
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/aspects"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/migrator"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/permissions"
@@ -193,14 +192,6 @@ func New(o *options.Options, aspects aspects.Aspects, log *zerolog.Logger) (stor
return nil, errors.Wrap(err, "could not setup tree")
}

// Run migrations & return
m := migrator.New(aspects.Lookup, log)
err = m.RunMigrations()
if err != nil {
log.Error().Err(err).Msg("could not migrate tree")
return nil, errors.Wrap(err, "could not migrate tree")
}

if o.MaxAcquireLockCycles != 0 {
filelocks.SetMaxLockCycles(o.MaxAcquireLockCycles)
}
@@ -373,7 +364,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
var isVersion bool
if session.NodeExists() {
info, err := session.GetInfo(ctx)
if err == nil && info.MetaData["versionsPath"] != "" {
if err == nil && info.MetaData["versionID"] != "" {
isVersion = true
}
}

2
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/grants.go
generated
vendored
@@ -293,7 +293,7 @@ func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference,
}

// lock the metadata file
unlockFunc, err := fs.lu.MetadataBackend().Lock(n.InternalPath())
unlockFunc, err := fs.lu.MetadataBackend().Lock(n)
if err != nil {
return nil, nil, nil, err
}

63
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup/lookup.go
generated
vendored
@@ -72,7 +72,7 @@ func (lu *Lookup) MetadataBackend() metadata.Backend {
return lu.metadataBackend
}

func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs node.Attributes) (string, int64, error) {
func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, n metadata.MetadataNode, attrs node.Attributes) (string, int64, error) {
blobID := ""
blobSize := int64(0)
var err error
@@ -86,7 +86,7 @@ func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs
}
}
} else {
attrs, err := lu.metadataBackend.All(ctx, path)
attrs, err := lu.metadataBackend.All(ctx, n)
if err != nil {
return "", 0, errors.Wrapf(err, "error reading blobid xattr")
}
@@ -100,56 +100,14 @@ func (lu *Lookup) ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs
return blobID, blobSize, nil
}

func readChildNodeFromLink(path string) (string, error) {
link, err := os.Readlink(path)
if err != nil {
return "", err
}
nodeID := strings.TrimLeft(link, "/.")
nodeID = strings.ReplaceAll(nodeID, "/", "")
return nodeID, nil
}

func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, parent *node.Node, name string) (string, error) {
nodeID, err := readChildNodeFromLink(filepath.Join(parent.InternalPath(), name))
nodeID, err := node.ReadChildNodeFromLink(ctx, filepath.Join(parent.InternalPath(), name))
if err != nil {
return "", errors.Wrap(err, "decomposedfs: Wrap: readlink error")
}
return nodeID, nil
}

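`readChildNodeFromLink` moved into the node package (now `node.ReadChildNodeFromLink`, with an added context parameter), but the mechanism is unchanged: a directory entry is a relative symlink whose target encodes the child's node id. A runnable sketch of that decoding, with a made-up link target:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// decode mirrors the moved helper: strip the leading ./.. segments from the
// link target, then drop the separators that split the id into directories
func decode(entry string) (string, error) {
	link, err := os.Readlink(entry)
	if err != nil {
		return "", err
	}
	nodeID := strings.TrimLeft(link, "/.")
	return strings.ReplaceAll(nodeID, "/", ""), nil
}

func main() {
	dir, err := os.MkdirTemp("", "link-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	entry := filepath.Join(dir, "file.txt")
	if err := os.Symlink("../../../../../no/de/id", entry); err != nil {
		panic(err)
	}
	id, err := decode(entry)
	fmt.Println(id, err) // nodeid <nil>
}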
// TypeFromPath returns the type of the node at the given path
func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.ResourceType {
// Try to read from xattrs
typeAttr, err := lu.metadataBackend.GetInt64(ctx, path, prefixes.TypeAttr)
if err == nil {
return provider.ResourceType(int32(typeAttr))
}

t := provider.ResourceType_RESOURCE_TYPE_INVALID
// Fall back to checking on disk
fi, err := os.Lstat(path)
if err != nil {
return t
}

switch {
case fi.IsDir():
if _, err = lu.metadataBackend.Get(ctx, path, prefixes.ReferenceAttr); err == nil {
t = provider.ResourceType_RESOURCE_TYPE_REFERENCE
} else {
t = provider.ResourceType_RESOURCE_TYPE_CONTAINER
}
case fi.Mode().IsRegular():
t = provider.ResourceType_RESOURCE_TYPE_FILE
case fi.Mode()&os.ModeSymlink != 0:
t = provider.ResourceType_RESOURCE_TYPE_SYMLINK
// TODO reference using ext attr on a symlink
// nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE
}
return t
}

// NodeFromResource takes in a request path or request id and converts it to a Node
func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) {
ctx, span := tracer.Start(ctx, "NodeFromResource")
@@ -310,6 +268,7 @@ func (lu *Lookup) InternalPath(spaceID, nodeID string) string {
}

// VersionPath returns the internal path for a version of a node
// Deprecated: use InternalPath instead
func (lu *Lookup) VersionPath(spaceID, nodeID, version string) string {
return lu.InternalPath(spaceID, nodeID) + node.RevisionIDDelimiter + version
}
@@ -336,10 +295,10 @@ func refFromCS3(b []byte) (*provider.Reference, error) {
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a shared lock is acquired.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) {
func (lu *Lookup) CopyMetadata(ctx context.Context, sourceNode, targetNode metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) {
// Acquire a read log on the source node
// write lock existing node before reading treesize or tree time
lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(src), os.O_RDONLY|os.O_CREATE, 0600)
lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(sourceNode), os.O_RDONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
@@ -356,22 +315,22 @@ func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter f
}
}()

return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, lock, acquireTargetLock)
return lu.CopyMetadataWithSourceLock(ctx, sourceNode, targetNode, filter, lock, acquireTargetLock)
}

// CopyMetadataWithSourceLock copies all extended attributes from source to target.
// The optional filter function can be used to filter by attribute name, e.g. by checking a prefix
// For the source file, a matching lockedfile is required.
// NOTE: target resource will be write locked!
func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) {
func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourceNode, targetNode metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) {
switch {
case lockedSource == nil:
return errors.New("no lock provided")
case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(sourcePath):
case lockedSource.File.Name() != lu.MetadataBackend().LockfilePath(sourceNode):
return errors.New("lockpath does not match filepath")
}

attrs, err := lu.metadataBackend.All(ctx, sourcePath)
attrs, err := lu.metadataBackend.All(ctx, sourceNode)
if err != nil {
return err
}
@@ -387,7 +346,7 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta
newAttrs[attrName] = val
}

return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, acquireTargetLock)
return lu.MetadataBackend().SetMultiple(ctx, targetNode, newAttrs, acquireTargetLock)
}

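The `CopyMetadata`/`CopyMetadataWithSourceLock` callers in this diff all pass the same kind of filter: copy only checksums, type and blob information to the target node. A self-contained sketch of how such a filter behaves; the attribute names are illustrative stand-ins for the `prefixes` constants:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// illustrative names; the real code uses prefixes.ChecksumPrefix,
	// prefixes.TypeAttr and prefixes.BlobIDAttr
	const (
		checksumPrefix = "user.oc.cs."
		typeAttr       = "user.oc.type"
		blobIDAttr     = "user.oc.blobid"
	)

	// mirrors the closures passed to CopyMetadata above: the filter returns
	// the (possibly rewritten) value and whether to copy it at all
	filter := func(name string, value []byte) ([]byte, bool) {
		keep := strings.HasPrefix(name, checksumPrefix) ||
			name == typeAttr ||
			name == blobIDAttr
		return value, keep
	}

	source := map[string][]byte{
		"user.oc.cs.sha1": []byte("da39..."),
		"user.oc.blobid":  []byte("b1"),
		"user.oc.name":    []byte("file.txt"), // filtered out
	}
	target := map[string][]byte{}
	for k, v := range source {
		if nv, ok := filter(k, v); ok {
			target[k] = nv
		}
	}
	fmt.Println(len(target), "of", len(source), "attributes copied")
}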
// TimeManager returns the time manager

@@ -27,9 +27,11 @@ import (
"path/filepath"
"strconv"
"strings"
"time"

"github.com/google/renameio/v2"
"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/pkg/xattr"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/shamaton/msgpack/v2"
@@ -59,33 +61,63 @@ func NewMessagePackBackend(rootPath string, o cache.Config) MessagePackBackend {
// Name returns the name of the backend
func (MessagePackBackend) Name() string { return "messagepack" }

// IdentifyPath returns the space id, node id and mtime of a file
func (b MessagePackBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
metaPath := filepath.Join(path + ".mpk")
source, err := os.Open(metaPath)
// No cached entry found. Read from storage and store in cache
if err != nil {
return "", "", time.Time{}, err
}
msgBytes, err := io.ReadAll(source)
if err != nil || len(msgBytes) == 0 {
return "", "", time.Time{}, err

}
attribs := map[string][]byte{}
err = msgpack.Unmarshal(msgBytes, &attribs)
if err != nil {
return "", "", time.Time{}, err
}

spaceID := attribs[prefixes.SpaceIDAttr]
id := attribs[prefixes.IDAttr]

mtimeAttr := attribs[prefixes.MTimeAttr]
mtime, err := time.Parse(time.RFC3339Nano, string(mtimeAttr))
if err != nil {
return "", "", time.Time{}, err
}
return string(spaceID), string(id), mtime, nil
}

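`IdentifyPath` reads the `.mpk` sidecar directly: the file is a single msgpack-encoded map of attribute byte slices, from which the space id, node id and RFC3339Nano mtime are extracted. A minimal round-trip sketch using the same library; the attribute keys are illustrative stand-ins for the `prefixes` constants:

package main

import (
	"fmt"
	"time"

	"github.com/shamaton/msgpack/v2"
)

func main() {
	// encode a toy attribute map the way the .mpk sidecar stores it
	attribs := map[string][]byte{
		"user.oc.space.id": []byte("s1"), // illustrative keys
		"user.oc.id":       []byte("n1"),
		"user.oc.mtime":    []byte(time.Now().UTC().Format(time.RFC3339Nano)),
	}
	blob, err := msgpack.Marshal(attribs)
	if err != nil {
		panic(err)
	}

	// decode and parse the mtime, mirroring IdentifyPath above
	decoded := map[string][]byte{}
	if err := msgpack.Unmarshal(blob, &decoded); err != nil {
		panic(err)
	}
	mtime, err := time.Parse(time.RFC3339Nano, string(decoded["user.oc.mtime"]))
	fmt.Println(string(decoded["user.oc.space.id"]), string(decoded["user.oc.id"]), mtime, err)
}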
// All reads all extended attributes for a node
func (b MessagePackBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, nil)
func (b MessagePackBackend) All(ctx context.Context, n MetadataNode) (map[string][]byte, error) {
return b.loadAttributes(ctx, n, nil)
}

// Get an extended attribute value for the given key
func (b MessagePackBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
func (b MessagePackBackend) Get(ctx context.Context, n MetadataNode, key string) ([]byte, error) {
attribs, err := b.loadAttributes(ctx, n, nil)
if err != nil {
return []byte{}, err
}
val, ok := attribs[key]
if !ok {
return []byte{}, &xattr.Error{Op: "mpk.get", Path: path, Name: key, Err: xattr.ENOATTR}
return []byte{}, &xattr.Error{Op: "mpk.get", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
}
return val, nil
}

// GetInt64 reads a string as int64 from the xattrs
func (b MessagePackBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
func (b MessagePackBackend) GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error) {
attribs, err := b.loadAttributes(ctx, n, nil)
if err != nil {
return 0, err
}
val, ok := attribs[key]
if !ok {
return 0, &xattr.Error{Op: "mpk.get", Path: path, Name: key, Err: xattr.ENOATTR}
return 0, &xattr.Error{Op: "mpk.get", Path: n.InternalPath(), Name: key, Err: xattr.ENOATTR}
}
i, err := strconv.ParseInt(string(val), 10, 64)
if err != nil {
@@ -96,8 +128,8 @@ func (b MessagePackBackend) GetInt64(ctx context.Context, path, key string) (int

// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (b MessagePackBackend) List(ctx context.Context, path string) ([]string, error) {
attribs, err := b.loadAttributes(ctx, path, nil)
func (b MessagePackBackend) List(ctx context.Context, n MetadataNode) ([]string, error) {
attribs, err := b.loadAttributes(ctx, n, nil)
if err != nil {
return nil, err
}
@@ -109,27 +141,27 @@ func (b MessagePackBackend) List(ctx context.Context, path string) ([]string, er
}

// Set sets one attribute for the given path
func (b MessagePackBackend) Set(ctx context.Context, path, key string, val []byte) error {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
func (b MessagePackBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) error {
return b.SetMultiple(ctx, n, map[string][]byte{key: val}, true)
}

// SetMultiple sets a set of attribute for the given path
func (b MessagePackBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(ctx, path, attribs, nil, acquireLock)
func (b MessagePackBackend) SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) error {
return b.saveAttributes(ctx, n, attribs, nil, acquireLock)
}

// Remove an extended attribute key
func (b MessagePackBackend) Remove(ctx context.Context, path, key string, acquireLock bool) error {
return b.saveAttributes(ctx, path, nil, []string{key}, acquireLock)
func (b MessagePackBackend) Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error {
return b.saveAttributes(ctx, n, nil, []string{key}, acquireLock)
}

// AllWithLockedSource reads all extended attributes from the given reader (if possible).
// The path argument is used for storing the data in the cache
func (b MessagePackBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(ctx, path, source)
func (b MessagePackBackend) AllWithLockedSource(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error) {
return b.loadAttributes(ctx, n, source)
}

func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
func (b MessagePackBackend) saveAttributes(ctx context.Context, n MetadataNode, setAttribs map[string][]byte, deleteAttribs []string, acquireLock bool) error {
var (
err error
f readWriteCloseSeekTruncater
@@ -144,8 +176,8 @@ func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, set
span.End()
}()

lockPath := b.LockfilePath(path)
metaPath := b.MetadataPath(path)
lockPath := b.LockfilePath(n)
metaPath := b.MetadataPath(n)
if acquireLock {
_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
f, err = lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
@@ -199,21 +231,21 @@ func (b MessagePackBackend) saveAttributes(ctx context.Context, path string, set
subspan.End()

_, subspan = tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
subspan.End()
return err
}

func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
func (b MessagePackBackend) loadAttributes(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error) {
ctx, span := tracer.Start(ctx, "loadAttributes")
defer span.End()
attribs := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
err := b.metaCache.PullFromCache(b.cacheKey(n), &attribs)
if err == nil {
return attribs, err
}

metaPath := b.MetadataPath(path)
metaPath := b.MetadataPath(n)
var msgBytes []byte

if source == nil {
@@ -229,7 +261,7 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
// actual file (not the metafile) does not exist in order to
// determine whether a node exists or not -> stat the actual node
_, subspan := tracer.Start(ctx, "os.Stat")
_, err := os.Stat(path)
_, err := os.Stat(n.InternalPath())
subspan.End()
if err != nil {
return nil, err
@@ -258,7 +290,7 @@ func (b MessagePackBackend) loadAttributes(ctx context.Context, path string, sou
}

_, subspan := tracer.Start(ctx, "metaCache.PushToCache")
err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
subspan.End()
if err != nil {
return nil, err
@@ -273,40 +305,40 @@ func (MessagePackBackend) IsMetaFile(path string) bool {
}

// Purge purges the data of a given path
func (b MessagePackBackend) Purge(_ context.Context, path string) error {
if err := b.metaCache.RemoveMetadata(b.cacheKey(path)); err != nil {
func (b MessagePackBackend) Purge(_ context.Context, n MetadataNode) error {
if err := b.metaCache.RemoveMetadata(b.cacheKey(n)); err != nil {
return err
}
return os.Remove(b.MetadataPath(path))
return os.Remove(b.MetadataPath(n))
}

// Rename moves the data for a given path to a new path
func (b MessagePackBackend) Rename(oldPath, newPath string) error {
func (b MessagePackBackend) Rename(oldNode, newNode MetadataNode) error {
data := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(oldPath), &data)
err := b.metaCache.PullFromCache(b.cacheKey(oldNode), &data)
if err == nil {
err = b.metaCache.PushToCache(b.cacheKey(newPath), data)
err = b.metaCache.PushToCache(b.cacheKey(newNode), data)
if err != nil {
return err
}
}
err = b.metaCache.RemoveMetadata(b.cacheKey(oldPath))
err = b.metaCache.RemoveMetadata(b.cacheKey(oldNode))
if err != nil {
return err
}

return os.Rename(b.MetadataPath(oldPath), b.MetadataPath(newPath))
return os.Rename(b.MetadataPath(oldNode), b.MetadataPath(newNode))
}

// MetadataPath returns the path of the file holding the metadata for the given path
func (MessagePackBackend) MetadataPath(path string) string { return path + ".mpk" }
func (MessagePackBackend) MetadataPath(n MetadataNode) string { return n.InternalPath() + ".mpk" }

// LockfilePath returns the path of the lock file
func (MessagePackBackend) LockfilePath(path string) string { return path + ".mlock" }
func (MessagePackBackend) LockfilePath(n MetadataNode) string { return n.InternalPath() + ".mlock" }

// Lock locks the metadata for the given path
func (b MessagePackBackend) Lock(path string) (UnlockFunc, error) {
metaLockPath := b.LockfilePath(path)
func (b MessagePackBackend) Lock(n MetadataNode) (UnlockFunc, error) {
metaLockPath := b.LockfilePath(n)
mlock, err := lockedfile.OpenFile(metaLockPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return nil, err
@@ -320,9 +352,6 @@ func (b MessagePackBackend) Lock(path string) (UnlockFunc, error) {
}, nil
}

func (b MessagePackBackend) cacheKey(path string) string {
// rootPath is guaranteed to have no trailing slash
// the cache key shouldn't begin with a slash as some stores drop it which can cause
// confusion
return strings.TrimPrefix(path, b.rootPath+"/")
func (b MessagePackBackend) cacheKey(n MetadataNode) string {
return n.GetSpaceID() + "/" + n.GetID()
}

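The cache key changes from a root-relative path to `spaceID + "/" + nodeID`. Unlike a path-based key, the id-based key stays valid when a node is moved or renamed on disk, which removes a class of stale-cache problems. A before/after sketch with made-up values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	rootPath := "/var/lib/storage"           // hypothetical storage root
	path := rootPath + "/spaces/s1/nodes/n1" // hypothetical node path

	// old key: the node path relative to the storage root; it goes stale
	// whenever the node is moved or renamed on disk
	oldKey := strings.TrimPrefix(path, rootPath+"/")

	// new key: built from the stable space and node ids instead
	spaceID, nodeID := "s1", "n1"
	newKey := spaceID + "/" + nodeID

	fmt.Println(oldKey, "->", newKey)
}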
67
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/metadata.go
generated
vendored
@@ -22,6 +22,7 @@ import (
"context"
"errors"
"io"
"time"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
@@ -37,27 +38,34 @@ var errUnconfiguredError = errors.New("no metadata backend configured. Bailing o

type UnlockFunc func() error

type MetadataNode interface {
GetSpaceID() string
GetID() string
InternalPath() string
}

// Backend defines the interface for file attribute backends
type Backend interface {
Name() string
IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error)

All(ctx context.Context, path string) (map[string][]byte, error)
Get(ctx context.Context, path, key string) ([]byte, error)
All(ctx context.Context, n MetadataNode) (map[string][]byte, error)
AllWithLockedSource(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error)

GetInt64(ctx context.Context, path, key string) (int64, error)
List(ctx context.Context, path string) (attribs []string, err error)
Set(ctx context.Context, path, key string, val []byte) error
SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error
Remove(ctx context.Context, path, key string, acquireLock bool) error
Get(ctx context.Context, n MetadataNode, key string) ([]byte, error)
GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error)
List(ctx context.Context, n MetadataNode) (attribs []string, err error)
Set(ctx context.Context, n MetadataNode, key string, val []byte) error
SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) error
Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error

Lock(n MetadataNode) (UnlockFunc, error)
Purge(ctx context.Context, n MetadataNode) error
Rename(oldNode, newNode MetadataNode) error
MetadataPath(n MetadataNode) string
LockfilePath(n MetadataNode) string

Lock(path string) (UnlockFunc, error)
Purge(ctx context.Context, path string) error
Rename(oldPath, newPath string) error
IsMetaFile(path string) bool
MetadataPath(path string) string
LockfilePath(path string) string

AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error)
}

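The new `MetadataNode` interface is the pivot of this refactoring: every backend method that previously took a raw path now accepts anything that can report its space id, node id and internal path. A hedged sketch of a minimal implementation (the real one is `node.BaseNode` in reva; the on-disk layout used here is made up):

package main

import (
	"fmt"
	"path/filepath"
)

// MetadataNode mirrors the interface introduced above.
type MetadataNode interface {
	GetSpaceID() string
	GetID() string
	InternalPath() string
}

// toyNode is a hypothetical implementation for illustration only.
type toyNode struct{ spaceID, id, root string }

func (n toyNode) GetSpaceID() string { return n.spaceID }
func (n toyNode) GetID() string      { return n.id }
func (n toyNode) InternalPath() string {
	return filepath.Join(n.root, "spaces", n.spaceID, "nodes", n.id)
}

func main() {
	var n MetadataNode = toyNode{spaceID: "s1", id: "n1", root: "/var/lib/storage"}
	// the backends derive all file locations and cache keys from the node
	fmt.Println("metadata file:", n.InternalPath()+".mpk")
	fmt.Println("lock file:", n.InternalPath()+".mlock")
	fmt.Println("cache key:", n.GetSpaceID()+"/"+n.GetID())
}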
// NullBackend is the default stub backend, used to enforce the configuration of a proper backend
|
||||
@@ -66,44 +74,49 @@ type NullBackend struct{}
|
||||
// Name returns the name of the backend
|
||||
func (NullBackend) Name() string { return "null" }
|
||||
|
||||
// IdentifyPath returns the ids and mtime of a file
|
||||
func (NullBackend) IdentifyPath(ctx context.Context, path string) (string, string, time.Time, error) {
|
||||
return "", "", time.Time{}, errUnconfiguredError
|
||||
}
|
||||
|
||||
// All reads all extended attributes for a node
|
||||
func (NullBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
|
||||
func (NullBackend) All(ctx context.Context, n MetadataNode) (map[string][]byte, error) {
|
||||
return nil, errUnconfiguredError
}

// Get an extended attribute value for the given key
func (NullBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
func (NullBackend) Get(ctx context.Context, n MetadataNode, key string) ([]byte, error) {
return []byte{}, errUnconfiguredError
}

// GetInt64 reads a string as int64 from the xattrs
func (NullBackend) GetInt64(ctx context.Context, path, key string) (int64, error) {
func (NullBackend) GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error) {
return 0, errUnconfiguredError
}

// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (NullBackend) List(ctx context.Context, path string) ([]string, error) {
func (NullBackend) List(ctx context.Context, n MetadataNode) ([]string, error) {
return nil, errUnconfiguredError
}

// Set sets one attribute for the given path
func (NullBackend) Set(ctx context.Context, path string, key string, val []byte) error {
func (NullBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) error {
return errUnconfiguredError
}

// SetMultiple sets a set of attribute for the given path
func (NullBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error {
func (NullBackend) SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) error {
return errUnconfiguredError
}

// Remove removes an extended attribute key
func (NullBackend) Remove(ctx context.Context, path string, key string, acquireLock bool) error {
func (NullBackend) Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error {
return errUnconfiguredError
}

// Lock locks the metadata for the given path
func (NullBackend) Lock(path string) (UnlockFunc, error) {
func (NullBackend) Lock(n MetadataNode) (UnlockFunc, error) {
return nil, nil
}

@@ -111,19 +124,19 @@ func (NullBackend) Lock(path string) (UnlockFunc, error) {
func (NullBackend) IsMetaFile(path string) bool { return false }

// Purge purges the data of a given path from any cache that might hold it
func (NullBackend) Purge(_ context.Context, purges string) error { return errUnconfiguredError }
func (NullBackend) Purge(_ context.Context, n MetadataNode) error { return errUnconfiguredError }

// Rename moves the data for a given path to a new path
func (NullBackend) Rename(oldPath, newPath string) error { return errUnconfiguredError }
func (NullBackend) Rename(oldNode, newNode MetadataNode) error { return errUnconfiguredError }

// MetadataPath returns the path of the file holding the metadata for the given path
func (NullBackend) MetadataPath(path string) string { return "" }
func (NullBackend) MetadataPath(n MetadataNode) string { return "" }

// LockfilePath returns the path of the lock file
func (NullBackend) LockfilePath(path string) string { return "" }
func (NullBackend) LockfilePath(n MetadataNode) string { return "" }

// AllWithLockedSource reads all extended attributes from the given reader
// The path argument is used for storing the data in the cache
func (NullBackend) AllWithLockedSource(ctx context.Context, path string, source io.Reader) (map[string][]byte, error) {
func (NullBackend) AllWithLockedSource(ctx context.Context, n MetadataNode, source io.Reader) (map[string][]byte, error) {
return nil, errUnconfiguredError
}
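
The hunks above and below convert the metadata backends from raw path arguments to a MetadataNode parameter. A minimal sketch of that interface, inferred from the calls made elsewhere in this diff (GetSpaceID, GetID, InternalPath) rather than copied from the reva source:

// MetadataNode as the new signatures appear to use it (sketch).
type MetadataNode interface {
	GetSpaceID() string   // id of the space the node belongs to
	GetID() string        // id of the node itself
	InternalPath() string // current on-disk location of the node
}
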
@@ -25,6 +25,7 @@ import (
"path/filepath"
"strconv"
"strings"
"time"

"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
@@ -50,22 +51,32 @@ func NewXattrsBackend(rootPath string, o cache.Config) XattrsBackend {
// Name returns the name of the backend
func (XattrsBackend) Name() string { return "xattrs" }

// IdentifyPath returns the space id, node id and mtime of a file
func (b XattrsBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr)
id, _ := xattr.Get(path, prefixes.IDAttr)

mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr)
mtime, _ := time.Parse(time.RFC3339Nano, string(mtimeAttr))
return string(spaceID), string(id), mtime, nil
}

// Get an extended attribute value for the given key
// No file locking is involved here as reading a single xattr is
// considered to be atomic.
func (b XattrsBackend) Get(ctx context.Context, path, key string) ([]byte, error) {
func (b XattrsBackend) Get(ctx context.Context, n MetadataNode, key string) ([]byte, error) {
attribs := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
err := b.metaCache.PullFromCache(b.cacheKey(n), &attribs)
if err == nil && len(attribs[key]) > 0 {
return attribs[key], err
}

return xattr.Get(path, key)
return xattr.Get(n.InternalPath(), key)
}

// GetInt64 reads a string as int64 from the xattrs
func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int64, error) {
attr, err := b.Get(ctx, filePath, key)
func (b XattrsBackend) GetInt64(ctx context.Context, n MetadataNode, key string) (int64, error) {
attr, err := b.Get(ctx, n, key)
if err != nil {
return 0, err
}
@@ -78,11 +89,12 @@ func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int6

// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (b XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
return b.list(ctx, filePath, true)
func (b XattrsBackend) List(ctx context.Context, n MetadataNode) (attribs []string, err error) {
return b.list(ctx, n, true)
}

func (b XattrsBackend) list(ctx context.Context, filePath string, acquireLock bool) (attribs []string, err error) {
func (b XattrsBackend) list(ctx context.Context, n MetadataNode, acquireLock bool) (attribs []string, err error) {
filePath := n.InternalPath()
attrs, err := xattr.List(filePath)
if err == nil {
return attrs, nil
@@ -102,21 +114,21 @@ func (b XattrsBackend) list(ctx context.Context, filePath string, acquireLock bo

// All reads all extended attributes for a node, protected by a
// shared file lock
func (b XattrsBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return b.getAll(ctx, path, false, true)
func (b XattrsBackend) All(ctx context.Context, n MetadataNode) (map[string][]byte, error) {
return b.getAll(ctx, n, false, true)
}

func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acquireLock bool) (map[string][]byte, error) {
func (b XattrsBackend) getAll(ctx context.Context, n MetadataNode, skipCache, acquireLock bool) (map[string][]byte, error) {
attribs := map[string][]byte{}

if !skipCache {
err := b.metaCache.PullFromCache(b.cacheKey(path), &attribs)
err := b.metaCache.PullFromCache(b.cacheKey(n), &attribs)
if err == nil {
return attribs, err
}
}

attrNames, err := b.list(ctx, path, acquireLock)
attrNames, err := b.list(ctx, n, acquireLock)
if err != nil {
return nil, err
}
@@ -132,6 +144,7 @@ func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acqui
// error handling: Count if there are errors while reading all attribs.
// if there were any, return an error.
attribs = make(map[string][]byte, len(attrNames))
path := n.InternalPath()
for _, name := range attrNames {
var val []byte
if val, xerr = xattr.Get(path, name); xerr != nil && !IsAttrUnset(xerr) {
@@ -145,7 +158,7 @@ func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acqui
return nil, errors.Wrap(xerr, "Failed to read all xattrs")
}

err = b.metaCache.PushToCache(b.cacheKey(path), attribs)
err = b.metaCache.PushToCache(b.cacheKey(n), attribs)
if err != nil {
return nil, err
}
@@ -154,18 +167,19 @@ func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acqui
}

// Set sets one attribute for the given path
func (b XattrsBackend) Set(ctx context.Context, path string, key string, val []byte) (err error) {
return b.SetMultiple(ctx, path, map[string][]byte{key: val}, true)
func (b XattrsBackend) Set(ctx context.Context, n MetadataNode, key string, val []byte) (err error) {
return b.SetMultiple(ctx, n, map[string][]byte{key: val}, true)
}

// SetMultiple sets a set of attribute for the given path
func (b XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) (err error) {
func (b XattrsBackend) SetMultiple(ctx context.Context, n MetadataNode, attribs map[string][]byte, acquireLock bool) (err error) {
path := n.InternalPath()
if acquireLock {
err := os.MkdirAll(filepath.Dir(path), 0600)
if err != nil {
return err
}
lockedFile, err := lockedfile.OpenFile(b.LockfilePath(path), os.O_CREATE|os.O_WRONLY, 0600)
lockedFile, err := lockedfile.OpenFile(b.LockfilePath(n), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return err
}
@@ -188,15 +202,16 @@ func (b XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map
return errors.Wrap(xerr, "Failed to set all xattrs")
}

attribs, err = b.getAll(ctx, path, true, false)
attribs, err = b.getAll(ctx, n, true, false)
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(path), attribs)
return b.metaCache.PushToCache(b.cacheKey(n), attribs)
}

// Remove an extended attribute key
func (b XattrsBackend) Remove(ctx context.Context, path string, key string, acquireLock bool) error {
func (b XattrsBackend) Remove(ctx context.Context, n MetadataNode, key string, acquireLock bool) error {
path := n.InternalPath()
if acquireLock {
lockedFile, err := lockedfile.OpenFile(path+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
@@ -209,21 +224,22 @@ func (b XattrsBackend) Remove(ctx context.Context, path string, key string, acqu
if err != nil {
return err
}
attribs, err := b.getAll(ctx, path, true, false)
attribs, err := b.getAll(ctx, n, true, false)
if err != nil {
return err
}
return b.metaCache.PushToCache(b.cacheKey(path), attribs)
return b.metaCache.PushToCache(b.cacheKey(n), attribs)
}

// IsMetaFile returns whether the given path represents a meta file
func (XattrsBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".meta.lock") }

// Purge purges the data of a given path
func (b XattrsBackend) Purge(ctx context.Context, path string) error {
func (b XattrsBackend) Purge(ctx context.Context, n MetadataNode) error {
path := n.InternalPath()
_, err := os.Stat(path)
if err == nil {
attribs, err := b.getAll(ctx, path, true, true)
attribs, err := b.getAll(ctx, n, true, true)
if err != nil {
return err
}
@@ -238,31 +254,31 @@ func (b XattrsBackend) Purge(ctx context.Context, path string) error {
}
}

return b.metaCache.RemoveMetadata(b.cacheKey(path))
return b.metaCache.RemoveMetadata(b.cacheKey(n))
}

// Rename moves the data for a given path to a new path
func (b XattrsBackend) Rename(oldPath, newPath string) error {
func (b XattrsBackend) Rename(oldNode, newNode MetadataNode) error {
data := map[string][]byte{}
err := b.metaCache.PullFromCache(b.cacheKey(oldPath), &data)
err := b.metaCache.PullFromCache(b.cacheKey(oldNode), &data)
if err == nil {
err = b.metaCache.PushToCache(b.cacheKey(newPath), data)
err = b.metaCache.PushToCache(b.cacheKey(newNode), data)
if err != nil {
return err
}
}
return b.metaCache.RemoveMetadata(b.cacheKey(oldPath))
return b.metaCache.RemoveMetadata(b.cacheKey(oldNode))
}

// MetadataPath returns the path of the file holding the metadata for the given path
func (XattrsBackend) MetadataPath(path string) string { return path }
func (XattrsBackend) MetadataPath(n MetadataNode) string { return n.InternalPath() }

// LockfilePath returns the path of the lock file
func (XattrsBackend) LockfilePath(path string) string { return path + ".mlock" }
func (XattrsBackend) LockfilePath(n MetadataNode) string { return n.InternalPath() + ".mlock" }

// Lock locks the metadata for the given path
func (b XattrsBackend) Lock(path string) (UnlockFunc, error) {
metaLockPath := b.LockfilePath(path)
func (b XattrsBackend) Lock(n MetadataNode) (UnlockFunc, error) {
metaLockPath := b.LockfilePath(n)
mlock, err := lockedfile.OpenFile(metaLockPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return nil, err
@@ -276,20 +292,20 @@ func (b XattrsBackend) Lock(path string) (UnlockFunc, error) {
}, nil
}

func cleanupLockfile(ctx context.Context, f *lockedfile.File) {
func cleanupLockfile(_ context.Context, f *lockedfile.File) {
_ = f.Close()
_ = os.Remove(f.Name())
}

// AllWithLockedSource reads all extended attributes from the given reader.
// The path argument is used for storing the data in the cache
func (b XattrsBackend) AllWithLockedSource(ctx context.Context, path string, _ io.Reader) (map[string][]byte, error) {
return b.All(ctx, path)
func (b XattrsBackend) AllWithLockedSource(ctx context.Context, n MetadataNode, _ io.Reader) (map[string][]byte, error) {
return b.All(ctx, n)
}

func (b XattrsBackend) cacheKey(path string) string {
func (b XattrsBackend) cacheKey(n MetadataNode) string {
// rootPath is guaranteed to have no trailing slash
// the cache key shouldn't begin with a slash as some stores drop it which can cause
// confusion
return strings.TrimPrefix(path, b.rootPath+"/")
return n.GetSpaceID() + "/" + n.GetID()
}
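
The cacheKey hunk is the crux of this file: metadata cache keys move from a root-relative path to the stable space id / node id pair, so cached attributes survive renames that change the on-disk location. A sketch with invented ids:

// cacheKeyExample shows the shape of the new key; the ids are invented.
func cacheKeyExample() string {
	spaceID, nodeID := "4c510ada", "b2cd30c4-9f21"
	// the old key was path-derived, e.g. "spaces/4c/510ada/nodes/b2/cd/30/c4/-9f21",
	// and changed whenever the node moved; the new key is stable for the node's lifetime.
	return spaceID + "/" + nodeID // "4c510ada/b2cd30c4-9f21"
}
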
@@ -1,138 +0,0 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"context"
"errors"
"os"
"path/filepath"

"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
)

func init() {
registerMigration("0001", Migration0001{})
}

type Migration0001 struct{}

// Migrate creates the spaces directory structure
func (m Migration0001) Migrate(migrator *Migrator) (Result, error) {
migrator.log.Info().Msg("Migrating spaces directory structure...")

// create spaces folder and iterate over existing nodes to populate it
nodesPath := filepath.Join(migrator.lu.InternalRoot(), "nodes")
fi, err := os.Stat(nodesPath)
if err == nil && fi.IsDir() {
f, err := os.Open(nodesPath)
if err != nil {
return stateFailed, err
}
nodes, err := f.Readdir(0)
if err != nil {
return stateFailed, err
}

for _, n := range nodes {
nodePath := filepath.Join(nodesPath, n.Name())

attr, err := migrator.lu.MetadataBackend().Get(context.Background(), nodePath, prefixes.ParentidAttr)
if err == nil && string(attr) == node.RootID {
if err := m.moveNode(migrator, n.Name(), n.Name()); err != nil {
migrator.log.Error().Err(err).
Str("space", n.Name()).
Msg("could not move space")
continue
}
m.linkSpaceNode(migrator, "personal", n.Name())
}
}
// TODO delete nodesPath if empty
}
return stateSucceeded, nil
}

// Rollback is not implemented
func (Migration0001) Rollback(_ *Migrator) (Result, error) {
return stateFailed, errors.New("rollback not implemented")
}

func (m Migration0001) moveNode(migrator *Migrator, spaceID, nodeID string) error {
dirPath := filepath.Join(migrator.lu.InternalRoot(), "nodes", nodeID)
f, err := os.Open(dirPath)
if err != nil {
return err
}
children, err := f.Readdir(0)
if err != nil {
return err
}
for _, child := range children {
old := filepath.Join(migrator.lu.InternalRoot(), "nodes", child.Name())
new := filepath.Join(migrator.lu.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(child.Name(), 4, 2))
if err := os.Rename(old, new); err != nil {
migrator.log.Error().Err(err).
Str("space", spaceID).
Str("nodes", child.Name()).
Str("oldpath", old).
Str("newpath", new).
Msg("could not rename node")
}
if child.IsDir() {
if err := m.moveNode(migrator, spaceID, child.Name()); err != nil {
return err
}
}
}
return nil
}

// linkSpace creates a new symbolic link for a space with the given type st, and node id
func (m Migration0001) linkSpaceNode(migrator *Migrator, spaceType, spaceID string) {
spaceTypesPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType, spaceID)
expectedTarget := "../../spaces/" + lookup.Pathify(spaceID, 1, 2) + "/nodes/" + lookup.Pathify(spaceID, 4, 2)
linkTarget, err := os.Readlink(spaceTypesPath)
if errors.Is(err, os.ErrNotExist) {
err = os.Symlink(expectedTarget, spaceTypesPath)
if err != nil {
migrator.log.Error().Err(err).
Str("space_type", spaceType).
Str("space", spaceID).
Msg("could not create symlink")
}
} else {
if err != nil {
migrator.log.Error().Err(err).
Str("space_type", spaceType).
Str("space", spaceID).
Msg("could not read symlink")
}
if linkTarget != expectedTarget {
migrator.log.Warn().
Str("space_type", spaceType).
Str("space", spaceID).
Str("expected", expectedTarget).
Str("actual", linkTarget).
Msg("expected a different link target")
}
}
}
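
Migration 0001 leans on lookup.Pathify to shard flat ids into the nested spaces layout. A rough re-implementation for illustration; the slicing behaviour is inferred from how the results are used here, so treat the exact semantics as an assumption:

// pathify shards an id into depth segments of width characters,
// e.g. pathify("4c510ada", 1, 2) -> "4c/510ada"
// and  pathify("b2cd30c4-9f21", 4, 2) -> "b2/cd/30/c4/-9f21".
func pathify(id string, depth, width int) string {
	out := ""
	i := 0
	for ; i < depth; i++ {
		if len(id) <= (i+1)*width {
			break
		}
		out += id[i*width:(i+1)*width] + "/"
	}
	return out + id[i*width:]
}
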
@@ -1,150 +0,0 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"errors"
"io"
"os"
"path/filepath"

"github.com/opencloud-eu/reva/v2/pkg/logger"
)

func init() {
registerMigration("0002", Migration0002{})
}

type Migration0002 struct{}

// Migrate migrates spacetypes to indexes
func (m Migration0002) Migrate(migrator *Migrator) (Result, error) {
migrator.log.Info().Msg("Migrating space types indexes...")

spaceTypesPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes")
fi, err := os.Stat(spaceTypesPath)
if err == nil && fi.IsDir() {

f, err := os.Open(spaceTypesPath)
if err != nil {
return stateFailed, err
}
spaceTypes, err := f.Readdir(0)
if err != nil {
return stateFailed, err
}

for _, st := range spaceTypes {
err := m.moveSpaceType(migrator, st.Name())
if err != nil {
logger.New().Error().Err(err).
Str("space", st.Name()).
Msg("could not move space")
continue
}
}

// delete spacetypespath
d, err := os.Open(spaceTypesPath)
if err != nil {
logger.New().Error().Err(err).
Str("spacetypesdir", spaceTypesPath).
Msg("could not open spacetypesdir")
return stateFailed, nil
}
defer d.Close()
_, err = d.Readdirnames(1) // Or f.Readdir(1)
if err == io.EOF {
// directory is empty we can delete
err := os.Remove(spaceTypesPath)
if err != nil {
logger.New().Error().Err(err).
Str("spacetypesdir", d.Name()).
Msg("could not delete")
}
} else {
logger.New().Error().Err(err).
Str("spacetypesdir", d.Name()).
Msg("could not delete, not empty")
}
}
return stateSucceeded, nil
}

// Rollback is not implemented
func (Migration0002) Rollback(_ *Migrator) (Result, error) {
return stateFailed, errors.New("rollback not implemented")
}

func (m Migration0002) moveSpaceType(migrator *Migrator, spaceType string) error {
dirPath := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType)
f, err := os.Open(dirPath)
if err != nil {
return err
}
children, err := f.Readdir(0)
if err != nil {
return err
}
for _, child := range children {
old := filepath.Join(migrator.lu.InternalRoot(), "spacetypes", spaceType, child.Name())
target, err := os.Readlink(old)
if err != nil {
logger.New().Error().Err(err).
Str("space", spaceType).
Str("nodes", child.Name()).
Str("oldLink", old).
Msg("could not read old symlink")
continue
}
newDir := filepath.Join(migrator.lu.InternalRoot(), "indexes", "by-type", spaceType)
if err := os.MkdirAll(newDir, 0700); err != nil {
logger.New().Error().Err(err).
Str("space", spaceType).
Str("nodes", child.Name()).
Str("targetDir", newDir).
Msg("could not read old symlink")
}
newLink := filepath.Join(newDir, child.Name())
if err := os.Symlink(filepath.Join("..", target), newLink); err != nil {
logger.New().Error().Err(err).
Str("space", spaceType).
Str("nodes", child.Name()).
Str("oldpath", old).
Str("newpath", newLink).
Msg("could not rename node")
continue
}
if err := os.Remove(old); err != nil {
logger.New().Error().Err(err).
Str("space", spaceType).
Str("nodes", child.Name()).
Str("oldLink", old).
Msg("could not remove old symlink")
continue
}
}
if err := os.Remove(dirPath); err != nil {
logger.New().Error().Err(err).
Str("space", spaceType).
Str("dir", dirPath).
Msg("could not remove spaces folder, folder probably not empty")
}
return nil
}
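
Migration 0002 relocates each space-type symlink from spacetypes/<type>/<id> to indexes/by-type/<type>/<id>. Because the new link sits one directory deeper below the root, moveSpaceType prefixes the old relative target with one more hop via filepath.Join("..", target). Sketched with a shortened, invented id:

// before: spacetypes/personal/4c510ada      -> ../../spaces/4c/510ada/nodes/4c/51/0a/da
// after:  indexes/by-type/personal/4c510ada -> ../../../spaces/4c/510ada/nodes/4c/51/0a/da
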
@@ -1,120 +0,0 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"context"
"errors"
"io/fs"
"os"
"path/filepath"
"strings"

"github.com/opencloud-eu/reva/v2/pkg/storage/cache"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
)

func init() {
registerMigration("0003", Migration0003{})
}

type Migration0003 struct{}

// Migrate migrates the file metadata to the current backend.
// Only the xattrs -> messagepack path is supported.
func (m Migration0003) Migrate(migrator *Migrator) (Result, error) {
bod := lookup.DetectBackendOnDisk(migrator.lu.InternalRoot())
if bod == "" {
return stateFailed, errors.New("could not detect metadata backend on disk")
}

if bod != "xattrs" || migrator.lu.MetadataBackend().Name() != "messagepack" {
return stateSucceededRunAgain, nil
}

migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating to messagepack metadata backend...")
xattrs := metadata.NewXattrsBackend(migrator.lu.InternalRoot(), cache.Config{})
mpk := metadata.NewMessagePackBackend(migrator.lu.InternalRoot(), cache.Config{})

spaces, _ := filepath.Glob(filepath.Join(migrator.lu.InternalRoot(), "spaces", "*", "*"))
for _, space := range spaces {
err := filepath.WalkDir(filepath.Join(space, "nodes"), func(path string, _ fs.DirEntry, err error) error {
// Do not continue on error
if err != nil {
return err
}

if strings.HasSuffix(path, ".mpk") || strings.HasSuffix(path, ".flock") {
// None of our business
return nil
}

fi, err := os.Lstat(path)
if err != nil {
return err
}

if !fi.IsDir() && !fi.Mode().IsRegular() {
return nil
}

mpkPath := mpk.MetadataPath(path)
_, err = os.Stat(mpkPath)
if err == nil {
return nil
}

attribs, err := xattrs.All(context.Background(), path)
if err != nil {
migrator.log.Error().Err(err).Str("path", path).Msg("error converting file")
return err
}
if len(attribs) == 0 {
return nil
}

err = mpk.SetMultiple(context.Background(), path, attribs, false)
if err != nil {
migrator.log.Error().Err(err).Str("path", path).Msg("error setting attributes")
return err
}

for k := range attribs {
err = xattrs.Remove(context.Background(), path, k, false)
if err != nil {
migrator.log.Debug().Err(err).Str("path", path).Msg("error removing xattr")
}
}

return nil
})
if err != nil {
migrator.log.Error().Err(err).Msg("error migrating nodes to messagepack metadata backend")
}
}

migrator.log.Info().Msg("done.")
return stateSucceeded, nil
}

// Rollback is not implemented
func (Migration0003) Rollback(_ *Migrator) (Result, error) {
return stateFailed, errors.New("rollback not implemented")
}
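
Migration 0003's walk skips .mpk and .flock files because those are the messagepack backend's own artifacts; every other node gets its xattr set moved into a sidecar file. Assuming the messagepack MetadataPath is the node path plus an .mpk suffix (which the skip logic implies, though this diff does not show it), the per-node effect is:

// before: .../nodes/b2/cd/30/c4/-9f21       carries user.* xattrs, no sidecar
// after:  .../nodes/b2/cd/30/c4/-9f21       xattrs removed
//         .../nodes/b2/cd/30/c4/-9f21.mpk   msgpack-encoded attribute map
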
@@ -1,203 +0,0 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"os"
"path/filepath"
"strings"

"github.com/shamaton/msgpack/v2"
)

func init() {
registerMigration("0004", Migration0004{})
}

type Migration0004 struct{}

// Migrate migrates the directory tree based space indexes to messagepack
func (Migration0004) Migrate(migrator *Migrator) (Result, error) {
root := migrator.lu.InternalRoot()

// migrate user indexes
users, err := os.ReadDir(filepath.Join(root, "indexes", "by-user-id"))
if err != nil {
migrator.log.Warn().Err(err).Msg("error listing user indexes")
}
for _, user := range users {
if !user.IsDir() {
continue
}
id := user.Name()
indexPath := filepath.Join(root, "indexes", "by-user-id", id+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-user-id", id)

migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err := migrateSpaceIndex(indexPath, dirIndexPath)
if err != nil {
migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}

// migrate group indexes
groups, err := os.ReadDir(filepath.Join(root, "indexes", "by-group-id"))
if err != nil {
migrator.log.Warn().Err(err).Msg("error listing group indexes")
}
for _, group := range groups {
if !group.IsDir() {
continue
}
id := group.Name()
indexPath := filepath.Join(root, "indexes", "by-group-id", id+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-group-id", id)

migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err := migrateSpaceIndex(indexPath, dirIndexPath)
if err != nil {
migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}

// migrate project indexes
for _, spaceType := range []string{"personal", "project", "share"} {
indexPath := filepath.Join(root, "indexes", "by-type", spaceType+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-type", spaceType)

_, err := os.Stat(dirIndexPath)
if err != nil {
continue
}

migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Migrating " + indexPath + " to messagepack index format...")
err = migrateSpaceIndex(indexPath, dirIndexPath)
if err != nil {
migrator.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}

migrator.log.Info().Msg("done.")
return stateSucceeded, nil
}

func migrateSpaceIndex(indexPath, dirIndexPath string) error {
links := map[string][]byte{}
m, err := filepath.Glob(dirIndexPath + "/*")
if err != nil {
return err
}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
links[filepath.Base(match)] = []byte(link)
}

// rewrite index as file
d, err := msgpack.Marshal(links)
if err != nil {
return err
}
err = os.WriteFile(indexPath, d, 0600)
if err != nil {
return err
}
return os.RemoveAll(dirIndexPath)
}

// Rollback migrates the directory messagepack indexes to symlinks
func (Migration0004) Rollback(m *Migrator) (Result, error) {
root := m.lu.InternalRoot()

// migrate user indexes
users, err := filepath.Glob(filepath.Join(root, "indexes", "by-user-id", "*.mpk"))
if err != nil {
m.log.Warn().Err(err).Msg("error listing user indexes")
}
for _, indexPath := range users {
dirIndexPath := strings.TrimSuffix(indexPath, ".mpk")

m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
err := downSpaceIndex(indexPath, dirIndexPath)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}

// migrate group indexes
groups, err := filepath.Glob(filepath.Join(root, "indexes", "by-group-id", "*.mpk"))
if err != nil {
m.log.Warn().Err(err).Msg("error listing group indexes")
}
for _, indexPath := range groups {
dirIndexPath := strings.TrimSuffix(indexPath, ".mpk")

m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
err := downSpaceIndex(indexPath, dirIndexPath)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}

// migrate project indexes
for _, spaceType := range []string{"personal", "project", "share"} {
indexPath := filepath.Join(root, "indexes", "by-type", spaceType+".mpk")
dirIndexPath := filepath.Join(root, "indexes", "by-type", spaceType)

_, err := os.Stat(indexPath)
if err != nil || os.IsNotExist(err) {
continue
}

m.log.Info().Str("root", m.lu.InternalRoot()).Msg("Migrating " + indexPath + " to symlinks index format...")
err = downSpaceIndex(indexPath, dirIndexPath)
if err != nil {
m.log.Error().Err(err).Str("path", dirIndexPath).Msg("error migrating index")
}
}
return stateDown, nil
}

func downSpaceIndex(indexPath, dirIndexPath string) error {
d, err := os.ReadFile(indexPath)
if err != nil {
return err
}

links := map[string][]byte{}
err = msgpack.Unmarshal(d, &links)
if err != nil {
return err
}

err = os.MkdirAll(dirIndexPath, 0700)
if err != nil {
return err
}
for link, target := range links {
err = os.Symlink(string(target), filepath.Join(dirIndexPath, link))
if err != nil {
return err
}
}

return os.Remove(indexPath)
}
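
Migration 0004 collapses each per-id directory of symlinks into a single messagepack file mapping link name to target, matching the links variable in migrateSpaceIndex; downSpaceIndex reverses it. Illustrated with invented entries:

// before: indexes/by-user-id/<uid>/<spaceid> -> ../../spaces/...  (one symlink per space)
// after:  indexes/by-user-id/<uid>.mpk holding
//         map[string][]byte{"<spaceid>": []byte("../../spaces/...")}
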
@@ -1,114 +0,0 @@
// Copyright 2018-2023 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"os"
"path/filepath"

"github.com/shamaton/msgpack/v2"
)

func init() {
registerMigration("0005", Migration0005{})
}

type Migration0005 struct{}

// Migrate fixes the messagepack space index data structure
func (Migration0005) Migrate(migrator *Migrator) (Result, error) {
root := migrator.lu.InternalRoot()

indexes, err := filepath.Glob(filepath.Join(root, "indexes", "**", "*.mpk"))
if err != nil {
return stateFailed, err
}
for _, i := range indexes {
migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Fixing index format of " + i)

// Read old-format index
oldData, err := os.ReadFile(i)
if err != nil {
return stateFailed, err
}
oldIndex := map[string][]byte{}
err = msgpack.Unmarshal(oldData, &oldIndex)
if err != nil {
// likely already migrated -> skip
migrator.log.Warn().Str("root", migrator.lu.InternalRoot()).Msg("Invalid index format found in " + i)
continue
}

// Write new-format index
newIndex := map[string]string{}
for k, v := range oldIndex {
newIndex[k] = string(v)
}
newData, err := msgpack.Marshal(newIndex)
if err != nil {
return stateFailed, err
}
err = os.WriteFile(i, newData, 0600)
if err != nil {
return stateFailed, err
}
}
migrator.log.Info().Msg("done.")
return stateSucceeded, nil
}

// Rollback rolls back the migration
func (Migration0005) Rollback(migrator *Migrator) (Result, error) {
root := migrator.lu.InternalRoot()

indexes, err := filepath.Glob(filepath.Join(root, "indexes", "**", "*.mpk"))
if err != nil {
return stateFailed, err
}
for _, i := range indexes {
migrator.log.Info().Str("root", migrator.lu.InternalRoot()).Msg("Fixing index format of " + i)

oldData, err := os.ReadFile(i)
if err != nil {
return stateFailed, err
}
oldIndex := map[string]string{}
err = msgpack.Unmarshal(oldData, &oldIndex)
if err != nil {
migrator.log.Warn().Str("root", migrator.lu.InternalRoot()).Msg("Invalid index format found in " + i)
continue
}

// Write new-format index
newIndex := map[string][]byte{}
for k, v := range oldIndex {
newIndex[k] = []byte(v)
}
newData, err := msgpack.Marshal(newIndex)
if err != nil {
return stateFailed, err
}
err = os.WriteFile(i, newData, 0600)
if err != nil {
return stateFailed, err
}
}
migrator.log.Info().Msg("done.")
return stateDown, nil
}
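
Migration 0005 then only changes the msgpack value type of those index files; the unmarshal-into-the-old-type failure above is what detects an already-converted file and skips it:

// 0004 wrote:  map[string][]byte{"<spaceid>": []byte("../../spaces/...")}
// 0005 writes: map[string]string{"<spaceid>": "../../spaces/..."}
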
217
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/migrator/migrator.go
generated
vendored
@@ -1,217 +0,0 @@
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

package migrator

import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"

"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
"github.com/rogpeppe/go-internal/lockedfile"
"github.com/rs/zerolog"
)

const (
statePending = "pending"
stateFailed = "failed"
stateSucceeded = "succeeded"
stateDown = "down"
stateSucceededRunAgain = "runagain"
)

type migration interface {
Migrate(*Migrator) (Result, error)
Rollback(*Migrator) (Result, error)
}

var migrations = map[string]migration{}

type migrationStates map[string]MigrationState

func registerMigration(name string, migration migration) {
migrations[name] = migration
}

func allMigrations() []string {
ms := []string{}

for k := range migrations {
ms = append(ms, k)
}

sort.Strings(ms)
return ms
}

// MigrationState holds the state of a migration
type MigrationState struct {
State string
Message string
}

// Result represents the result of a migration run
type Result string

// Migrator runs migrations on an existing decomposedfs
type Migrator struct {
lu node.PathLookup
states migrationStates
log *zerolog.Logger
}

// New returns a new Migrator instance
func New(lu node.PathLookup, log *zerolog.Logger) Migrator {
return Migrator{
lu: lu,
log: log,
}
}

// Migrations returns the list of migrations and their states
func (m *Migrator) Migrations() (map[string]MigrationState, error) {
err := m.readStates()
if err != nil {
return nil, err
}

states := map[string]MigrationState{}
for _, migration := range allMigrations() {
if s, ok := m.states[migration]; ok {
states[migration] = s
} else {
states[migration] = MigrationState{
State: statePending,
}
}
}

return states, nil
}

// RunMigration runs or rolls back a migration
func (m *Migrator) RunMigration(id string, rollback bool) error {
if _, ok := migrations[id]; !ok {
return fmt.Errorf("invalid migration '%s'", id)
}

lock, err := lockedfile.OpenFile(filepath.Join(m.lu.InternalRoot(), ".migrations.lock"), os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
defer lock.Close()

err = m.readStates()
if err != nil {
return err
}

var res Result
if !rollback {
m.log.Info().Msg("Running migration " + id + "...")
res, err = migrations[id].Migrate(m)
} else {
m.log.Info().Msg("Rolling back migration " + id + "...")
res, err = migrations[id].Rollback(m)
}

// write back state
s := m.states[id]
s.State = string(res)

if err != nil {
m.log.Error().Err(err).Msg("migration " + id + " failed")
s.Message = err.Error()
}

m.states[id] = s
err = m.writeStates()
if err != nil {
return err
}
m.log.Info().Msg("done")
return nil
}

// RunMigrations runs all migrations in sequence. Note this sequence must not be changed or it might
// damage existing decomposed fs.
func (m *Migrator) RunMigrations() error {
lock, err := lockedfile.OpenFile(filepath.Join(m.lu.InternalRoot(), ".migrations.lock"), os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return err
}
defer lock.Close()

err = m.readStates()
if err != nil {
return err
}

for _, migration := range allMigrations() {
s := m.states[migration]
if s.State == stateSucceeded || s.State == stateDown {
continue
}

res, err := migrations[migration].Migrate(m)
s.State = string(res)
if err != nil {
m.log.Error().Err(err).Msg("migration " + migration + " failed")
s.Message = err.Error()
}

m.states[migration] = s
err = m.writeStates()
if err != nil {
return err
}
}
return nil
}

func (m *Migrator) readStates() error {
m.states = migrationStates{}

d, err := os.ReadFile(filepath.Join(m.lu.InternalRoot(), ".migrations"))
if err != nil {
if !os.IsNotExist(err) {
return err
}
}

if len(d) > 0 {
err = json.Unmarshal(d, &m.states)
if err != nil {
return err
}
}

return nil
}

func (m *Migrator) writeStates() error {
d, err := json.Marshal(m.states)
if err != nil {
m.log.Error().Err(err).Msg("could not marshal migration states")
return nil
}
return os.WriteFile(filepath.Join(m.lu.InternalRoot(), ".migrations"), d, 0600)
}
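
The migrator framework above persists one MigrationState per migration id as JSON in <root>/.migrations, always under the <root>/.migrations.lock file lock. An illustrative state file (values invented, field names and state strings taken from the code above):

// {
//   "0001": {"State": "succeeded", "Message": ""},
//   "0003": {"State": "failed", "Message": "could not detect metadata backend on disk"}
// }
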
104
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go
generated
vendored
@@ -77,6 +77,7 @@ const (
// TrashIDDelimiter represents the characters used to separate the nodeid and the deletion time.
TrashIDDelimiter = ".T."
RevisionIDDelimiter = ".REV."
CurrentIDDelimiter = ".CURRENT"

// RootID defines the root node's ID
RootID = "root"
@@ -120,11 +121,9 @@ type Tree interface {
// CreateReference(ctx context.Context, node *Node, targetURI *url.URL) error
Move(ctx context.Context, oldNode *Node, newNode *Node) (err error)
Delete(ctx context.Context, node *Node) (err error)
RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, target *Node) (*Node, *Node, func() error, error)
PurgeRecycleItemFunc(ctx context.Context, spaceid, key, purgePath string) (*Node, func() error, error)

InitNewNode(ctx context.Context, n *Node, fsize uint64) (metadata.UnlockFunc, error)
RestoreRevision(ctx context.Context, spaceID, nodeID, sourcePath string) (err error)
RestoreRevision(ctx context.Context, source, target metadata.MetadataNode) (err error)

WriteBlob(node *Node, source string) error
ReadBlob(node *Node) (io.ReadCloser, error)
@@ -156,10 +155,9 @@ type PathLookup interface {
Path(ctx context.Context, n *Node, hasPermission PermissionFunc) (path string, err error)
MetadataBackend() metadata.Backend
TimeManager() TimeManager
ReadBlobIDAndSizeAttr(ctx context.Context, path string, attrs Attributes) (string, int64, error)
TypeFromPath(ctx context.Context, path string) provider.ResourceType
CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error)
CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error)
ReadBlobIDAndSizeAttr(ctx context.Context, n metadata.MetadataNode, attrs Attributes) (string, int64, error)
CopyMetadataWithSourceLock(ctx context.Context, sourceNode, targetNode metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error)
CopyMetadata(ctx context.Context, sourceNode, targetNode metadata.MetadataNode, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error)
}

type IDCacher interface {
@@ -167,11 +165,33 @@ type IDCacher interface {
GetCachedID(ctx context.Context, spaceID, nodeID string) (string, bool)
}

type BaseNode struct {
SpaceID string
ID string

lu PathLookup
}

func NewBaseNode(spaceID, nodeID string, lu PathLookup) *BaseNode {
return &BaseNode{
SpaceID: spaceID,
ID: nodeID,
lu: lu,
}
}

func (n *BaseNode) GetSpaceID() string { return n.SpaceID }
func (n *BaseNode) GetID() string { return n.ID }

// InternalPath returns the internal path of the Node
func (n *BaseNode) InternalPath() string {
return n.lu.InternalPath(n.SpaceID, n.ID)
}

// Node represents a node in the tree and provides methods to get a Parent or Child instance
type Node struct {
SpaceID string
BaseNode
ParentID string
ID string
Name string
Blobsize int64
BlobID string
@@ -179,7 +199,6 @@ type Node struct {
Exists bool
SpaceRoot *Node

lu PathLookup
xattrsCache map[string][]byte
nodeType *provider.ResourceType
}
@@ -190,13 +209,15 @@ func New(spaceID, id, parentID, name string, blobsize int64, blobID string, t pr
blobID = uuid.New().String()
}
return &Node{
SpaceID: spaceID,
ID: id,
BaseNode: BaseNode{
SpaceID: spaceID,
ID: id,
lu: lu,
},
ParentID: parentID,
Name: name,
Blobsize: blobsize,
owner: owner,
lu: lu,
BlobID: blobID,
nodeType: &t,
}
@@ -318,9 +339,11 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
if spaceRoot == nil {
// read space root
spaceRoot = &Node{
SpaceID: spaceID,
lu: lu,
ID: spaceID,
BaseNode: BaseNode{
SpaceID: spaceID,
lu: lu,
ID: spaceID,
},
}
spaceRoot.SpaceRoot = spaceRoot
spaceRoot.owner, err = spaceRoot.readOwner(ctx)
@@ -368,12 +391,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis

// read node
n := &Node{
SpaceID: spaceID,
lu: lu,
ID: nodeID,
BaseNode: BaseNode{
SpaceID: spaceID,
lu: lu,
ID: nodeID,
},
SpaceRoot: spaceRoot,
}
nodePath := n.InternalPath()

// append back revision to nodeid, even when returning a not existing node
defer func() {
@@ -395,7 +419,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
n.Name = attrs.String(prefixes.NameAttr)
n.ParentID = attrs.String(prefixes.ParentidAttr)
if n.ParentID == "" {
d, _ := os.ReadFile(lu.MetadataBackend().MetadataPath(n.InternalPath()))
d, _ := os.ReadFile(lu.MetadataBackend().MetadataPath(n))
if _, ok := lu.MetadataBackend().(metadata.MessagePackBackend); ok {
appctx.GetLogger(ctx).Error().Str("path", n.InternalPath()).Str("nodeid", n.ID).Interface("attrs", attrs).Bytes("messagepack", d).Msg("missing parent id")
}
@@ -403,13 +427,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis
}

if revisionSuffix == "" {
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, nodePath, attrs)
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, n, attrs)
if err != nil {
return nil, err
}
} else {
versionPath := lu.VersionPath(spaceID, nodeID, revisionSuffix)
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, versionPath, nil)
versionNode := NewBaseNode(spaceID, nodeID+RevisionIDDelimiter+revisionSuffix, lu)
n.BlobID, n.Blobsize, err = lu.ReadBlobIDAndSizeAttr(ctx, versionNode, nil)
if err != nil {
return nil, err
}
@@ -430,8 +454,10 @@ func (n *Node) Child(ctx context.Context, name string) (*Node, error) {
spaceID = n.SpaceRoot.ID
}
c := &Node{
SpaceID: spaceID,
lu: n.lu,
BaseNode: BaseNode{
SpaceID: spaceID,
lu: n.lu,
},
ParentID: n.ID,
Name: name,
SpaceRoot: n.SpaceRoot,
@@ -461,9 +487,11 @@ func (n *Node) ParentWithReader(ctx context.Context, r io.Reader) (*Node, error)
return nil, fmt.Errorf("decomposedfs: root has no parent")
}
p := &Node{
SpaceID: n.SpaceID,
lu: n.lu,
ID: n.ParentID,
BaseNode: BaseNode{
SpaceID: n.SpaceID,
lu: n.lu,
ID: n.ParentID,
},
SpaceRoot: n.SpaceRoot,
}

@@ -563,11 +591,6 @@ func (n *Node) PermissionSet(ctx context.Context) (*provider.ResourcePermissions
return NoPermissions(), true
}

// InternalPath returns the internal path of the Node
func (n *Node) InternalPath() string {
return n.lu.InternalPath(n.SpaceID, n.ID)
}

// ParentPath returns the internal path of the parent of the current node
func (n *Node) ParentPath() string {
return n.lu.InternalPath(n.SpaceID, n.ParentID)
@@ -1398,3 +1421,16 @@ func (n *Node) GetDTime(ctx context.Context) (time.Time, error) {
func (n *Node) SetDTime(ctx context.Context, t *time.Time) (err error) {
return n.lu.TimeManager().SetDTime(ctx, n, t)
}

// ReadChildNodeFromLink reads the child node id from a link
func ReadChildNodeFromLink(ctx context.Context, path string) (string, error) {
_, span := tracer.Start(ctx, "readChildNodeFromLink")
defer span.End()
link, err := os.Readlink(path)
if err != nil {
return "", err
}
nodeID := strings.TrimLeft(link, "/.")
nodeID = strings.ReplaceAll(nodeID, "/", "")
return nodeID, nil
}
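
ReadChildNodeFromLink at the end of this hunk recovers a node id from a child symlink without resolving it: the leading run of dots and slashes is trimmed, then the remaining separators are squeezed out. Since the sharded layout only re-slices the id, the concatenation restores it exactly. Worked through with an invented id:

// link target: "../../../../../b2/cd/30/c4/-9f21"  (sharded node location)
// strings.TrimLeft(link, "/.")        -> "b2/cd/30/c4/-9f21"
// strings.ReplaceAll(nodeID, "/", "") -> "b2cd30c4-9f21"  (the node id)
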
14
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/xattrs.go
generated
vendored
@@ -81,7 +81,7 @@ func (n *Node) SetXattrsWithContext(ctx context.Context, attribs map[string][]by
}
}

return n.lu.MetadataBackend().SetMultiple(ctx, n.InternalPath(), attribs, acquireLock)
return n.lu.MetadataBackend().SetMultiple(ctx, n, attribs, acquireLock)
}

// SetXattrs sets multiple extended attributes on the write-through cache/node
@@ -95,7 +95,7 @@ func (n *Node) SetXattr(ctx context.Context, key string, val []byte) (err error)
n.xattrsCache[key] = val
}

return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, val)
return n.lu.MetadataBackend().Set(ctx, n, key, val)
}

// SetXattrString sets a string extended attribute on the write-through cache/node
@@ -104,7 +104,7 @@ func (n *Node) SetXattrString(ctx context.Context, key, val string) (err error)
n.xattrsCache[key] = []byte(val)
}

return n.lu.MetadataBackend().Set(ctx, n.InternalPath(), key, []byte(val))
return n.lu.MetadataBackend().Set(ctx, n, key, []byte(val))
}

// RemoveXattr removes an extended attribute from the write-through cache/node
@@ -112,7 +112,7 @@ func (n *Node) RemoveXattr(ctx context.Context, key string, acquireLock bool) er
if n.xattrsCache != nil {
delete(n.xattrsCache, key)
}
return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key, acquireLock)
return n.lu.MetadataBackend().Remove(ctx, n, key, acquireLock)
}

// XattrsWithReader returns the extended attributes of the node. If the attributes have already
@@ -131,9 +131,9 @@ func (n *Node) XattrsWithReader(ctx context.Context, r io.Reader) (Attributes, e
var attrs Attributes
var err error
if r != nil {
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(ctx, n.InternalPath(), r)
attrs, err = n.lu.MetadataBackend().AllWithLockedSource(ctx, n, r)
} else {
attrs, err = n.lu.MetadataBackend().All(ctx, n.InternalPath())
attrs, err = n.lu.MetadataBackend().All(ctx, n)
}
if err != nil {
return nil, err
@@ -166,7 +166,7 @@ func (n *Node) Xattr(ctx context.Context, key string) ([]byte, error) {
}

if n.xattrsCache == nil {
attrs, err := n.lu.MetadataBackend().All(ctx, path)
attrs, err := n.lu.MetadataBackend().All(ctx, n)
if err != nil {
return []byte{}, err
}
71
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/recycle.go
generated
vendored
@@ -37,6 +37,7 @@ import (
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
+	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree"
 	"github.com/opencloud-eu/reva/v2/pkg/storagespace"
 )
 
@@ -101,20 +102,21 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.R
 
 	trashRootPath := filepath.Join(tb.getRecycleRoot(spaceID), lookup.Pathify(key, 4, 2))
 	originalPath, _, timeSuffix, err := readTrashLink(trashRootPath)
+	originalNode := node.NewBaseNode(spaceID, key, tb.fs.lu)
 	if err != nil {
 		sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link")
 		return nil, err
 	}
 
 	origin := ""
-	attrs, err := tb.fs.lu.MetadataBackend().All(ctx, originalPath)
+	raw, err := tb.fs.lu.MetadataBackend().All(ctx, originalNode)
+	attrs := node.Attributes(raw)
 	if err != nil {
 		return items, err
 	}
 	// lookup origin path in extended attributes
-	if attrBytes, ok := attrs[prefixes.TrashOriginAttr]; ok {
-		origin = string(attrBytes)
-	} else {
+	origin = attrs.String(prefixes.TrashOriginAttr)
+	if origin == "" {
 		sublog.Error().Err(err).Str("spaceid", spaceID).Msg("could not read origin path, skipping")
 		return nil, err
 	}
@@ -130,24 +132,27 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.R
 		sublog.Error().Err(err).Msg("could not parse time format, ignoring")
 	}
 
-	var size int64
+	var size uint64
 	if relativePath == "" {
 		// this is the case when we want to directly list a file in the trashbin
-		nodeType := tb.fs.lu.TypeFromPath(ctx, originalPath)
-		switch nodeType {
+		typeInt, err := attrs.Int64(prefixes.TypeAttr)
+		if err != nil {
+			return items, err
+		}
+		switch provider.ResourceType(typeInt) {
 		case provider.ResourceType_RESOURCE_TYPE_FILE:
-			_, size, err = tb.fs.lu.ReadBlobIDAndSizeAttr(ctx, originalPath, nil)
+			size, err = attrs.UInt64(prefixes.BlobsizeAttr)
 			if err != nil {
 				return items, err
 			}
 		case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
-			size, err = tb.fs.lu.MetadataBackend().GetInt64(ctx, originalPath, prefixes.TreesizeAttr)
+			size, err = attrs.UInt64(prefixes.TreesizeAttr)
 			if err != nil {
 				return items, err
 			}
 		}
 		item := &provider.RecycleItem{
-			Type:         tb.fs.lu.TypeFromPath(ctx, originalPath),
+			Type:         provider.ResourceType(typeInt),
 			Size:         uint64(size),
 			Key:          filepath.Join(key, relativePath),
 			DeletionTime: deletionTime,
@@ -171,37 +176,44 @@ func (tb *DecomposedfsTrashbin) ListRecycle(ctx context.Context, ref *provider.R
 		return nil, err
 	}
 	for _, name := range names {
-		resolvedChildPath, err := filepath.EvalSymlinks(filepath.Join(childrenPath, name))
+		nodeID, err := node.ReadChildNodeFromLink(ctx, filepath.Join(childrenPath, name))
 		if err != nil {
-			sublog.Error().Err(err).Str("name", name).Msg("could not resolve symlink, skipping")
-			continue
+			sublog.Error().Err(err).Str("name", name).Msg("could not read child node")
+			provider.ResourceType_RESOURCE_TYPE_CONTAINER.Number()
 		}
+		childNode := node.NewBaseNode(spaceID, nodeID, tb.fs.lu)
 
 		// reset size
 		size = 0
 
-		nodeType := tb.fs.lu.TypeFromPath(ctx, resolvedChildPath)
-		switch nodeType {
+		raw, err := tb.fs.lu.MetadataBackend().All(ctx, childNode)
+		attrs := node.Attributes(raw)
+		typeInt, err := attrs.Int64(prefixes.TypeAttr)
+		if err != nil {
+			sublog.Error().Err(err).Str("name", name).Msg("could not read node type, skipping")
+			continue
+		}
+		switch provider.ResourceType(typeInt) {
 		case provider.ResourceType_RESOURCE_TYPE_FILE:
-			_, size, err = tb.fs.lu.ReadBlobIDAndSizeAttr(ctx, resolvedChildPath, nil)
+			size, err = attrs.UInt64(prefixes.BlobsizeAttr)
 			if err != nil {
 				sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping")
 				continue
 			}
 		case provider.ResourceType_RESOURCE_TYPE_CONTAINER:
-			size, err = tb.fs.lu.MetadataBackend().GetInt64(ctx, resolvedChildPath, prefixes.TreesizeAttr)
+			size, err = attrs.UInt64(prefixes.TreesizeAttr)
 			if err != nil {
 				sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping")
 				continue
 			}
 		case provider.ResourceType_RESOURCE_TYPE_INVALID:
-			sublog.Error().Err(err).Str("name", name).Str("resolvedChildPath", resolvedChildPath).Msg("invalid node type, skipping")
+			sublog.Error().Err(err).Str("name", name).Str("resolvedChildPath", filepath.Join(childrenPath, name)).Msg("invalid node type, skipping")
 			continue
 		}
 
 		item := &provider.RecycleItem{
-			Type:         nodeType,
-			Size:         uint64(size),
+			Type:         provider.ResourceType(typeInt),
+			Size:         size,
 			Key:          filepath.Join(key, relativePath, name),
 			DeletionTime: deletionTime,
 			Ref: &provider.Reference{
@@ -282,26 +294,33 @@ func (tb *DecomposedfsTrashbin) listTrashRoot(ctx context.Context, spaceID strin
 			continue
 		}
 
+		baseNode := node.NewBaseNode(spaceID, nodeID+node.TrashIDDelimiter+timeSuffix, tb.fs.lu)
+
 		md, err := os.Stat(nodePath)
 		if err != nil {
 			log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not stat trash item, skipping")
 			continue
 		}
 
-		attrs, err := tb.fs.lu.MetadataBackend().All(ctx, nodePath)
+		raw, err := tb.fs.lu.MetadataBackend().All(ctx, baseNode)
 		if err != nil {
 			log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get extended attributes, skipping")
 			continue
 		}
+		attrs := node.Attributes(raw)
 
-		nodeType := tb.fs.lu.TypeFromPath(ctx, nodePath)
-		if nodeType == provider.ResourceType_RESOURCE_TYPE_INVALID {
+		typeInt, err := attrs.Int64(prefixes.TypeAttr)
+		if err != nil {
+			log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get node type, skipping")
+			continue
+		}
+		if provider.ResourceType(typeInt) == provider.ResourceType_RESOURCE_TYPE_INVALID {
 			log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("invalid node type, skipping")
 			continue
 		}
 
 		item := &provider.RecycleItem{
-			Type: nodeType,
+			Type: provider.ResourceType(typeInt),
 			Size: uint64(md.Size()),
 			Key:  nodeID,
 		}
@@ -363,7 +382,7 @@ func (tb *DecomposedfsTrashbin) RestoreRecycleItem(ctx context.Context, ref *pro
 		targetNode = tn
 	}
 
-	rn, parent, restoreFunc, err := tb.fs.tp.RestoreRecycleItemFunc(ctx, ref.ResourceId.SpaceId, key, relativePath, targetNode)
+	rn, parent, restoreFunc, err := tb.fs.tp.(*tree.Tree).RestoreRecycleItemFunc(ctx, ref.ResourceId.SpaceId, key, relativePath, targetNode)
 	if err != nil {
 		return err
 	}
@@ -408,7 +427,7 @@ func (tb *DecomposedfsTrashbin) PurgeRecycleItem(ctx context.Context, ref *provi
 		return errtypes.BadRequest("missing reference, needs a space id")
 	}
 
-	rn, purgeFunc, err := tb.fs.tp.PurgeRecycleItemFunc(ctx, ref.ResourceId.OpaqueId, key, relativePath)
+	rn, purgeFunc, err := tb.fs.tp.(*tree.Tree).PurgeRecycleItemFunc(ctx, ref.ResourceId.OpaqueId, key, relativePath)
 	if err != nil {
 		if errors.Is(err, iofs.ErrNotExist) {
 			return errtypes.NotFound(key)
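
ListRecycle now pulls all extended attributes in a single All() call and reads typed values through node.Attributes helpers (attrs.String, attrs.Int64, attrs.UInt64) instead of issuing one backend round trip per attribute. A self-contained sketch of that accessor pattern — the Attributes type below re-implements the idea for illustration, and the "user.oc.*" keys follow the convention visible elsewhere in this diff rather than the exact prefixes constants:

package main

import (
	"fmt"
	"strconv"
)

// Attributes mirrors the shape of reva's node.Attributes: a raw
// map[string][]byte with typed accessors. Re-implemented here as a sketch.
type Attributes map[string][]byte

func (a Attributes) String(key string) string {
	return string(a[key])
}

func (a Attributes) Int64(key string) (int64, error) {
	return strconv.ParseInt(string(a[key]), 10, 64)
}

func (a Attributes) UInt64(key string) (uint64, error) {
	return strconv.ParseUint(string(a[key]), 10, 64)
}

func main() {
	// One All() call replaces several Get/GetInt64 round trips.
	attrs := Attributes{
		"user.oc.type":     []byte("1"),    // assumed key names for the demo
		"user.oc.blobsize": []byte("4096"),
	}
	typeInt, _ := attrs.Int64("user.oc.type")
	size, _ := attrs.UInt64("user.oc.blobsize")
	fmt.Println(typeInt, size) // 1 4096
}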
16 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/revisions.go (generated, vendored)
@@ -99,17 +99,16 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
 	}
 
 	// write lock node before copying metadata
-	f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600)
+	f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n), os.O_RDWR|os.O_CREATE, 0600)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		_ = f.Close()
-		_ = os.Remove(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()))
+		_ = os.Remove(fs.lu.MetadataBackend().LockfilePath(n))
 	}()
 
 	// move current version to new revision
-	nodePath := fs.lu.InternalPath(spaceID, kp[0])
 	mtime, err := n.GetMTime(ctx)
 	if err != nil {
 		log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("cannot read mtime")
@@ -123,7 +122,8 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
 
 	// restore revision
 	restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey)
-	if err := fs.tp.RestoreRevision(ctx, spaceID, kp[0], restoredRevisionPath); err != nil {
+	revisionNode := node.NewBaseNode(spaceID, revisionKey, fs.lu)
+	if err := fs.tp.RestoreRevision(ctx, revisionNode, n); err != nil {
 		return err
 	}
 
@@ -131,19 +131,19 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer
 	if err := os.Remove(restoredRevisionPath); err != nil {
 		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision, continuing")
 	}
-	if err := os.Remove(fs.lu.MetadataBackend().MetadataPath(restoredRevisionPath)); err != nil {
+	if err := os.Remove(fs.lu.MetadataBackend().MetadataPath(revisionNode)); err != nil {
 		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata, continuing")
 	}
-	if err := os.Remove(fs.lu.MetadataBackend().LockfilePath(restoredRevisionPath)); err != nil {
+	if err := os.Remove(fs.lu.MetadataBackend().LockfilePath(revisionNode)); err != nil {
 		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata lockfile, continuing")
 	}
-	if err := fs.lu.MetadataBackend().Purge(ctx, restoredRevisionPath); err != nil {
+	if err := fs.lu.MetadataBackend().Purge(ctx, revisionNode); err != nil {
 		log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not purge old revision from cache, continuing")
 	}
 
 	// revision 5, current 10 (restore a smaller blob) -> 5-10 = -5
 	// revision 10, current 5 (restore a bigger blob) -> 10-5 = +5
-	revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, nodePath, prefixes.BlobsizeAttr)
+	revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, revisionNode, prefixes.BlobsizeAttr)
 	if err != nil {
 		return errtypes.InternalError("failed to read blob size xattr from old revision")
 	}
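
RestoreRevision keeps its write-lock discipline but now derives the lockfile from the node. A runnable sketch of the lock-then-mutate pattern, assuming the lockedfile package is rogpeppe/go-internal's and that LockfilePath simply maps a node to a throwaway "<path>.lock" file:

package main

import (
	"fmt"
	"os"

	"github.com/rogpeppe/go-internal/lockedfile"
)

func main() {
	// Hypothetical lockfile path; the real one comes from
	// MetadataBackend().LockfilePath(n).
	lockPath := "/tmp/node-abc123.lock"

	f, err := lockedfile.OpenFile(lockPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer func() {
		_ = f.Close()           // releases the advisory lock
		_ = os.Remove(lockPath) // the lockfile itself is throwaway
	}()

	// ... copy metadata / swap revision blobs while holding the lock ...
}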
6 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go (generated, vendored)
@@ -741,7 +741,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
 	}
 
 	// invalidate cache
-	if err := fs.lu.MetadataBackend().Purge(ctx, n.InternalPath()); err != nil {
+	if err := fs.lu.MetadataBackend().Purge(ctx, n); err != nil {
 		return err
 	}
 
@@ -773,8 +773,8 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De
 	}
 
 	if err := fs.tp.DeleteBlob(&node.Node{
-		BlobID:  string(bid),
-		SpaceID: spaceID,
+		BaseNode: node.BaseNode{SpaceID: spaceID},
+		BlobID:   string(bid),
 	}); err != nil {
 		return err
 	}
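
The struct-literal churn in these hunks is a consequence of Node now embedding BaseNode: promoted fields read the same as before, but composite literals must initialize the embedded struct explicitly. A reduced, runnable illustration with stand-in types (not the vendored declarations):

package main

import "fmt"

// Reduced stand-ins: Node embeds BaseNode, which carries the identity
// fields (just SpaceID and ID here for brevity).
type BaseNode struct {
	SpaceID string
	ID      string
}

type Node struct {
	BaseNode
	BlobID   string
	Blobsize int64
}

func main() {
	// Before the refactor: Node{SpaceID: ..., BlobID: ...}.
	// After it, a composite literal must go through the embedded struct,
	// even though field access via promotion is unchanged.
	n := Node{
		BaseNode: BaseNode{SpaceID: "space-1"},
		BlobID:   "blob-42",
	}
	fmt.Println(n.SpaceID, n.BlobID) // promoted field access still works
}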
@@ -29,7 +29,6 @@ import (
 	"github.com/google/renameio/v2"
 	"github.com/google/uuid"
 	"github.com/opencloud-eu/reva/v2/pkg/appctx"
-	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
@@ -42,6 +41,12 @@ import (
 
 var _propagationGracePeriod = 3 * time.Minute
 
+type PropagationNode interface {
+	GetSpaceID() string
+	GetID() string
+	InternalPath() string
+}
+
 // AsyncPropagator implements asynchronous treetime & treesize propagation
 type AsyncPropagator struct {
 	treeSizeAccounting bool
@@ -122,7 +127,9 @@ func NewAsyncPropagator(treeSizeAccounting, treeTimeAccounting bool, o options.A
 
 			now := time.Now()
 			_ = os.Chtimes(changesDirPath, now, now)
-			p.propagate(context.Background(), parts[0], strings.TrimSuffix(parts[1], ".processing"), true, *log)
+
+			n := node.NewBaseNode(parts[0], strings.TrimSuffix(parts[1], ".processing"), lookup)
+			p.propagate(context.Background(), n, true, *log)
 		}()
 	}
 }
@@ -155,14 +162,14 @@ func (p AsyncPropagator) Propagate(ctx context.Context, n *node.Node, sizeDiff i
 		SyncTime: time.Now().UTC(),
 		SizeDiff: sizeDiff,
 	}
-	go p.queuePropagation(ctx, n.SpaceID, n.ParentID, c, log)
+	go p.queuePropagation(ctx, n, c, log)
 
 	return nil
 }
 
-func (p AsyncPropagator) queuePropagation(ctx context.Context, spaceID, nodeID string, change Change, log zerolog.Logger) {
+func (p AsyncPropagator) queuePropagation(ctx context.Context, n *node.Node, change Change, log zerolog.Logger) {
 	// add a change to the parent node
-	changePath := p.changesPath(spaceID, nodeID, uuid.New().String()+".mpk")
+	changePath := p.changesPath(n.SpaceID, n.ID, uuid.New().String()+".mpk")
 
 	data, err := msgpack.Marshal(change)
 	if err != nil {
@@ -203,7 +210,7 @@ func (p AsyncPropagator) queuePropagation(ctx context.Context, spaceID, nodeID s
 
 	log.Debug().Msg("propagating")
 	// add a change to the parent node
-	changeDirPath := p.changesPath(spaceID, nodeID, "")
+	changeDirPath := p.changesPath(n.SpaceID, n.ID, "")
 
 	// first rename the existing node dir
 	err = os.Rename(changeDirPath, changeDirPath+".processing")
@@ -215,11 +222,11 @@ func (p AsyncPropagator) queuePropagation(ctx context.Context, spaceID, nodeID s
 		// -> ignore, the previous propagation will pick the new changes up
 		return
 	}
-	p.propagate(ctx, spaceID, nodeID, false, log)
+	p.propagate(ctx, n, false, log)
 }
 
-func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string, recalculateTreeSize bool, log zerolog.Logger) {
-	changeDirPath := p.changesPath(spaceID, nodeID, "")
+func (p AsyncPropagator) propagate(ctx context.Context, pn PropagationNode, recalculateTreeSize bool, log zerolog.Logger) {
+	changeDirPath := p.changesPath(pn.GetSpaceID(), pn.GetID(), "")
 	processingPath := changeDirPath + ".processing"
 
 	cleanup := func() {
@@ -278,10 +285,9 @@ func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string,
 
 	var f *lockedfile.File
 	// lock parent before reading treesize or tree time
-	nodePath := filepath.Join(p.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "nodes", lookup.Pathify(nodeID, 4, 2))
 
 	_, subspan = tracer.Start(ctx, "lockedfile.OpenFile")
-	lockFilepath := p.lookup.MetadataBackend().LockfilePath(nodePath)
+	lockFilepath := p.lookup.MetadataBackend().LockfilePath(pn)
 	f, err = lockedfile.OpenFile(lockFilepath, os.O_RDWR|os.O_CREATE, 0600)
 	subspan.End()
 	if err != nil {
@@ -301,8 +307,8 @@ func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string,
 	}()
 
 	_, subspan = tracer.Start(ctx, "node.ReadNode")
-	var n *node.Node
-	if n, err = node.ReadNode(ctx, p.lookup, spaceID, nodeID, false, nil, false); err != nil {
+	n, err := node.ReadNode(ctx, p.lookup, pn.GetSpaceID(), pn.GetID(), false, nil, false)
+	if err != nil {
 		log.Error().Err(err).
 			Msg("Propagation failed. Could not read node.")
 		cleanup()
@@ -366,7 +372,7 @@ func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string,
 	case recalculateTreeSize || metadata.IsAttrUnset(err):
 		// fallback to calculating the treesize
 		log.Warn().Msg("treesize attribute unset, falling back to calculating the treesize")
-		newSize, err = calculateTreeSize(ctx, p.lookup, n.InternalPath())
+		newSize, err = calculateTreeSize(ctx, p.lookup, n)
 		if err != nil {
 			log.Error().Err(err).
 				Msg("Error when calculating treesize of node.") // FIXME wat?
@@ -414,7 +420,7 @@ func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string,
 	cleanup()
 
 	if !n.IsSpaceRoot(ctx) {
-		p.queuePropagation(ctx, n.SpaceID, n.ParentID, pc, log)
+		p.queuePropagation(ctx, n, pc, log)
 	}
 
 	// Check for a changes dir that might have been added meanwhile and pick it up
@@ -430,7 +436,7 @@ func (p AsyncPropagator) propagate(ctx context.Context, spaceID, nodeID string,
 			// -> ignore, the previous propagation will pick the new changes up
 			return
 		}
-		p.propagate(ctx, spaceID, nodeID, false, log)
+		p.propagate(ctx, n, false, log)
 	}
 }
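
The async propagator now accepts anything satisfying the new PropagationNode interface, so a lightweight BaseNode can stand in for a fully read node. A sketch of a type satisfying it; the path layout in InternalPath is an assumption (the real lookup shards the segments via lookup.Pathify):

package main

import (
	"fmt"
	"path/filepath"
)

// PropagationNode is copied from the hunk above; BaseNode below is a
// reduced stand-in showing how a type satisfies it.
type PropagationNode interface {
	GetSpaceID() string
	GetID() string
	InternalPath() string
}

type BaseNode struct {
	SpaceID string
	ID      string
	root    string
}

func (n BaseNode) GetSpaceID() string { return n.SpaceID }
func (n BaseNode) GetID() string      { return n.ID }
func (n BaseNode) InternalPath() string {
	// Assumed flat layout <root>/spaces/<space>/nodes/<id>; the vendored
	// code shards these segments into subdirectories.
	return filepath.Join(n.root, "spaces", n.SpaceID, "nodes", n.ID)
}

func propagate(pn PropagationNode) {
	fmt.Println("propagating", pn.GetSpaceID(), pn.GetID(), pn.InternalPath())
}

func main() {
	propagate(BaseNode{SpaceID: "s1", ID: "n1", root: "/var/lib/storage"})
}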
@@ -54,7 +54,9 @@ func New(lookup node.PathLookup, o *options.Options, log *zerolog.Logger) Propag
 	}
 }
 
-func calculateTreeSize(ctx context.Context, lookup node.PathLookup, childrenPath string) (uint64, error) {
+func calculateTreeSize(ctx context.Context, lookup node.PathLookup, n *node.Node) (uint64, error) {
+	childrenPath := n.InternalPath()
+
 	ctx, span := tracer.Start(ctx, "calculateTreeSize")
 	defer span.End()
 	var size uint64
@@ -73,14 +75,15 @@ func calculateTreeSize(ctx context.Context, lookup node.PathLookup, childrenPath
 	}
 	for i := range names {
 		cPath := filepath.Join(childrenPath, names[i])
-		resolvedPath, err := filepath.EvalSymlinks(cPath)
-		if err != nil {
-			appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not resolve child entry symlink")
-			continue // continue after an error
-		}
-
-		// raw read of the attributes for performance reasons
-		attribs, err := lookup.MetadataBackend().All(ctx, resolvedPath)
+		nodeID, err := node.ReadChildNodeFromLink(ctx, cPath)
+		if err != nil {
+			appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read child node")
+			continue // continue after an error
+		}
+		n := node.NewBaseNode(n.SpaceID, nodeID, lookup)
+		attribs, err := lookup.MetadataBackend().All(ctx, n)
 		if err != nil {
 			appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read attributes of child entry")
 			continue // continue after an error
@@ -98,7 +98,8 @@ func (p SyncPropagator) propagateItem(ctx context.Context, n *node.Node, sTime t
 	// lock parent before reading treesize or tree time
 
 	_, subspan := tracer.Start(ctx, "lockedfile.OpenFile")
-	parentFilename := p.lookup.MetadataBackend().LockfilePath(n.ParentPath())
+	parentNode := node.NewBaseNode(n.SpaceID, n.ParentID, p.lookup)
+	parentFilename := p.lookup.MetadataBackend().LockfilePath(parentNode)
 	f, err := lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
 	subspan.End()
 	if err != nil {
@@ -174,7 +175,7 @@ func (p SyncPropagator) propagateItem(ctx context.Context, n *node.Node, sTime t
 	case metadata.IsAttrUnset(err):
 		// fallback to calculating the treesize
 		log.Warn().Msg("treesize attribute unset, falling back to calculating the treesize")
-		newSize, err = calculateTreeSize(ctx, p.lookup, n.InternalPath())
+		newSize, err = calculateTreeSize(ctx, p.lookup, n)
 		if err != nil {
 			return n, true, err
 		}
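
Both propagators now resolve children via node.ReadChildNodeFromLink instead of filepath.EvalSymlinks. Judging from the private helper it supersedes (removed from tree.go below), the node ID is decoded from the symlink target string rather than resolved on disk — a re-implementation sketch (tracing and ctx dropped for brevity):

package main

import (
	"fmt"
	"os"
	"strings"
)

// readChildNodeFromLink mirrors the helper removed from tree.go: a child
// entry is a relative symlink into the sharded nodes tree, and the node ID
// is recovered by stripping the leading "../" and "." runs and the path
// separators of the sharding.
func readChildNodeFromLink(path string) (string, error) {
	link, err := os.Readlink(path)
	if err != nil {
		return "", err
	}
	nodeID := strings.TrimLeft(link, "/.")
	nodeID = strings.ReplaceAll(nodeID, "/", "")
	return nodeID, nil
}

func main() {
	// Demonstrate the string transformation on a synthetic link target.
	link := "../../../../../ab/cd/ef/gh/ijklmnop"
	nodeID := strings.ReplaceAll(strings.TrimLeft(link, "/."), "/", "")
	fmt.Println(nodeID) // abcdefghijklmnop
}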
27 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/revisions.go (generated, vendored)
@@ -33,6 +33,7 @@ import (
 	"github.com/opencloud-eu/reva/v2/pkg/appctx"
 	"github.com/opencloud-eu/reva/v2/pkg/errtypes"
+	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
 	"github.com/opencloud-eu/reva/v2/pkg/storagespace"
@@ -49,7 +50,8 @@ import (
 
 // CreateVersion creates a new version of the node
 func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string, f *lockedfile.File) (string, error) {
-	versionPath := tp.lookup.VersionPath(n.SpaceID, n.ID, version)
+	versionNode := node.NewBaseNode(n.SpaceID, n.ID+node.RevisionIDDelimiter+version, tp.lookup)
+	versionPath := versionNode.InternalPath()
 
 	err := os.MkdirAll(filepath.Dir(versionPath), 0700)
 	if err != nil {
@@ -64,7 +66,7 @@ func (tp *Tree) CreateRevision(ctx context.Context, n *node.Node, version string
 	defer vf.Close()
 
 	// copy blob metadata to version node
-	if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n.InternalPath(), versionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) {
+	if err := tp.lookup.CopyMetadataWithSourceLock(ctx, n, versionNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
 		return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
 			attributeName == prefixes.TypeAttr ||
 			attributeName == prefixes.BlobIDAttr ||
@@ -120,7 +122,8 @@ func (tp *Tree) ListRevisions(ctx context.Context, ref *provider.Reference) (rev
 			Key:   n.ID + node.RevisionIDDelimiter + parts[1],
 			Mtime: uint64(mtime.Unix()),
 		}
-		_, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, items[i], nil)
+		baseNode := node.NewBaseNode(n.SpaceID, n.ID+node.RevisionIDDelimiter+parts[1], tp.lookup)
+		_, blobSize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, baseNode, nil)
 		if err != nil {
 			appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0")
 		}
@@ -182,14 +185,17 @@ func (tp *Tree) DownloadRevision(ctx context.Context, ref *provider.Reference, r
 		return nil, nil, errtypes.NotFound(f)
 	}
 
-	contentPath := tp.lookup.InternalPath(spaceID, revisionKey)
-
-	blobid, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, contentPath, nil)
+	baseNode := node.NewBaseNode(spaceID, revisionKey, tp.lookup)
+	blobid, blobsize, err := tp.lookup.ReadBlobIDAndSizeAttr(ctx, baseNode, nil)
 	if err != nil {
 		return nil, nil, errors.Wrapf(err, "Decomposedfs: could not read blob id and size for revision '%s' of node '%s'", kp[1], n.ID)
 	}
 
-	revisionNode := node.Node{SpaceID: spaceID, BlobID: blobid, Blobsize: blobsize} // blobsize is needed for the s3ng blobstore
+	revisionNode := node.Node{
+		BaseNode: node.BaseNode{SpaceID: spaceID},
+		BlobID:   blobid,
+		Blobsize: blobsize,
+	} // blobsize is needed for the s3ng blobstore
 
 	ri, err := n.AsResourceInfo(ctx, rp, nil, []string{"size", "mimetype", "etag"}, true)
 	if err != nil {
@@ -274,9 +280,8 @@ func (tp *Tree) getRevisionNode(ctx context.Context, ref *provider.Reference, re
 	return n, nil
 }
 
-func (tp *Tree) RestoreRevision(ctx context.Context, spaceID, nodeID, source string) error {
-	target := tp.lookup.InternalPath(spaceID, nodeID)
-	err := tp.lookup.CopyMetadata(ctx, source, target, func(attributeName string, value []byte) (newValue []byte, copy bool) {
+func (tp *Tree) RestoreRevision(ctx context.Context, sourceNode, targetNode metadata.MetadataNode) error {
+	err := tp.lookup.CopyMetadata(ctx, sourceNode, targetNode, func(attributeName string, value []byte) (newValue []byte, copy bool) {
 		return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
 			attributeName == prefixes.TypeAttr ||
 			attributeName == prefixes.BlobIDAttr ||
@@ -286,7 +291,7 @@ func (tp *Tree) RestoreRevision(ctx context.Context, sourceNode, targetNode metad
 		return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error())
 	}
 	// always set the node mtime to the current time
-	err = tp.lookup.MetadataBackend().SetMultiple(ctx, target,
+	err = tp.lookup.MetadataBackend().SetMultiple(ctx, targetNode,
 		map[string][]byte{
 			prefixes.MTimeAttr: []byte(time.Now().UTC().Format(time.RFC3339Nano)),
 		},
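
Revisions are now addressed as plain BaseNodes whose ID is the original node ID plus node.RevisionIDDelimiter plus the timestamp, so the on-disk version path simply falls out of InternalPath(). A small sketch of that key construction — the ".REV." delimiter value is assumed for the demo; use the package constant in real code:

package main

import (
	"fmt"
	"strings"
	"time"
)

// RevisionIDDelimiter is assumed to be ".REV." here purely for illustration.
const RevisionIDDelimiter = ".REV."

func main() {
	nodeID := "ab1cd2ef"
	version := time.Date(2025, 2, 20, 8, 41, 42, 0, time.UTC).Format(time.RFC3339Nano)

	// CreateRevision builds a BaseNode with this composite ID and derives
	// the path from it, instead of using a separate VersionPath helper.
	revisionID := nodeID + RevisionIDDelimiter + version
	fmt.Println(revisionID)

	// Splitting on the delimiter recovers the original node ID, the same
	// move RestoreRecycleItemFunc makes with TrashIDDelimiter.
	parts := strings.SplitN(revisionID, RevisionIDDelimiter, 2)
	fmt.Println(parts[0] == nodeID) // true
}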
132 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/tree/tree.go (generated, vendored)
@@ -326,18 +326,6 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
 	return nil
 }
 
-func readChildNodeFromLink(ctx context.Context, path string) (string, error) {
-	_, span := tracer.Start(ctx, "readChildNodeFromLink")
-	defer span.End()
-	link, err := os.Readlink(path)
-	if err != nil {
-		return "", err
-	}
-	nodeID := strings.TrimLeft(link, "/.")
-	nodeID = strings.ReplaceAll(nodeID, "/", "")
-	return nodeID, nil
-}
-
 // ListFolder lists the content of a folder node
 func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) {
 	ctx, span := tracer.Start(ctx, "ListFolder")
@@ -392,7 +380,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
 			path := filepath.Join(dir, name)
 			nodeID := getNodeIDFromCache(ctx, path, t.idCache)
 			if nodeID == "" {
-				nodeID, err = readChildNodeFromLink(ctx, path)
+				nodeID, err = node.ReadChildNodeFromLink(ctx, path)
 				if err != nil {
 					return err
 				}
@@ -502,7 +490,8 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
 	// at this point we have a symlink pointing to a non existing destination, which is fine
 
 	// rename the trashed node so it is not picked up when traversing up the tree and matches the symlink
-	trashPath := nodePath + node.TrashIDDelimiter + deletionTime
+	trashNode := node.NewBaseNode(n.SpaceID, n.ID+node.TrashIDDelimiter+deletionTime, t.lookup)
+	trashPath := trashNode.InternalPath()
 	err = os.Rename(nodePath, trashPath)
 	if err != nil {
 		// To roll back changes
@@ -511,7 +500,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) {
 		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
 		return
 	}
-	err = t.lookup.MetadataBackend().Rename(nodePath, trashPath)
+	err = t.lookup.MetadataBackend().Rename(n, trashNode)
 	if err != nil {
 		_ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true)
 		_ = os.Rename(trashPath, nodePath)
@@ -540,7 +529,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
 	defer span.End()
 	logger := appctx.GetLogger(ctx)
 
-	recycleNode, trashItem, deletedNodePath, origin, err := t.readRecycleItem(ctx, spaceid, key, trashPath)
+	recycleNode, trashItem, origin, err := t.readRecycleItem(ctx, spaceid, key, trashPath)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -571,22 +560,23 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
 			return errtypes.AlreadyExists("origin already exists")
 		}
 
+		parts := strings.SplitN(recycleNode.ID, node.TrashIDDelimiter, 2)
+		originalId := parts[0]
+		restoreNode := node.NewBaseNode(targetNode.SpaceID, originalId, t.lookup)
+
 		// add the entry for the parent dir
-		err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
+		err = os.Symlink("../../../../../"+lookup.Pathify(originalId, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name))
 		if err != nil {
 			return err
 		}
 
-		// rename to node only name, so it is picked up by id
-		nodePath := recycleNode.InternalPath()
-
 		// attempt to rename only if we're not in a subfolder
-		if deletedNodePath != nodePath {
-			err = os.Rename(deletedNodePath, nodePath)
+		if recycleNode.ID != restoreNode.ID {
+			err = os.Rename(recycleNode.InternalPath(), restoreNode.InternalPath())
 			if err != nil {
 				return err
 			}
-			err = t.lookup.MetadataBackend().Rename(deletedNodePath, nodePath)
+			err = t.lookup.MetadataBackend().Rename(recycleNode, restoreNode)
 			if err != nil {
 				return err
 			}
@@ -599,7 +589,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa
 		// set ParentidAttr to restorePath's node parent id
 		attrs.SetString(prefixes.ParentidAttr, targetNode.ParentID)
 
-		if err = recycleNode.SetXattrsWithContext(ctx, attrs, true); err != nil {
+		if err = t.lookup.MetadataBackend().SetMultiple(ctx, restoreNode, map[string][]byte(attrs), true); err != nil {
 			return errors.Wrap(err, "Decomposedfs: could not update recycle node")
 		}
@@ -641,41 +631,30 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa
 	defer span.End()
 	logger := appctx.GetLogger(ctx)
 
-	rn, trashItem, deletedNodePath, _, err := t.readRecycleItem(ctx, spaceid, key, path)
+	recycleNode, trashItem, _, err := t.readRecycleItem(ctx, spaceid, key, path)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	ts := ""
-	timeSuffix := strings.SplitN(filepath.Base(deletedNodePath), node.TrashIDDelimiter, 2)
-	if len(timeSuffix) == 2 {
-		ts = timeSuffix[1]
-	}
-
 	fn := func() error {
-
-		if err := t.removeNode(ctx, deletedNodePath, ts, rn); err != nil {
+		if err := t.removeNode(ctx, recycleNode); err != nil {
 			return err
 		}
 
 		// delete item link in trash
-		deletePath := trashItem
-		if path != "" && path != "/" {
-			resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem)
-			if err != nil {
-				return errors.Wrap(err, "Decomposedfs: could not resolve trash root")
-			}
-			deletePath = filepath.Join(resolvedTrashRoot, path)
-		}
-		if err = utils.RemoveItem(deletePath); err != nil {
-			logger.Error().Err(err).Str("deletePath", deletePath).Msg("error deleting trash item")
+		if err = utils.RemoveItem(trashItem); err != nil {
+			logger.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item")
 			return err
 		}
 
 		return nil
 	}
 
-	return rn, fn, nil
+	return recycleNode, fn, nil
 }
 
 // InitNewNode initializes a new node
@@ -693,7 +672,7 @@ func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (met
 
 	// create and write lock new node metadata
 	_, subspan = tracer.Start(ctx, "metadata.Lock")
-	unlock, err := t.lookup.MetadataBackend().Lock(n.InternalPath())
+	unlock, err := t.lookup.MetadataBackend().Lock(n)
 	subspan.End()
 	if err != nil {
 		return nil, err
@@ -737,20 +716,17 @@ func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (met
 	return unlock, nil
 }
 
-func (t *Tree) removeNode(ctx context.Context, path, timeSuffix string, n *node.Node) error {
+func (t *Tree) removeNode(ctx context.Context, n *node.Node) error {
+	path := n.InternalPath()
 	logger := appctx.GetLogger(ctx)
 
-	if timeSuffix != "" {
-		n.ID = n.ID + node.TrashIDDelimiter + timeSuffix
-	}
-
 	if n.IsDir(ctx) {
 		item, err := t.ListFolder(ctx, n)
 		if err != nil {
 			logger.Error().Err(err).Str("path", path).Msg("error listing folder")
 		} else {
 			for _, child := range item {
-				if err := t.removeNode(ctx, child.InternalPath(), "", child); err != nil {
+				if err := t.removeNode(ctx, child); err != nil {
 					return err
 				}
 			}
@@ -763,8 +739,8 @@ func (t *Tree) removeNode(ctx context.Context, n *node.Node) error {
 		return err
 	}
 
-	if err := t.lookup.MetadataBackend().Purge(ctx, path); err != nil {
-		logger.Error().Err(err).Str("path", t.lookup.MetadataBackend().MetadataPath(path)).Msg("error purging node metadata")
+	if err := t.lookup.MetadataBackend().Purge(ctx, n); err != nil {
+		logger.Error().Err(err).Str("path", t.lookup.MetadataBackend().MetadataPath(n)).Msg("error purging node metadata")
 		return err
 	}
 
@@ -777,7 +753,8 @@ func (t *Tree) removeNode(ctx context.Context, n *node.Node) error {
 	}
 
 	// delete revisions
-	revs, err := filepath.Glob(n.InternalPath() + node.RevisionIDDelimiter + "*")
+	originalNodeID := nodeIDRegep.ReplaceAllString(n.InternalPath(), "$1")
+	revs, err := filepath.Glob(originalNodeID + node.RevisionIDDelimiter + "*")
 	if err != nil {
 		logger.Error().Err(err).Str("path", n.InternalPath()+node.RevisionIDDelimiter+"*").Msg("glob failed badly")
 		return err
@@ -787,7 +764,11 @@ func (t *Tree) removeNode(ctx context.Context, n *node.Node) error {
 			continue
 		}
 
-		bID, _, err := t.lookup.ReadBlobIDAndSizeAttr(ctx, rev, nil)
+		revID := nodeFullIDRegep.ReplaceAllString(rev, "$1")
+		revID = strings.ReplaceAll(revID, "/", "")
+		revNode := node.NewBaseNode(n.SpaceID, revID, t.lookup)
+
+		bID, _, err := t.lookup.ReadBlobIDAndSizeAttr(ctx, revNode, nil)
 		if err != nil {
 			logger.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute")
 			return err
@@ -799,7 +780,10 @@ func (t *Tree) removeNode(ctx context.Context, n *node.Node) error {
 		}
 
 		if bID != "" {
-			if err := t.DeleteBlob(&node.Node{SpaceID: n.SpaceID, BlobID: bID}); err != nil {
+			if err := t.DeleteBlob(&node.Node{
+				BaseNode: node.BaseNode{
+					SpaceID: n.SpaceID,
+				}, BlobID: bID}); err != nil {
 				logger.Error().Err(err).Str("revision", rev).Str("blobID", bID).Msg("error removing revision node blob")
 				return err
 			}
@@ -883,30 +867,31 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
 }
 
 var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`)
+var nodeFullIDRegep = regexp.MustCompile(`.*/nodes/(.*)`)
 
 // TODO refactor the returned params into Node properties? would make all the path transformations go away...
-func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) {
+func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, origin string, err error) {
 	_, span := tracer.Start(ctx, "readRecycleItem")
 	defer span.End()
 	logger := appctx.GetLogger(ctx)
 
 	if key == "" {
-		return nil, "", "", "", errtypes.InternalError("key is empty")
+		return nil, "", "", errtypes.InternalError("key is empty")
 	}
 
 	backend := t.lookup.MetadataBackend()
 	var nodeID string
 
 	trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2))
-	resolvedTrashItem, err := filepath.EvalSymlinks(trashItem)
+	resolvedTrashRootNodePath, err := filepath.EvalSymlinks(trashItem)
 	if err != nil {
 		return
 	}
-	deletedNodePath, err = filepath.EvalSymlinks(filepath.Join(resolvedTrashItem, path))
+	recycleNodePath, err := filepath.EvalSymlinks(filepath.Join(resolvedTrashRootNodePath, path))
 	if err != nil {
 		return
 	}
-	nodeID = nodeIDRegep.ReplaceAllString(deletedNodePath, "$1")
+	nodeID = nodeFullIDRegep.ReplaceAllString(recycleNodePath, "$1")
 	nodeID = strings.ReplaceAll(nodeID, "/", "")
 
 	recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup)
@@ -914,34 +899,36 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
 	if err != nil {
 		return
 	}
-	recycleNode.SetType(t.lookup.TypeFromPath(ctx, deletedNodePath))
+	raw, err := t.lookup.MetadataBackend().All(ctx, recycleNode)
+	if err != nil {
+		return
+	}
+	attrs := node.Attributes(raw)
 
 	var attrBytes []byte
-	if recycleNode.Type(ctx) == provider.ResourceType_RESOURCE_TYPE_FILE {
+	typeInt, err := attrs.Int64(prefixes.TypeAttr)
+	if provider.ResourceType(typeInt) == provider.ResourceType_RESOURCE_TYPE_FILE {
 		// lookup blobID in extended attributes
-		if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.BlobIDAttr); err == nil {
-			recycleNode.BlobID = string(attrBytes)
-		} else {
+		recycleNode.BlobID = attrs.String(prefixes.BlobIDAttr)
+		if recycleNode.BlobID == "" {
			return
 		}
 
 		// lookup blobSize in extended attributes
-		if recycleNode.Blobsize, err = backend.GetInt64(ctx, deletedNodePath, prefixes.BlobsizeAttr); err != nil {
+		if recycleNode.Blobsize, err = attrs.Int64(prefixes.BlobsizeAttr); err != nil {
 			return
 		}
 	}
 
 	// lookup parent id in extended attributes
-	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.ParentidAttr); err == nil {
-		recycleNode.ParentID = string(attrBytes)
-	} else {
+	recycleNode.ParentID = attrs.String(prefixes.ParentidAttr)
+	if recycleNode.ParentID == "" {
 		return
 	}
 
 	// lookup name in extended attributes
-	if attrBytes, err = backend.Get(ctx, deletedNodePath, prefixes.NameAttr); err == nil {
-		recycleNode.Name = string(attrBytes)
-	} else {
+	recycleNode.Name = attrs.String(prefixes.NameAttr)
+	if recycleNode.Name == "" {
 		return
 	}
 
@@ -949,10 +936,11 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (
 	origin = "/"
 
 	// lookup origin path in extended attributes
-	if attrBytes, err = backend.Get(ctx, resolvedTrashItem, prefixes.TrashOriginAttr); err == nil {
+	rootNode := node.NewBaseNode(spaceID, nodeID, t.lookup)
+	if attrBytes, err = backend.Get(ctx, rootNode, prefixes.TrashOriginAttr); err == nil {
 		origin = filepath.Join(string(attrBytes), path)
 	} else {
-		logger.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /")
+		logger.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", recycleNodePath).Msg("could not read origin path, restoring to /")
 	}
 
 	return
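
tree.go now keeps two regexes: the existing nodeIDRegep stops at the first dot, dropping trash/revision suffixes, while the new nodeFullIDRegep keeps the full suffixed ID. A quick runnable check of the difference (".T." standing in for node.TrashIDDelimiter plus the deletion time):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	nodeIDRegep     = regexp.MustCompile(`.*/nodes/([^.]*).*`) // stops at the first '.'
	nodeFullIDRegep = regexp.MustCompile(`.*/nodes/(.*)`)      // keeps any suffix
)

func main() {
	// A trashed node path in the sharded layout (synthetic example).
	p := "/root/spaces/s1/nodes/ab/cd/ef/gh/rest.T.2025-02-20T08:41:42Z"

	short := strings.ReplaceAll(nodeIDRegep.ReplaceAllString(p, "$1"), "/", "")
	full := strings.ReplaceAll(nodeFullIDRegep.ReplaceAllString(p, "$1"), "/", "")

	fmt.Println(short) // abcdefghrest — suffix stripped
	fmt.Println(full)  // abcdefghrest.T.2025-02-20T08:41:42Z — suffix kept
}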
24 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/store.go (generated, vendored)
@@ -304,10 +304,9 @@ func (store DecomposedFsStore) CreateNodeForUpload(ctx context.Context, session
 func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *DecomposedFsSession, n *node.Node, spaceID string, fsize uint64) (metadata.UnlockFunc, error) {
 	_, span := tracer.Start(ctx, "updateExistingNode")
 	defer span.End()
-	targetPath := n.InternalPath()
 
 	// write lock existing node before reading any metadata
-	f, err := lockedfile.OpenFile(store.lu.MetadataBackend().LockfilePath(targetPath), os.O_RDWR|os.O_CREATE, 0600)
+	f, err := lockedfile.OpenFile(store.lu.MetadataBackend().LockfilePath(n), os.O_RDWR|os.O_CREATE, 0600)
 	if err != nil {
 		return nil, err
 	}
@@ -365,7 +364,9 @@ func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *
 
 	if !store.disableVersioning {
 		span.AddEvent("CreateVersion")
-		versionPath, err := session.store.tp.CreateRevision(ctx, n, oldNodeMtime.UTC().Format(time.RFC3339Nano), f)
+		timestamp := oldNodeMtime.UTC().Format(time.RFC3339Nano)
+		versionID := n.ID + node.RevisionIDDelimiter + timestamp
+		versionPath, err := session.store.tp.CreateRevision(ctx, n, timestamp, f)
 		if err != nil {
 			if !errors.Is(err, os.ErrExist) {
 				return unlock, err
@@ -373,29 +374,30 @@ func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *
 
 			// a revision with this mtime does already exist.
 			// If the blobs are the same we can just delete the old one
-			if err := validateChecksums(ctx, old, session, versionPath); err != nil {
+			versionNode := node.NewBaseNode(n.SpaceID, versionID, session.store.lu)
+			if err := validateChecksums(ctx, old, session, versionNode); err != nil {
 				return unlock, err
 			}
 
 			// delete old blob
-			bID, _, err := session.store.lu.ReadBlobIDAndSizeAttr(ctx, versionPath, nil)
+			bID, _, err := session.store.lu.ReadBlobIDAndSizeAttr(ctx, versionNode, nil)
 			if err != nil {
 				return unlock, err
 			}
-			if err := session.store.tp.DeleteBlob(&node.Node{BlobID: bID, SpaceID: n.SpaceID}); err != nil {
+			if err := session.store.tp.DeleteBlob(&node.Node{BaseNode: node.BaseNode{SpaceID: n.SpaceID}, BlobID: bID}); err != nil {
 				return unlock, err
 			}
 
 			// clean revision file
-			if versionPath, err = session.store.tp.CreateRevision(ctx, n, oldNodeMtime.UTC().Format(time.RFC3339Nano), f); err != nil {
+			if versionPath, err = session.store.tp.CreateRevision(ctx, n, timestamp, f); err != nil {
 				return unlock, err
 			}
 		}
 
-		session.info.MetaData["versionsPath"] = versionPath
+		session.info.MetaData["versionID"] = versionID
 		// keep mtime from previous version
 		span.AddEvent("os.Chtimes")
-		if err := os.Chtimes(session.info.MetaData["versionsPath"], oldNodeMtime, oldNodeMtime); err != nil {
+		if err := os.Chtimes(versionPath, oldNodeMtime, oldNodeMtime); err != nil {
 			return unlock, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err))
 		}
 	}
@@ -405,7 +407,7 @@ func (store DecomposedFsStore) updateExistingNode(ctx context.Context, session *
 	return unlock, nil
 }
 
-func validateChecksums(ctx context.Context, n *node.Node, session *DecomposedFsSession, versionPath string) error {
+func validateChecksums(ctx context.Context, n *node.Node, session *DecomposedFsSession, versionNode metadata.MetadataNode) error {
 	for _, t := range []string{"md5", "sha1", "adler32"} {
 		key := prefixes.ChecksumPrefix + t
 
@@ -414,7 +416,7 @@ func validateChecksums(ctx context.Context, n *node.Node, session *DecomposedFsS
 			return err
 		}
 
-		revisionChecksum, err := session.store.lu.MetadataBackend().Get(ctx, versionPath, key)
+		revisionChecksum, err := session.store.lu.MetadataBackend().Get(ctx, versionNode, key)
 		if err != nil {
 			return err
 		}
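
validateChecksums now takes a metadata.MetadataNode for the revision, but the comparison itself is unchanged: the upload's md5/sha1/adler32 attributes must match the stored revision's. A reduced sketch of that loop over plain attribute maps — the checksum key prefix here is an assumption, not the prefixes constant:

package main

import (
	"bytes"
	"fmt"
)

// checksumPrefix stands in for prefixes.ChecksumPrefix in this sketch.
const checksumPrefix = "user.oc.cs."

// validateChecksums compares the stored checksum attributes of two
// metadata maps, mirroring the shape of the vendored helper.
func validateChecksums(nodeAttrs, revisionAttrs map[string][]byte) error {
	for _, t := range []string{"md5", "sha1", "adler32"} {
		key := checksumPrefix + t
		if !bytes.Equal(nodeAttrs[key], revisionAttrs[key]) {
			return fmt.Errorf("%s checksum mismatch", t)
		}
	}
	return nil
}

func main() {
	a := map[string][]byte{"user.oc.cs.md5": []byte("d41d8cd9")}
	b := map[string][]byte{"user.oc.cs.md5": []byte("d41d8cd9")}
	fmt.Println(validateChecksums(a, b)) // <nil>
}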
14 vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go (generated, vendored)
@@ -325,20 +325,22 @@ func (session *DecomposedFsSession) Cleanup(revertNodeMetadata, cleanBin, cleanI
 	if err != nil {
 		appctx.GetLogger(ctx).Error().Err(err).Str("sessionid", session.ID()).Msg("reading node for session failed")
 	} else {
-		if session.NodeExists() && session.info.MetaData["versionsPath"] != "" {
-			p := session.info.MetaData["versionsPath"]
-			if err := session.store.lu.CopyMetadata(ctx, p, n.InternalPath(), func(attributeName string, value []byte) (newValue []byte, copy bool) {
+		if session.NodeExists() && session.info.MetaData["versionID"] != "" {
+			versionID := session.info.MetaData["versionID"]
+			revisionNode := node.NewBaseNode(n.SpaceID, versionID, session.store.lu)
+
+			if err := session.store.lu.CopyMetadata(ctx, revisionNode, n, func(attributeName string, value []byte) (newValue []byte, copy bool) {
 				return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
 					attributeName == prefixes.TypeAttr ||
 					attributeName == prefixes.BlobIDAttr ||
 					attributeName == prefixes.BlobsizeAttr ||
 					attributeName == prefixes.MTimeAttr
 			}, true); err != nil {
-				appctx.GetLogger(ctx).Info().Str("versionpath", p).Str("nodepath", n.InternalPath()).Err(err).Msg("renaming version node failed")
+				appctx.GetLogger(ctx).Info().Str("version", versionID).Str("nodepath", n.InternalPath()).Err(err).Msg("renaming version node failed")
 			}
 
-			if err := os.RemoveAll(p); err != nil {
-				appctx.GetLogger(ctx).Info().Str("versionpath", p).Str("nodepath", n.InternalPath()).Err(err).Msg("error removing version")
+			if err := os.RemoveAll(revisionNode.InternalPath()); err != nil {
+				appctx.GetLogger(ctx).Info().Str("version", versionID).Str("nodepath", n.InternalPath()).Err(err).Msg("error removing version")
 			}
 
 		} else {
3 vendor/modules.txt (vendored)
@@ -1189,7 +1189,7 @@ github.com/open-policy-agent/opa/v1/types
 github.com/open-policy-agent/opa/v1/util
 github.com/open-policy-agent/opa/v1/util/decoding
 github.com/open-policy-agent/opa/v1/version
-# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250218085216-6d8d9c5e692c
+# github.com/opencloud-eu/reva/v2 v2.27.3-0.20250220084142-9db89fb25fdf
 ## explicit; go 1.23.1
 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
 github.com/opencloud-eu/reva/v2/cmd/revad/runtime
@@ -1506,7 +1506,6 @@ github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/aspects
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/lookup
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/metadata/prefixes
-github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/migrator
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/mtimesyncedcache
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node
 github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/options