fix tests & bump reva

Signed-off-by: Christian Richter <crichter@owncloud.com>
This commit is contained in:
Christian Richter
2023-06-19 14:45:32 +02:00
parent 14a66a98f1
commit 6d9ad6fcf5
13 changed files with 223 additions and 43 deletions

2
go.mod
View File

@@ -13,7 +13,7 @@ require (
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.6.0
github.com/cs3org/go-cs3apis v0.0.0-20230516150832-730ac860c71d
github.com/cs3org/reva/v2 v2.14.1-0.20230616125400-b30fdde17262
github.com/cs3org/reva/v2 v2.14.1-0.20230619124027-9bd3d7359257
github.com/disintegration/imaging v1.6.2
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/egirna/icap-client v0.1.1

4
go.sum
View File

@@ -625,8 +625,8 @@ github.com/crewjam/httperr v0.2.0 h1:b2BfXR8U3AlIHwNeFFvZ+BV1LFvKLlzMjzaTnZMybNo
github.com/crewjam/httperr v0.2.0/go.mod h1:Jlz+Sg/XqBQhyMjdDiC+GNNRzZTD7x39Gu3pglZ5oH4=
github.com/crewjam/saml v0.4.13 h1:TYHggH/hwP7eArqiXSJUvtOPNzQDyQ7vwmwEqlFWhMc=
github.com/crewjam/saml v0.4.13/go.mod h1:igEejV+fihTIlHXYP8zOec3V5A8y3lws5bQBFsTm4gA=
github.com/cs3org/reva/v2 v2.14.1-0.20230616125400-b30fdde17262 h1:qtK30bpLgJsUha9XbJseTksGXaNQtTteynVZEobL1fw=
github.com/cs3org/reva/v2 v2.14.1-0.20230616125400-b30fdde17262/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cs3org/reva/v2 v2.14.1-0.20230619124027-9bd3d7359257 h1:Z+iEDQlIEjnOd/GQa1AJN5gPVCSPJ2aKFVCM2xj8Q+g=
github.com/cs3org/reva/v2 v2.14.1-0.20230619124027-9bd3d7359257/go.mod h1:E32krZG159YflDSjDWfx/QGIC2529PS5LiPnGNHu3d0=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8 h1:Z9lwXumT5ACSmJ7WGnFl+OMLLjpz5uR2fyz7dC255FI=
github.com/cubewise-code/go-mime v0.0.0-20200519001935-8c5762b177d8/go.mod h1:4abs/jPXcmJzYoYGF91JF9Uq9s/KL5n1jvFDix8KcqY=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=

View File

@@ -287,7 +287,7 @@ var testCases = []struct {
require.NoError(t, json.Unmarshal(b, &ev))
// AuditEvent fields
checkBaseAuditEvent(t, ev.AuditEvent, "sharing-userid", "2001-09-09T01:46:40Z", "link 'shareid' was accessed. Success: true", "public_link_accessed")
checkBaseAuditEvent(t, ev.AuditEvent, "sharing-userid", "2001-09-09T01:46:40Z", "link with token 'token-123' was accessed. Success: true", "public_link_accessed")
// AuditEventSharing fields
checkSharingAuditEvent(t, ev.AuditEventSharing, "itemid-1", "sharing-userid", "shareid")
// AuditEventShareUpdated fields
@@ -310,7 +310,7 @@ var testCases = []struct {
require.NoError(t, json.Unmarshal(b, &ev))
// AuditEvent fields
checkBaseAuditEvent(t, ev.AuditEvent, "", "", "link 'shareid' was accessed. Success: false", "public_link_accessed")
checkBaseAuditEvent(t, ev.AuditEvent, "", "", "link with token 'token-123' was accessed. Success: false", "public_link_accessed")
// AuditEventSharing fields
checkSharingAuditEvent(t, ev.AuditEventSharing, "", "", "shareid")
// AuditEventShareUpdated fields

View File

@@ -168,6 +168,7 @@ func LinkAccessFailed(r *link.GetPublicShareByTokenResponse, req *link.GetPublic
Status: r.Status.Code,
Message: r.Status.Message,
Timestamp: utils.TSNow(),
Token: req.Token,
}
if r.Share != nil {
e.ShareID = r.Share.Id

View File

@@ -49,6 +49,7 @@ import (
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree"
@@ -99,7 +100,8 @@ type Decomposedfs struct {
stream events.Stream
cache cache.StatCache
UserCache *ttlcache.Cache
UserCache *ttlcache.Cache
spaceIDCache mtimesyncedcache.Cache[string, map[string]string]
}
// NewDefault returns an instance with default components

View File

@@ -141,7 +141,7 @@ func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *n
}
if id.OpaqueId == "" {
// The Resource references the root of a space
return lu.NodeFromSpaceID(ctx, id)
return lu.NodeFromSpaceID(ctx, id.SpaceId)
}
return node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
}
@@ -162,9 +162,9 @@ func Pathify(id string, depth, width int) string {
return b.String()
}
// NodeFromSpaceID converts a resource id without an opaque id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, id *provider.ResourceId) (n *node.Node, err error) {
node, err := node.ReadNode(ctx, lu, id.SpaceId, id.OpaqueId, false, nil, false)
// NodeFromSpaceID converts a resource id into a Node
func (lu *Lookup) NodeFromSpaceID(ctx context.Context, spaceID string) (n *node.Node, err error) {
node, err := node.ReadNode(ctx, lu, spaceID, spaceID, false, nil, false)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,36 @@
package mtimesyncedcache
import "sync"
// Map is a type-safe generic wrapper around sync.Map. It is safe for
// concurrent use by multiple goroutines; the zero value is empty and
// ready to use.
type Map[K comparable, V any] struct {
	m sync.Map
}

// Delete removes the entry for key, if any.
func (m *Map[K, V]) Delete(key K) {
	m.m.Delete(key)
}

// Load returns the value stored for key. The boolean reports whether
// the key was present; when it is false the value is the zero value
// of V.
func (m *Map[K, V]) Load(key K) (V, bool) {
	var zero V
	v, ok := m.m.Load(key)
	if !ok {
		return zero, false
	}
	return v.(V), true
}

// LoadAndDelete removes the entry for key, returning the previous
// value and whether the key was present.
func (m *Map[K, V]) LoadAndDelete(key K) (V, bool) {
	var zero V
	v, loaded := m.m.LoadAndDelete(key)
	if !loaded {
		return zero, false
	}
	return v.(V), true
}

// LoadOrStore returns the existing value for key if one is present;
// otherwise it stores value and returns it. The boolean is true when
// the value was already in the map.
func (m *Map[K, V]) LoadOrStore(key K, value V) (V, bool) {
	a, loaded := m.m.LoadOrStore(key, value)
	return a.(V), loaded
}

// Range calls f sequentially for each key/value pair in the map;
// iteration stops as soon as f returns false.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
	m.m.Range(func(k, v any) bool {
		return f(k.(K), v.(V))
	})
}

// Store sets the value for key, replacing any previous value.
func (m *Map[K, V]) Store(key K, value V) {
	m.m.Store(key, value)
}

View File

@@ -0,0 +1,59 @@
package mtimesyncedcache
import (
"sync"
"time"
)
// Cache is an mtime-invalidated cache: every entry remembers the
// modification time it was computed for, and LoadOrStore recomputes a
// value only when a strictly newer mtime is presented. It is safe for
// concurrent use by multiple goroutines.
type Cache[K comparable, T any] struct {
	entries Map[K, *entry[T]]
}

// entry couples a cached value with the mtime it was computed for.
// mu guards both fields.
type entry[T any] struct {
	mtime time.Time
	value T
	mu    sync.Mutex
}

// New returns an empty Cache. The zero value of Cache is also ready
// to use.
func New[K comparable, T any]() Cache[K, T] {
	return Cache[K, T]{
		entries: Map[K, *entry[T]]{},
	}
}

// Store unconditionally caches value for key with the given mtime.
// It always returns nil; the error return exists for API symmetry
// with LoadOrStore.
func (c *Cache[K, T]) Store(key K, mtime time.Time, value T) error {
	c.entries.Store(key, &entry[T]{
		mtime: mtime,
		value: value,
	})
	return nil
}

// Load returns the cached value for key, or the zero value of T and
// false when the key has never been stored.
func (c *Cache[K, T]) Load(key K) (T, bool) {
	e, ok := c.entries.Load(key)
	if !ok {
		var t T
		return t, false
	}
	// Lock the entry so we never race with a concurrent LoadOrStore
	// that is rewriting e.value.
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.value, true
}

// LoadOrStore returns the value cached for key. When mtime is newer
// than the cached entry's mtime (or the entry is new), f is called to
// (re)compute the value first; if f fails, the error is returned and
// the entry is left unchanged so a later call with the same mtime
// retries. Callers for the same key are serialized, so f runs at most
// once per invalidation.
func (c *Cache[K, T]) LoadOrStore(key K, mtime time.Time, f func() (T, error)) (T, error) {
	e, _ := c.entries.LoadOrStore(key, &entry[T]{})
	e.mu.Lock()
	defer e.mu.Unlock()

	if mtime.After(e.mtime) {
		v, err := f()
		if err != nil {
			var t T
			return t, err
		}
		// Only advance the mtime after f succeeded; advancing it first
		// (as before) made a failed recompute look up to date and
		// poisoned the entry with a stale/zero value.
		e.mtime = mtime
		e.value = v
		// No c.entries.Store needed: e is the pointer already held in
		// the map, so the update above is visible to all readers.
	}

	return e.value, nil
}

View File

@@ -58,7 +58,7 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference
sublog := appctx.GetLogger(ctx).With().Str("space", spaceID).Str("key", key).Str("relative_path", relativePath).Logger()
// check permissions
trashnode, err := fs.lu.NodeFromSpaceID(ctx, ref.ResourceId)
trashnode, err := fs.lu.NodeFromSpaceID(ctx, spaceID)
if err != nil {
return nil, err
}

View File

@@ -299,17 +299,37 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
matches := map[string]struct{}{}
if requestedUserID != nil {
path := filepath.Join(fs.o.Root, "indexes", "by-user-id", requestedUserID.GetOpaqueId(), nodeID)
m, err := filepath.Glob(path)
allMatches := map[string]string{}
indexPath := filepath.Join(fs.o.Root, "indexes", "by-user-id", requestedUserID.GetOpaqueId())
fi, err := os.Stat(indexPath)
if err == nil {
allMatches, err = fs.spaceIDCache.LoadOrStore("by-user-id:"+requestedUserID.GetOpaqueId(), fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-user-id", requestedUserID.GetOpaqueId(), "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
}
if err != nil {
return nil, err
}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
if nodeID == spaceIDAny {
for _, match := range allMatches {
matches[match] = struct{}{}
}
matches[link] = struct{}{}
} else {
matches[allMatches[nodeID]] = struct{}{}
}
// get Groups for userid
@@ -323,17 +343,37 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
}
for _, group := range user.Groups {
path := filepath.Join(fs.o.Root, "indexes", "by-group-id", group, nodeID)
m, err := filepath.Glob(path)
indexPath := filepath.Join(fs.o.Root, "indexes", "by-group-id", group)
fi, err := os.Stat(indexPath)
if err != nil {
continue
}
allMatches, err := fs.spaceIDCache.LoadOrStore("by-group-id:"+group, fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-group-id", group, "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
if err != nil {
return nil, err
}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
if nodeID == spaceIDAny {
for _, match := range allMatches {
matches[match] = struct{}{}
}
matches[link] = struct{}{}
} else {
matches[allMatches[nodeID]] = struct{}{}
}
}
@@ -341,17 +381,40 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide
if requestedUserID == nil {
for spaceType := range spaceTypes {
path := filepath.Join(fs.o.Root, "indexes", "by-type", spaceType, nodeID)
m, err := filepath.Glob(path)
indexPath := filepath.Join(fs.o.Root, "indexes", "by-type")
if spaceType != spaceTypeAny {
indexPath = filepath.Join(indexPath, spaceType)
}
fi, err := os.Stat(indexPath)
if err != nil {
continue
}
allMatches, err := fs.spaceIDCache.LoadOrStore("by-type:"+spaceType, fi.ModTime(), func() (map[string]string, error) {
path := filepath.Join(fs.o.Root, "indexes", "by-type", spaceType, "*")
m, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches := map[string]string{}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
}
matches[match] = link
}
return matches, nil
})
if err != nil {
return nil, err
}
for _, match := range m {
link, err := os.Readlink(match)
if err != nil {
continue
if nodeID == spaceIDAny {
for _, match := range allMatches {
matches[match] = struct{}{}
}
matches[link] = struct{}{}
} else {
matches[allMatches[nodeID]] = struct{}{}
}
}
}
@@ -777,14 +840,16 @@ func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType stri
if isAlreadyExists(err) {
appctx.GetLogger(ctx).Debug().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("symlink already exists")
// FIXME: is it ok to wipe this err if the symlink already exists?
err = nil
} else {
// TODO how should we handle error cases here?
appctx.GetLogger(ctx).Error().Err(err).Str("space", spaceID).Str("spacetype", spaceType).Msg("could not create symlink")
return err
}
}
return err
// touch index root to invalidate caches
now := time.Now()
return os.Chtimes(filepath.Join(fs.o.Root, "indexes", "by-type"), now, now)
}
func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, checkPermissions bool) (*provider.StorageSpace, error) {

View File

@@ -376,7 +376,7 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro
}
}
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n, true)
child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true)
if err != nil {
return err
}
@@ -691,7 +691,12 @@ func (t *Tree) removeNode(path string, n *node.Node) error {
// Propagate propagates changes to the root of the tree
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
sublog := appctx.GetLogger(ctx).With().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Logger()
sublog := appctx.GetLogger(ctx).With().
Str("method", "tree.Propagate").
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Int64("sizeDiff", sizeDiff).
Logger()
if !t.options.TreeTimeAccounting && (!t.options.TreeSizeAccounting || sizeDiff == 0) {
// no propagation enabled
sublog.Debug().Msg("propagation disabled or nothing to propagate")
@@ -712,15 +717,22 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
var f *lockedfile.File
// lock parent before reading treesize or tree time
var parentFilename string
switch t.lookup.MetadataBackend().(type) {
case metadata.MessagePackBackend:
f, err = lockedfile.OpenFile(t.lookup.MetadataBackend().MetadataPath(n.ParentPath()), os.O_RDWR|os.O_CREATE, 0600)
parentFilename = t.lookup.MetadataBackend().MetadataPath(n.ParentPath())
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
case metadata.XattrsBackend:
// we have to use dedicated lockfiles to lock directories
// this only works because the xattr backend also locks folders with separate lock files
f, err = lockedfile.OpenFile(n.ParentPath()+filelocks.LockFileSuffix, os.O_RDWR|os.O_CREATE, 0600)
parentFilename = n.ParentPath() + filelocks.LockFileSuffix
f, err = lockedfile.OpenFile(parentFilename, os.O_RDWR|os.O_CREATE, 0600)
}
if err != nil {
sublog.Error().Err(err).
Str("parent filename", parentFilename).
Msg("Propagation failed. Could not open metadata for parent with lock.")
return err
}
// always log error if closing node fails
@@ -733,6 +745,8 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}()
if n, err = n.ParentWithReader(f); err != nil {
sublog.Error().Err(err).
Msg("Propagation failed. Could not read parent node.")
return err
}
@@ -788,11 +802,14 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
switch {
case metadata.IsAttrUnset(err):
// fallback to calculating the treesize
sublog.Warn().Msg("treesize attribute unset, falling back to calculating the treesize")
newSize, err = t.calculateTreeSize(ctx, n.InternalPath())
if err != nil {
return err
}
case err != nil:
sublog.Error().Err(err).
Msg("Faild to propagate treesize change. Error when reading the treesize attribute from parent")
return err
case sizeDiff > 0:
newSize = treeSize + uint64(sizeDiff)
@@ -813,12 +830,14 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err
}
if err = n.SetXattrs(attrs, false); err != nil {
sublog.Error().Err(err).Msg("Failed to update extend attributes of parent node")
return err
}
// Release node lock early, ignore already closed error
cerr := f.Close()
if cerr != nil && !errors.Is(cerr, os.ErrClosed) {
sublog.Error().Err(cerr).Msg("Failed to close parent node and release lock")
return cerr
}
}

View File

@@ -70,10 +70,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p
return nil, errors.New("Decomposedfs: missing dir in metadata")
}
n, err := lu.NodeFromSpaceID(ctx, &provider.ResourceId{
SpaceId: info.Storage["SpaceRoot"],
OpaqueId: info.Storage["SpaceRoot"],
})
n, err := lu.NodeFromSpaceID(ctx, info.Storage["SpaceRoot"])
if err != nil {
return nil, errors.Wrap(err, "Decomposedfs: error getting space root node")
}

3
vendor/modules.txt vendored
View File

@@ -352,7 +352,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.14.1-0.20230616125400-b30fdde17262
# github.com/cs3org/reva/v2 v2.14.1-0.20230619124027-9bd3d7359257
## explicit; go 1.20
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime
@@ -655,6 +655,7 @@ github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/migrator
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree