Merge pull request #10321 from owncloud/bump-reva-to-e5baaccf6614

bump reva to e5baaccf6614
This commit is contained in:
Michael Barz
2024-10-16 21:19:33 +02:00
committed by GitHub
22 changed files with 242 additions and 90 deletions

View File

@@ -2,6 +2,7 @@ Enhancement: Bump reva to 2.xx.x
TODO
https://github.com/owncloud/ocis/pull/10321
https://github.com/owncloud/ocis/pull/10236
https://github.com/owncloud/ocis/pull/10216
https://github.com/owncloud/ocis/pull/10315

2
go.mod
View File

@@ -15,7 +15,7 @@ require (
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/coreos/go-oidc/v3 v3.11.0
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb
github.com/cs3org/reva/v2 v2.25.1-0.20241015142909-99548fe690bd
github.com/cs3org/reva/v2 v2.25.1-0.20241016145214-e5baaccf6614
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
github.com/egirna/icap-client v0.1.1

6
go.sum
View File

@@ -250,10 +250,8 @@ github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c=
github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb h1:KmYZDReplv/yfwc1LNYpDcVhVujC3Pasv6WjXx1haSU=
github.com/cs3org/go-cs3apis v0.0.0-20240724121416-062c4e3046cb/go.mod h1:yyP8PRo0EZou3nSH7H4qjlzQwaydPeIRNgX50npQHpE=
github.com/cs3org/reva/v2 v2.25.1-0.20241004072335-2a6fdbed139d h1:ETrSkU/XK50QzsxCAHQrr0b7klOHb9TVmJjmOsjMhr8=
github.com/cs3org/reva/v2 v2.25.1-0.20241004072335-2a6fdbed139d/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cs3org/reva/v2 v2.25.1-0.20241015142909-99548fe690bd h1:+cV0mcJR2v4Fidrs1ckM7c0+dSINTLsmECO76AbW8No=
github.com/cs3org/reva/v2 v2.25.1-0.20241015142909-99548fe690bd/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cs3org/reva/v2 v2.25.1-0.20241016145214-e5baaccf6614 h1:f9s9KJoxM+EKlBKLhD4LsoqcELYct4DLToqXPJj/mC0=
github.com/cs3org/reva/v2 v2.25.1-0.20241016145214-e5baaccf6614/go.mod h1:p7CHBXcg6sSqB+0JMNDfC1S7TSh9FghXkw1kTV3KcJI=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=

View File

@@ -51,6 +51,7 @@ func ShareCreated(r *collaboration.CreateShareResponse, executant *user.User) ev
GranteeUserID: r.Share.GetGrantee().GetUserId(),
GranteeGroupID: r.Share.GetGrantee().GetGroupId(),
ItemID: r.Share.ResourceId,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
CTime: r.Share.Ctime,
Permissions: r.Share.Permissions,
}
@@ -73,6 +74,7 @@ func ShareRemoved(r *collaboration.RemoveShareResponse, req *collaboration.Remov
GranteeUserID: userid,
GranteeGroupID: groupid,
ItemID: rid,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
Timestamp: time.Now(),
}
}
@@ -83,6 +85,7 @@ func ShareUpdated(r *collaboration.UpdateShareResponse, req *collaboration.Updat
Executant: executant.GetId(),
ShareID: r.Share.Id,
ItemID: r.Share.ResourceId,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
Permissions: r.Share.Permissions,
GranteeUserID: r.Share.GetGrantee().GetUserId(),
GranteeGroupID: r.Share.GetGrantee().GetGroupId(),
@@ -114,6 +117,7 @@ func LinkCreated(r *link.CreatePublicShareResponse, executant *user.User) events
ShareID: r.Share.Id,
Sharer: r.Share.Creator,
ItemID: r.Share.ResourceId,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
Permissions: r.Share.Permissions,
DisplayName: r.Share.DisplayName,
Expiration: r.Share.Expiration,
@@ -130,6 +134,7 @@ func LinkUpdated(r *link.UpdatePublicShareResponse, req *link.UpdatePublicShareR
ShareID: r.Share.Id,
Sharer: r.Share.Creator,
ItemID: r.Share.ResourceId,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
Permissions: r.Share.Permissions,
DisplayName: r.Share.DisplayName,
Expiration: r.Share.Expiration,
@@ -177,11 +182,12 @@ func LinkRemoved(r *link.RemovePublicShareResponse, req *link.RemovePublicShareR
var rid *provider.ResourceId
_ = utils.ReadJSONFromOpaque(r.Opaque, "resourceid", &rid)
return events.LinkRemoved{
Executant: executant.GetId(),
ShareID: req.Ref.GetId(),
ShareToken: req.Ref.GetToken(),
Timestamp: utils.TSNow(),
ItemID: rid,
Executant: executant.GetId(),
ShareID: req.Ref.GetId(),
ShareToken: req.Ref.GetToken(),
Timestamp: utils.TSNow(),
ItemID: rid,
ResourceName: utils.ReadPlainFromOpaque(r.Opaque, "resourcename"),
}
}

View File

@@ -332,6 +332,7 @@ func (s *service) CreatePublicShare(ctx context.Context, req *link.CreatePublicS
default:
res.Status = status.NewOK(ctx)
res.Share = share
res.Opaque = utils.AppendPlainToOpaque(nil, "resourcename", sRes.GetInfo().GetName())
}
return res, nil
@@ -353,15 +354,15 @@ func (s *service) RemovePublicShare(ctx context.Context, req *link.RemovePublicS
Status: status.NewInternal(ctx, "error loading public share"),
}, err
}
if !publicshare.IsCreatedByUser(ps, user) {
sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: ps.ResourceId}})
if err != nil {
log.Err(err).Interface("resource_id", ps.ResourceId).Msg("failed to stat shared resource")
return &link.RemovePublicShareResponse{
Status: status.NewInternal(ctx, "failed to stat shared resource"),
}, err
}
sRes, err := gatewayClient.Stat(ctx, &provider.StatRequest{Ref: &provider.Reference{ResourceId: ps.ResourceId}})
if err != nil {
log.Err(err).Interface("resource_id", ps.ResourceId).Msg("failed to stat shared resource")
return &link.RemovePublicShareResponse{
Status: status.NewInternal(ctx, "failed to stat shared resource"),
}, err
}
if !publicshare.IsCreatedByUser(ps, user) {
if !sRes.GetInfo().GetPermissionSet().RemoveGrant {
return &link.RemovePublicShareResponse{
Status: status.NewPermissionDenied(ctx, nil, "no permission to delete public share"),
@@ -374,8 +375,10 @@ func (s *service) RemovePublicShare(ctx context.Context, req *link.RemovePublicS
Status: status.NewInternal(ctx, "error deleting public share"),
}, err
}
o := utils.AppendJSONToOpaque(nil, "resourceid", ps.GetResourceId())
o = utils.AppendPlainToOpaque(o, "resourcename", sRes.GetInfo().GetName())
return &link.RemovePublicShareResponse{
Opaque: utils.AppendJSONToOpaque(nil, "resourceid", ps.GetResourceId()),
Opaque: o,
Status: status.NewOK(ctx),
}, nil
}
@@ -603,6 +606,7 @@ func (s *service) UpdatePublicShare(ctx context.Context, req *link.UpdatePublicS
res := &link.UpdatePublicShareResponse{
Status: status.NewOK(ctx),
Share: updateR,
Opaque: utils.AppendPlainToOpaque(nil, "resourcename", sRes.GetInfo().GetName()),
}
return res, nil
}

View File

@@ -248,6 +248,7 @@ func (s *service) CreateShare(ctx context.Context, req *collaboration.CreateShar
return &collaboration.CreateShareResponse{
Status: status.NewOK(ctx),
Share: createdShare,
Opaque: utils.AppendPlainToOpaque(nil, "resourcename", sRes.GetInfo().GetName()),
}, nil
}
@@ -296,6 +297,7 @@ func (s *service) RemoveShare(ctx context.Context, req *collaboration.RemoveShar
}
o := utils.AppendJSONToOpaque(nil, "resourceid", share.GetResourceId())
o = utils.AppendPlainToOpaque(o, "resourcename", sRes.GetInfo().GetName())
if user := share.GetGrantee().GetUserId(); user != nil {
o = utils.AppendJSONToOpaque(o, "granteeuserid", user)
} else {
@@ -447,6 +449,7 @@ func (s *service) UpdateShare(ctx context.Context, req *collaboration.UpdateShar
res := &collaboration.UpdateShareResponse{
Status: status.NewOK(ctx),
Share: share,
Opaque: utils.AppendPlainToOpaque(nil, "resourcename", sRes.GetInfo().GetName()),
}
return res, nil
}
@@ -535,16 +538,21 @@ func (s *service) UpdateReceivedShare(ctx context.Context, req *collaboration.Up
var uid userpb.UserId
_ = utils.ReadJSONFromOpaque(req.Opaque, "userid", &uid)
updatedShare, err := s.sm.UpdateReceivedShare(ctx, req.Share, req.UpdateMask, &uid)
if err != nil {
switch err.(type) {
case nil:
return &collaboration.UpdateReceivedShareResponse{
Status: status.NewInternal(ctx, "error updating received share"),
Status: status.NewOK(ctx),
Share: updatedShare,
}, nil
case errtypes.NotFound:
return &collaboration.UpdateReceivedShareResponse{
Status: status.NewNotFound(ctx, "error getting received share"),
}, nil
default:
return &collaboration.UpdateReceivedShareResponse{
Status: status.NewInternal(ctx, "error getting received share"),
}, nil
}
return &collaboration.UpdateReceivedShareResponse{
Status: status.NewOK(ctx),
Share: updatedShare,
}, nil
}
func setReceivedShareMountPoint(ctx context.Context, gwc gateway.GatewayAPIClient, req *collaboration.UpdateReceivedShareRequest) (*rpc.Status, error) {

View File

@@ -523,7 +523,11 @@ func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r
}
// TODO this is just a stat -> rename
func (p *Handler) statSpace(ctx context.Context, client gateway.GatewayAPIClient, ref *provider.Reference, metadataKeys, fieldMaskPaths []string) (*provider.ResourceInfo, *rpc.Status, error) {
func (p *Handler) statSpace(ctx context.Context, ref *provider.Reference, metadataKeys, fieldMaskPaths []string) (*provider.ResourceInfo, *rpc.Status, error) {
client, err := p.selector.Next()
if err != nil {
return nil, nil, err
}
req := &provider.StatRequest{
Ref: ref,
ArbitraryMetadataKeys: metadataKeys,
@@ -542,18 +546,12 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r
span.SetAttributes(attribute.KeyValue{Key: "depth", Value: attribute.StringValue(depth.String())})
defer span.End()
client, err := p.selector.Next()
if err != nil {
log.Error().Err(err).Msg("error getting grpc client")
w.WriteHeader(http.StatusInternalServerError)
return nil, false, false
}
metadataKeys, fieldMaskPaths := metadataKeys(pf)
// we need to stat all spaces to aggregate the root etag, mtime and size
// TODO cache per space (hah, no longer per user + per space!)
var (
err error
rootInfo *provider.ResourceInfo
mostRecentChildInfo *provider.ResourceInfo
aggregatedChildSize uint64
@@ -569,7 +567,7 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r
if err != nil {
continue
}
info, status, err := p.statSpace(ctx, client, &spaceRef, metadataKeys, fieldMaskPaths)
info, status, err := p.statSpace(ctx, &spaceRef, metadataKeys, fieldMaskPaths)
if err != nil || status.GetCode() != rpc.Code_CODE_OK {
continue
}
@@ -587,7 +585,7 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r
info = space.RootInfo
} else {
var status *rpc.Status
info, status, err = p.statSpace(ctx, client, spaceRef, metadataKeys, fieldMaskPaths)
info, status, err = p.statSpace(ctx, spaceRef, metadataKeys, fieldMaskPaths)
if err != nil || status.GetCode() != rpc.Code_CODE_OK {
continue
}
@@ -656,6 +654,12 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r
case spaceInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth == net.DepthOne:
switch {
case strings.HasPrefix(requestPath, spaceInfo.Path) && spaceData.SpaceType != "virtual":
client, err := p.selector.Next()
if err != nil {
log.Error().Err(err).Msg("error getting grpc client")
w.WriteHeader(http.StatusInternalServerError)
return nil, false, false
}
req := &provider.ListContainerRequest{
Ref: spaceData.Ref,
ArbitraryMetadataKeys: metadataKeys,
@@ -692,6 +696,12 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r
if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER || spaceData.SpaceType == "virtual" {
continue
}
client, err := p.selector.Next()
if err != nil {
log.Error().Err(err).Msg("error getting grpc client")
w.WriteHeader(http.StatusInternalServerError)
return nil, false, false
}
req := &provider.ListContainerRequest{
Ref: &provider.Reference{
ResourceId: spaceInfo.Id,

View File

@@ -41,6 +41,7 @@ type ShareCreated struct {
GranteeGroupID *group.GroupId
Sharee *provider.Grantee
ItemID *provider.ResourceId
ResourceName string
Permissions *collaboration.SharePermissions
CTime *types.Timestamp
}
@@ -62,8 +63,9 @@ type ShareRemoved struct {
GranteeUserID *user.UserId
GranteeGroupID *group.GroupId
ItemID *provider.ResourceId
Timestamp time.Time
ItemID *provider.ResourceId
ResourceName string
Timestamp time.Time
}
// Unmarshal to fulfill unmarshaller interface
@@ -78,6 +80,7 @@ type ShareUpdated struct {
Executant *user.UserId
ShareID *collaboration.ShareId
ItemID *provider.ResourceId
ResourceName string
Permissions *collaboration.SharePermissions
GranteeUserID *user.UserId
GranteeGroupID *group.GroupId
@@ -101,6 +104,7 @@ type ShareExpired struct {
ShareID *collaboration.ShareId
ShareOwner *user.UserId
ItemID *provider.ResourceId
Path string
ExpiredAt time.Time
// split the protobuf Grantee oneof so we can use stdlib encoding/json
GranteeUserID *user.UserId
@@ -119,6 +123,7 @@ type ReceivedShareUpdated struct {
Executant *user.UserId
ShareID *collaboration.ShareId
ItemID *provider.ResourceId
Path string
Permissions *collaboration.SharePermissions
GranteeUserID *user.UserId
GranteeGroupID *group.GroupId
@@ -141,6 +146,7 @@ type LinkCreated struct {
ShareID *link.PublicShareId
Sharer *user.UserId
ItemID *provider.ResourceId
ResourceName string
Permissions *link.PublicSharePermissions
DisplayName string
Expiration *types.Timestamp
@@ -162,6 +168,7 @@ type LinkUpdated struct {
ShareID *link.PublicShareId
Sharer *user.UserId
ItemID *provider.ResourceId
ResourceName string
Permissions *link.PublicSharePermissions
DisplayName string
Expiration *types.Timestamp
@@ -185,6 +192,7 @@ type LinkAccessed struct {
ShareID *link.PublicShareId
Sharer *user.UserId
ItemID *provider.ResourceId
Path string
Permissions *link.PublicSharePermissions
DisplayName string
Expiration *types.Timestamp
@@ -221,10 +229,11 @@ func (LinkAccessFailed) Unmarshal(v []byte) (interface{}, error) {
type LinkRemoved struct {
Executant *user.UserId
// split protobuf Ref
ShareID *link.PublicShareId
ShareToken string
Timestamp *types.Timestamp
ItemID *provider.ResourceId
ShareID *link.PublicShareId
ShareToken string
Timestamp *types.Timestamp
ItemID *provider.ResourceId
ResourceName string
}
// Unmarshal to fulfill unmarshaller interface

View File

@@ -28,13 +28,15 @@ import (
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"golang.org/x/exp/maps"
"github.com/cs3org/reva/v2/pkg/appctx"
"github.com/cs3org/reva/v2/pkg/errtypes"
"github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/shareid"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache"
"github.com/cs3org/reva/v2/pkg/storage/utils/metadata"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
)
// name is the Tracer name used to identify this instrumentation library.
@@ -246,7 +248,7 @@ func (c *Cache) List(ctx context.Context, userid string) (map[string]SpaceShareI
for ssid, cached := range us.UserShares {
r[ssid] = SpaceShareIDs{
IDs: cached.IDs,
IDs: maps.Clone(cached.IDs),
}
}
return r, nil

View File

@@ -116,6 +116,11 @@ func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, parent *node.Node
}
return "", err
}
_, err = os.Stat(filepath.Join(parent.InternalPath(), name))
if err != nil {
return "", err
}
return string(id), nil
}
@@ -245,8 +250,6 @@ func (lu *Lookup) Path(ctx context.Context, n *node.Node, hasPermission node.Per
appctx.GetLogger(ctx).
Error().Err(err).
Str("path", p).
Str("spaceid", n.SpaceID).
Str("nodeid", n.ID).
Msg("Path()")
return
}

View File

@@ -24,6 +24,7 @@ import (
microstore "go-micro.dev/v4/store"
"github.com/cs3org/reva/v2/pkg/appctx"
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options"
"github.com/cs3org/reva/v2/pkg/store"
)
@@ -64,15 +65,43 @@ func (c *StoreIDCache) Delete(_ context.Context, spaceID, nodeID string) error {
func (c *StoreIDCache) DeleteByPath(ctx context.Context, path string) error {
spaceID, nodeID, ok := c.GetByPath(ctx, path)
if !ok {
return nil
appctx.GetLogger(ctx).Error().Str("record", path).Msg("could not get spaceID and nodeID from cache")
} else {
err := c.cache.Delete(reverseCacheKey(path))
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("record", path).Str("spaceID", spaceID).Str("nodeID", nodeID).Msg("could not get spaceID and nodeID from cache")
}
err = c.cache.Delete(cacheKey(spaceID, nodeID))
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("record", path).Str("spaceID", spaceID).Str("nodeID", nodeID).Msg("could not get spaceID and nodeID from cache")
}
}
err := c.cache.Delete(reverseCacheKey(path))
list, err := c.cache.List(
microstore.ListPrefix(reverseCacheKey(path) + "/"),
)
if err != nil {
return err
}
for _, record := range list {
spaceID, nodeID, ok := c.GetByPath(ctx, record)
if !ok {
appctx.GetLogger(ctx).Error().Str("record", record).Msg("could not get spaceID and nodeID from cache")
continue
}
return c.cache.Delete(cacheKey(spaceID, nodeID))
err := c.cache.Delete(reverseCacheKey(record))
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("record", record).Str("spaceID", spaceID).Str("nodeID", nodeID).Msg("could not get spaceID and nodeID from cache")
}
err = c.cache.Delete(cacheKey(spaceID, nodeID))
if err != nil {
appctx.GetLogger(ctx).Error().Err(err).Str("record", record).Str("spaceID", spaceID).Str("nodeID", nodeID).Msg("could not get spaceID and nodeID from cache")
}
}
return nil
}
// DeletePath removes only the path entry from the cache

View File

@@ -4,7 +4,7 @@ package timemanager
import "syscall"
// StatCtime returns the change time
// StatCtime returns the creation time
func StatCTime(st *syscall.Stat_t) syscall.Timespec {
return st.Ctimespec
}

View File

@@ -4,7 +4,7 @@ package timemanager
import "syscall"
// StatCtime returns the change time
// StatCtime returns the creation time
func StatCTime(st *syscall.Stat_t) syscall.Timespec {
return st.Ctim
}

View File

@@ -66,6 +66,8 @@ type queueItem struct {
timer *time.Timer
}
const dirtyFlag = "user.ocis.dirty"
// NewScanDebouncer returns a new SpaceDebouncer instance
func NewScanDebouncer(d time.Duration, f func(item scanItem)) *ScanDebouncer {
return &ScanDebouncer{
@@ -147,7 +149,7 @@ func (t *Tree) workScanQueue() {
}
if item.Recurse {
err = t.WarmupIDCache(item.Path, true)
err = t.WarmupIDCache(item.Path, true, false)
if err != nil {
log.Error().Err(err).Str("path", item.Path).Msg("failed to warmup id cache")
}
@@ -172,6 +174,9 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool, recurse bool) e
ForceRescan: false,
})
}
if err := t.setDirty(filepath.Dir(path), true); err != nil {
return err
}
t.scanDebouncer.Debounce(scanItem{
Path: filepath.Dir(path),
ForceRescan: true,
@@ -180,6 +185,9 @@ func (t *Tree) Scan(path string, action EventAction, isDir bool, recurse bool) e
} else {
// 2. New directory
// -> scan directory
if err := t.setDirty(path, true); err != nil {
return err
}
t.scanDebouncer.Debounce(scanItem{
Path: path,
ForceRescan: true,
@@ -368,7 +376,7 @@ func (t *Tree) assimilate(item scanItem) error {
}
if fi.IsDir() {
// if it was moved and it is a directory we need to propagate the move
go func() { _ = t.WarmupIDCache(item.Path, false) }()
go func() { _ = t.WarmupIDCache(item.Path, false, true) }()
}
parentID, err := t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.ParentidAttr)
@@ -536,7 +544,8 @@ assimilate:
}
// WarmupIDCache warms up the id cache
func (t *Tree) WarmupIDCache(root string, assimilate bool) error {
func (t *Tree) WarmupIDCache(root string, assimilate, onlyDirty bool) error {
root = filepath.Clean(root)
spaceID := []byte("")
scopeSpace := func(spaceCandidate string) error {
@@ -557,10 +566,13 @@ func (t *Tree) WarmupIDCache(root string, assimilate bool) error {
sizes := make(map[string]int64)
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// skip lock files
// skip lock and upload files
if isLockFile(path) {
return nil
}
if isTrash(path) || t.isUpload(path) {
return filepath.SkipDir
}
if err != nil {
return err
@@ -568,8 +580,19 @@ func (t *Tree) WarmupIDCache(root string, assimilate bool) error {
// calculate tree sizes
if !info.IsDir() {
dir := filepath.Dir(path)
sizes[dir] += info.Size()
dir := path
for dir != root {
dir = filepath.Clean(filepath.Dir(dir))
sizes[dir] += info.Size()
}
} else if onlyDirty {
dirty, err := t.isDirty(path)
if err != nil {
return err
}
if !dirty {
return filepath.SkipDir
}
}
attribs, err := t.lookup.MetadataBackend().All(context.Background(), path)
@@ -618,10 +641,14 @@ func (t *Tree) WarmupIDCache(root string, assimilate bool) error {
} else if assimilate {
_ = t.assimilate(scanItem{Path: path, ForceRescan: true})
}
return nil
return t.setDirty(path, false)
})
for dir, size := range sizes {
if dir == root {
// Propagate the size diff further up the tree
_ = t.propagateSizeDiff(dir, size)
}
_ = t.lookup.MetadataBackend().Set(context.Background(), dir, prefixes.TreesizeAttr, []byte(fmt.Sprintf("%d", size)))
}
@@ -631,3 +658,40 @@ func (t *Tree) WarmupIDCache(root string, assimilate bool) error {
return nil
}
func (t *Tree) propagateSizeDiff(dir string, size int64) error {
// First find the space id
spaceID, _, err := t.findSpaceId(dir)
if err != nil {
return err
}
attrs, err := t.lookup.MetadataBackend().All(context.Background(), dir)
if err != nil {
return err
}
n, err := t.lookup.NodeFromID(context.Background(), &provider.ResourceId{
StorageId: t.options.MountID,
SpaceId: spaceID,
OpaqueId: string(attrs[prefixes.IDAttr]),
})
if err != nil {
return err
}
oldSize, err := node.Attributes(attrs).Int64(prefixes.TreesizeAttr)
if err != nil {
return err
}
return t.Propagate(context.Background(), n, size-oldSize)
}
func (t *Tree) setDirty(path string, dirty bool) error {
return t.lookup.MetadataBackend().Set(context.Background(), path, dirtyFlag, []byte(fmt.Sprintf("%t", dirty)))
}
func (t *Tree) isDirty(path string) (bool, error) {
dirtyAttr, err := t.lookup.MetadataBackend().Get(context.Background(), path, dirtyFlag)
if err != nil {
return false, err
}
return string(dirtyAttr) == "true", nil
}

View File

@@ -59,7 +59,7 @@ start:
if err != nil {
continue
}
if isLockFile(ev.Path) || isTrash(ev.Path) {
if isLockFile(ev.Path) || isTrash(ev.Path) || w.tree.isUpload(ev.Path) {
continue
}
switch ev.Event {
@@ -73,7 +73,7 @@ start:
case "RENAME":
go func() {
_ = w.tree.Scan(ev.Path, ActionMove, false, true)
_ = w.tree.WarmupIDCache(ev.Path, false)
_ = w.tree.WarmupIDCache(ev.Path, false, false)
}()
}
case io.EOF:

View File

@@ -41,22 +41,24 @@ func (w *GpfsWatchFolderWatcher) Watch(topic string) {
continue
}
if isLockFile(lwev.Path) || isTrash(lwev.Path) {
if isLockFile(lwev.Path) || isTrash(lwev.Path) || w.tree.isUpload(lwev.Path) {
continue
}
isDir := strings.Contains(lwev.Event, "IN_ISDIR")
switch {
case strings.Contains(lwev.Event, "IN_CREATE"):
go func() { _ = w.tree.Scan(lwev.Path, ActionCreate, false, false) }()
go func() { _ = w.tree.Scan(lwev.Path, ActionCreate, isDir, false) }()
case strings.Contains(lwev.Event, "IN_CLOSE_WRITE"):
bytesWritten, err := strconv.Atoi(lwev.BytesWritten)
if err == nil && bytesWritten > 0 {
go func() { _ = w.tree.Scan(lwev.Path, ActionUpdate, false, true) }()
go func() { _ = w.tree.Scan(lwev.Path, ActionUpdate, isDir, true) }()
}
case strings.Contains(lwev.Event, "IN_MOVED_TO"):
go func() {
_ = w.tree.Scan(lwev.Path, ActionMove, false, true)
_ = w.tree.WarmupIDCache(lwev.Path, false)
_ = w.tree.Scan(lwev.Path, ActionMove, isDir, true)
}()
}
}

View File

@@ -42,7 +42,7 @@ func (iw *InotifyWatcher) Watch(path string) {
select {
case event := <-events:
for _, e := range event.Events {
if isLockFile(event.Filename) || isTrash(event.Filename) {
if isLockFile(event.Filename) || isTrash(event.Filename) || iw.tree.isUpload(event.Filename) {
continue
}
switch e {

View File

@@ -142,7 +142,7 @@ func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, trashbin *trash
go t.watcher.Watch(watchPath)
go t.workScanQueue()
go func() {
_ = t.WarmupIDCache(o.Root, true)
_ = t.WarmupIDCache(o.Root, true, false)
}()
}
@@ -328,7 +328,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node)
_ = t.lookup.(*lookup.Lookup).CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name))
// update id cache for the moved subtree.
if oldNode.IsDir(ctx) {
err = t.WarmupIDCache(filepath.Join(newNode.ParentPath(), newNode.Name), false)
err = t.WarmupIDCache(filepath.Join(newNode.ParentPath(), newNode.Name), false, false)
if err != nil {
return err
}
@@ -650,9 +650,9 @@ func (t *Tree) removeNode(ctx context.Context, path string, n *node.Node) error
}
// Propagate propagates changes to the root of the tree
func (t *Tree) Propagate(ctx context.Context, n *node.Node, _ int64) (err error) {
func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err error) {
// We do not propagate size diffs here but rely on the assimilation to take care of the tree sizes instead
return t.propagator.Propagate(ctx, n, 0)
return t.propagator.Propagate(ctx, n, sizeDiff)
}
// WriteBlob writes a blob to the blobstore
@@ -699,6 +699,9 @@ func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (met
// we also need to touch the actual node file here it stores the mtime of the resource
h, err := os.OpenFile(n.InternalPath(), os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
if os.IsExist(err) {
return unlock, errtypes.AlreadyExists(n.InternalPath())
}
return unlock, err
}
h.Close()
@@ -826,3 +829,7 @@ func isLockFile(path string) bool {
func isTrash(path string) bool {
return strings.HasSuffix(path, ".trashinfo") || strings.HasSuffix(path, ".trashitem")
}
func (t *Tree) isUpload(path string) bool {
return strings.HasPrefix(path, t.options.UploadDirectory)
}

View File

@@ -78,28 +78,35 @@ func (b XattrsBackend) GetInt64(ctx context.Context, filePath, key string) (int6
// List retrieves a list of names of extended attributes associated with the
// given path in the file system.
func (XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
func (b XattrsBackend) List(ctx context.Context, filePath string) (attribs []string, err error) {
return b.list(ctx, filePath, true)
}
func (b XattrsBackend) list(ctx context.Context, filePath string, acquireLock bool) (attribs []string, err error) {
attrs, err := xattr.List(filePath)
if err == nil {
return attrs, nil
}
f, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return nil, err
}
defer cleanupLockfile(f)
// listing xattrs failed, try again, either with lock or without
if acquireLock {
f, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return nil, err
}
defer cleanupLockfile(ctx, f)
}
return xattr.List(filePath)
}
// All reads all extended attributes for a node, protected by a
// shared file lock
func (b XattrsBackend) All(ctx context.Context, path string) (map[string][]byte, error) {
return b.getAll(ctx, path, false)
return b.getAll(ctx, path, false, true)
}
func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache bool) (map[string][]byte, error) {
func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache, acquireLock bool) (map[string][]byte, error) {
attribs := map[string][]byte{}
if !skipCache {
@@ -109,7 +116,7 @@ func (b XattrsBackend) getAll(ctx context.Context, path string, skipCache bool)
}
}
attrNames, err := b.List(ctx, path)
attrNames, err := b.list(ctx, path, acquireLock)
if err != nil {
return nil, err
}
@@ -162,7 +169,7 @@ func (b XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map
if err != nil {
return err
}
defer cleanupLockfile(lockedFile)
defer cleanupLockfile(ctx, lockedFile)
}
// error handling: Count if there are errors while setting the attribs.
@@ -181,7 +188,7 @@ func (b XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map
return errors.Wrap(xerr, "Failed to set all xattrs")
}
attribs, err = b.getAll(ctx, path, true)
attribs, err = b.getAll(ctx, path, true, false)
if err != nil {
return err
}
@@ -195,15 +202,14 @@ func (b XattrsBackend) Remove(ctx context.Context, path string, key string, acqu
if err != nil {
return err
}
defer cleanupLockfile(lockedFile)
defer cleanupLockfile(ctx, lockedFile)
}
err := xattr.Remove(path, key)
if err != nil {
return err
}
attribs, err := b.getAll(ctx, path, true)
attribs, err := b.getAll(ctx, path, true, false)
if err != nil {
return err
}
@@ -217,7 +223,7 @@ func (XattrsBackend) IsMetaFile(path string) bool { return strings.HasSuffix(pat
func (b XattrsBackend) Purge(ctx context.Context, path string) error {
_, err := os.Stat(path)
if err == nil {
attribs, err := b.getAll(ctx, path, true)
attribs, err := b.getAll(ctx, path, true, true)
if err != nil {
return err
}
@@ -270,7 +276,7 @@ func (b XattrsBackend) Lock(path string) (UnlockFunc, error) {
}, nil
}
func cleanupLockfile(f *lockedfile.File) {
func cleanupLockfile(ctx context.Context, f *lockedfile.File) {
_ = f.Close()
_ = os.Remove(f.Name())
}

View File

@@ -86,7 +86,10 @@ func (s *OcisSession) Purge(ctx context.Context) error {
if err != nil {
return err
}
defer f.Close()
defer func() {
f.Close()
os.Remove(sessionPath + ".lock")
}()
if err := os.Remove(sessionPath); err != nil {
return err
}

View File

@@ -115,7 +115,7 @@ func (session *OcisSession) FinishUpload(ctx context.Context) error {
sha1h, md5h, adler32h, err := node.CalculateChecksums(ctx, session.binPath())
if err != nil {
log.Info().Err(err).Msg("error copying checksums")
return err
}
// compare if they match the sent checksum

2
vendor/modules.txt vendored
View File

@@ -364,7 +364,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.25.1-0.20241015142909-99548fe690bd
# github.com/cs3org/reva/v2 v2.25.1-0.20241016145214-e5baaccf6614
## explicit; go 1.21
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime