Merge branch 'master' into backmerge-master-2
2
.github/CODEOWNERS
vendored
@@ -1,4 +1,4 @@
services/settings/ @kulmann @lookacat
services/settings/ @kulmann
services/web/ @kulmann
.drone.star @wkloucek @micbar @phil-davis @individual-it
assets/End-User-License-Agreement-for-ownCloud-Infinite-Scale.pdf @micbar @dragotin @tbsbdr

@@ -2,6 +2,7 @@ Enhancement: Update reva to latest edge version

We update reva to the latest edge version to get the latest fixes and features.

https://github.com/owncloud/ocis/pull/8287
https://github.com/owncloud/ocis/pull/8278
https://github.com/owncloud/ocis/pull/8264
https://github.com/owncloud/ocis/pull/8100

@@ -0,0 +1,7 @@
Bugfix: Disallow deleting a file during processing

We disallow deleting a file while it is still being processed to prevent orphaned uploads from piling up.

https://github.com/owncloud/ocis/pull/8132
https://github.com/cs3org/reva/pull/4446
https://github.com/owncloud/ocis/issues/8127
7
changelog/unreleased/fix-resource-name.md
Normal file
@@ -0,0 +1,7 @@
Bugfix: Fix the resource name

We fixed a problem where, after the sharer renamed a shared resource, the receiver saw the new name.

https://github.com/owncloud/ocis/pull/8246
https://github.com/cs3org/reva/pull/4463
https://github.com/owncloud/ocis/issues/8242
5
changelog/unreleased/postprocessing-bulk-restart.md
Normal file
@@ -0,0 +1,5 @@
Enhancement: Allow restarting multiple uploads with one command

Allows restarting all uploads that are in a specific state.

https://github.com/owncloud/ocis/pull/8287
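For reference, a usage sketch of the new behaviour, using the invocations documented later in this changeset:

```bash
ocis postprocessing restart                 # restart all uploads whose postprocessing finished but that were never moved to the blobstore
ocis postprocessing restart -s "virusscan"  # restart every upload currently in the virusscan step
```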
4
go.mod
@@ -13,7 +13,7 @@ require (
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible
|
||||
github.com/coreos/go-oidc/v3 v3.9.0
|
||||
github.com/cs3org/go-cs3apis v0.0.0-20231023073225-7748710e0781
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240124094635-6eec406c0be7
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240126141248-c9e4a3bcd0da
|
||||
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
|
||||
github.com/disintegration/imaging v1.6.2
|
||||
github.com/dutchcoders/go-clamd v0.0.0-20170520113014-b970184f4d9e
|
||||
@@ -354,7 +354,7 @@ replace github.com/go-micro/plugins/v4/store/nats-js-kv => github.com/kobergj/pl
|
||||
|
||||
replace github.com/studio-b12/gowebdav => github.com/aduffeck/gowebdav v0.0.0-20231215102054-212d4a4374f6
|
||||
|
||||
replace github.com/egirna/icap-client => github.com/fschade/icap-client v0.0.0-20240105150744-9c2d8aff3ef2
|
||||
replace github.com/egirna/icap-client => github.com/fschade/icap-client v0.0.0-20240123094924-5af178158eaf
|
||||
|
||||
// exclude the v2 line of go-sqlite3 which was released accidentally and prevents pulling in newer versions of go-sqlite3
|
||||
// see https://github.com/mattn/go-sqlite3/issues/965 for more details
|
||||
|
||||
8
go.sum
@@ -1018,8 +1018,8 @@ github.com/crewjam/saml v0.4.14 h1:g9FBNx62osKusnFzs3QTN5L9CVA/Egfgm+stJShzw/c=
|
||||
github.com/crewjam/saml v0.4.14/go.mod h1:UVSZCf18jJkk6GpWNVqcyQJMD5HsRugBPf4I1nl2mME=
|
||||
github.com/cs3org/go-cs3apis v0.0.0-20231023073225-7748710e0781 h1:BUdwkIlf8IS2FasrrPg8gGPHQPOrQ18MS1Oew2tmGtY=
|
||||
github.com/cs3org/go-cs3apis v0.0.0-20231023073225-7748710e0781/go.mod h1:UXha4TguuB52H14EMoSsCqDj7k8a/t7g4gVP+bgY5LY=
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240124094635-6eec406c0be7 h1:g7vQAbo64ziFqqhKcim3JCjDW1zqHy9imAm2HZmmK8w=
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240124094635-6eec406c0be7/go.mod h1:GCN3g6uYE0Nvd31dGlhaGGyUviUfbG2NkecPRv5oSc4=
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240126141248-c9e4a3bcd0da h1:VgWIr/lE6cv2f5IjjWgR0LOAK41gsUytBsSZo/4DRq4=
|
||||
github.com/cs3org/reva/v2 v2.18.1-0.20240126141248-c9e4a3bcd0da/go.mod h1:GCN3g6uYE0Nvd31dGlhaGGyUviUfbG2NkecPRv5oSc4=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
@@ -1113,8 +1113,8 @@ github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6
|
||||
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
|
||||
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/fschade/icap-client v0.0.0-20240105150744-9c2d8aff3ef2 h1:PJERPsceXsS4uTJuDvHy/4rgrZyZWttbKesaacKmXiI=
|
||||
github.com/fschade/icap-client v0.0.0-20240105150744-9c2d8aff3ef2/go.mod h1:Curjbe9P7SKWAtoXuu/huL8VnqzuBzetEpEPt9TLToE=
|
||||
github.com/fschade/icap-client v0.0.0-20240123094924-5af178158eaf h1:3IzYXRblwIxeis+EtLLWTK0QitcefZT7YfpF7jfTFYA=
|
||||
github.com/fschade/icap-client v0.0.0-20240123094924-5af178158eaf/go.mod h1:Curjbe9P7SKWAtoXuu/huL8VnqzuBzetEpEPt9TLToE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
|
||||
@@ -94,10 +94,13 @@ func (g Graph) CreateUploadSession(w http.ResponseWriter, r *http.Request) {
|
||||
if cusr.Item.Name != "" {
|
||||
ref.Path = utils.MakeRelativePath(cusr.Item.Name)
|
||||
}
|
||||
// TODO size?
|
||||
req := &storageprovider.InitiateFileUploadRequest{
|
||||
Ref: ref,
|
||||
Opaque: utils.AppendPlainToOpaque(nil, "Upload-Length", strconv.FormatUint(uint64(cusr.Item.FileSize), 10)),
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
res, err := gatewayClient.InitiateFileUpload(ctx, &storageprovider.InitiateFileUploadRequest{Ref: ref})
|
||||
res, err := gatewayClient.InitiateFileUpload(ctx, req)
|
||||
switch {
|
||||
case err != nil:
|
||||
errorcode.GeneralException.Render(w, r, http.StatusInternalServerError, err.Error())
|
||||
@@ -1007,6 +1010,8 @@ func cs3ResourceToDriveItem(logger *log.Logger, res *storageprovider.ResourceInf
|
||||
parentRef.SetDriveType(res.GetSpace().GetSpaceType())
|
||||
parentRef.SetDriveId(storagespace.FormatStorageID(res.GetParentId().GetStorageId(), res.GetParentId().GetSpaceId()))
|
||||
parentRef.SetId(storagespace.FormatResourceID(*res.GetParentId()))
|
||||
parentRef.SetName(res.GetName())
|
||||
parentRef.SetPath(res.GetPath())
|
||||
driveItem.ParentReference = parentRef
|
||||
}
|
||||
if res.GetType() == storageprovider.ResourceType_RESOURCE_TYPE_FILE && res.GetMimeType() != "" {
|
||||
|
||||
@@ -90,3 +90,11 @@ ocis storage-users uploads list
```bash
ocis postprocessing restart -u <uploadID>
```

Instead of restarting one specific upload, a system admin can also restart all uploads that are currently in a specific postprocessing step.
Examples:
```bash
ocis postprocessing restart                 # Restarts all uploads where postprocessing is finished, but the upload is not finished
ocis postprocessing restart -s "finished"   # Equivalent to the above
ocis postprocessing restart -s "virusscan"  # Restart all uploads currently in the virusscan step
```

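To narrow down what a bulk restart will touch, the upload-session listing introduced elsewhere in this changeset can be consulted first. A sketch, assuming the `--processing` filter and the `virusscan` step name from the examples above apply to your deployment:

```bash
# review sessions that are still in postprocessing
ocis storage-users uploads sessions --processing=true

# then restart every upload currently sitting in the virusscan step
ocis postprocessing restart -s "virusscan"
```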
@@ -2,7 +2,6 @@ package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/events"
|
||||
"github.com/cs3org/reva/v2/pkg/events/stream"
|
||||
@@ -20,10 +19,15 @@ func RestartPostprocessing(cfg *config.Config) *cli.Command {
|
||||
Usage: "restart postprocessing for an uploadID",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "upload-id",
|
||||
Aliases: []string{"u"},
|
||||
Required: true,
|
||||
Usage: "the uploadid to restart",
|
||||
Name: "upload-id",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "the uploadid to restart. Ignored if unset.",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "step",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "restarts all uploads in the given postprocessing step. Ignored if upload-id is set.",
|
||||
Value: "finished", // Calling `ocis postprocessing restart` without any arguments will restart all uploads that are finished but failed to move the uploed from the upload area to the blobstore.
|
||||
},
|
||||
},
|
||||
Before: func(c *cli.Context) error {
|
||||
@@ -35,24 +39,18 @@ func RestartPostprocessing(cfg *config.Config) *cli.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
uid, step := c.String("upload-id"), ""
|
||||
if uid == "" {
|
||||
step = c.String("step")
|
||||
}
|
||||
|
||||
ev := events.ResumePostprocessing{
|
||||
UploadID: c.String("upload-id"),
|
||||
UploadID: uid,
|
||||
Step: events.Postprocessingstep(step),
|
||||
Timestamp: utils.TSNow(),
|
||||
}
|
||||
|
||||
if err := events.Publish(context.Background(), stream, ev); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// go-micro nats implementation uses async publishing,
|
||||
// therefore we need to manually wait.
|
||||
//
|
||||
// FIXME: upstream pr
|
||||
//
|
||||
// https://github.com/go-micro/plugins/blob/3e77393890683be4bacfb613bc5751867d584692/v4/events/natsjs/nats.go#L115
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
return nil
|
||||
return events.Publish(context.Background(), stream, ev)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,14 +65,14 @@ func (pp *Postprocessing) NextStep(ev events.PostprocessingStepFinished) interfa
|
||||
|
||||
// CurrentStep returns the current postprocessing step
|
||||
func (pp *Postprocessing) CurrentStep() interface{} {
|
||||
if pp.Status.Outcome != "" {
|
||||
if pp.Status.CurrentStep == events.PPStepFinished {
|
||||
return pp.finished(pp.Status.Outcome)
|
||||
}
|
||||
return pp.step(pp.Status.CurrentStep)
|
||||
}
|
||||
|
||||
// Delay will sleep the configured time then continue
|
||||
func (pp *Postprocessing) Delay(ev events.StartPostprocessingStep) interface{} {
|
||||
func (pp *Postprocessing) Delay() interface{} {
|
||||
time.Sleep(pp.config.Delayprocessing)
|
||||
return pp.next(events.PPStepDelay)
|
||||
}
|
||||
@@ -106,6 +106,7 @@ func (pp *Postprocessing) step(next events.Postprocessingstep) events.StartPostp
|
||||
}
|
||||
|
||||
func (pp *Postprocessing) finished(outcome events.PostprocessingOutcome) events.PostprocessingFinished {
|
||||
pp.Status.CurrentStep = events.PPStepFinished
|
||||
pp.Status.Outcome = outcome
|
||||
return events.PostprocessingFinished{
|
||||
UploadID: pp.ID,
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cs3org/reva/v2/pkg/events"
|
||||
"github.com/cs3org/reva/v2/pkg/utils"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/log"
|
||||
"github.com/owncloud/ocis/v2/services/postprocessing/pkg/config"
|
||||
"github.com/owncloud/ocis/v2/services/postprocessing/pkg/postprocessing"
|
||||
@@ -142,29 +143,20 @@ func (pps *PostprocessingService) processEvent(e events.Event) error {
|
||||
pps.log.Error().Str("uploadID", ev.UploadID).Err(err).Msg("cannot get upload")
|
||||
return fmt.Errorf("%w: cannot get upload", errEvent)
|
||||
}
|
||||
next = pp.Delay(ev)
|
||||
next = pp.Delay()
|
||||
case events.UploadReady:
|
||||
if ev.Failed {
|
||||
// the upload failed - let's keep it around for a while
|
||||
return nil
|
||||
}
|
||||
|
||||
// the storage provider thinks the upload is done - so no need to keep it any more
|
||||
if err := pps.store.Delete(ev.UploadID); err != nil {
|
||||
pps.log.Error().Str("uploadID", ev.UploadID).Err(err).Msg("cannot delete upload")
|
||||
return fmt.Errorf("%w: cannot delete upload", errEvent)
|
||||
}
|
||||
case events.ResumePostprocessing:
|
||||
pp, err = pps.getPP(pps.store, ev.UploadID)
|
||||
if err != nil {
|
||||
if err == store.ErrNotFound {
|
||||
if err := events.Publish(ctx, pps.pub, events.RestartPostprocessing{
|
||||
UploadID: ev.UploadID,
|
||||
Timestamp: ev.Timestamp,
|
||||
}); err != nil {
|
||||
pps.log.Error().Str("uploadID", ev.UploadID).Err(err).Msg("cannot publish RestartPostprocessing event")
|
||||
}
|
||||
return fmt.Errorf("%w: cannot publish RestartPostprocessing event", errEvent)
|
||||
}
|
||||
pps.log.Error().Str("uploadID", ev.UploadID).Err(err).Msg("cannot get upload")
|
||||
return fmt.Errorf("%w: cannot get upload", errEvent)
|
||||
}
|
||||
next = pp.CurrentStep()
|
||||
return pps.handleResumePPEvent(ctx, ev)
|
||||
}
|
||||
|
||||
if pp != nil {
|
||||
@@ -182,30 +174,6 @@ func (pps *PostprocessingService) processEvent(e events.Event) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getSteps(c config.Postprocessing) []events.Postprocessingstep {
|
||||
// NOTE: improved version only allows configuring order of postprocessing steps
|
||||
// But we aim for a system where postprocessing steps can be configured per space, ideally by the spaceadmin itself
|
||||
// We need to iterate over configuring PP service when we see fit
|
||||
var steps []events.Postprocessingstep
|
||||
for _, s := range c.Steps {
|
||||
steps = append(steps, events.Postprocessingstep(s))
|
||||
}
|
||||
|
||||
return steps
|
||||
}
|
||||
|
||||
func storePP(sto store.Store, pp *postprocessing.Postprocessing) error {
|
||||
b, err := json.Marshal(pp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sto.Write(&store.Record{
|
||||
Key: pp.ID,
|
||||
Value: b,
|
||||
})
|
||||
}
|
||||
|
||||
func (pps *PostprocessingService) getPP(sto store.Store, uploadID string) (*postprocessing.Postprocessing, error) {
|
||||
recs, err := sto.Read(uploadID)
|
||||
if err != nil {
|
||||
@@ -224,3 +192,95 @@ func (pps *PostprocessingService) getPP(sto store.Store, uploadID string) (*post
|
||||
|
||||
return pp, nil
|
||||
}
|
||||
|
||||
func getSteps(c config.Postprocessing) []events.Postprocessingstep {
|
||||
// NOTE: improved version only allows configuring order of postprocessing steps
|
||||
// But we aim for a system where postprocessing steps can be configured per space, ideally by the spaceadmin itself
|
||||
// We need to iterate over configuring PP service when we see fit
|
||||
steps := make([]events.Postprocessingstep, 0, len(c.Steps))
|
||||
for _, s := range c.Steps {
|
||||
steps = append(steps, events.Postprocessingstep(s))
|
||||
}
|
||||
|
||||
return steps
|
||||
}
|
||||
|
||||
func storePP(sto store.Store, pp *postprocessing.Postprocessing) error {
|
||||
b, err := json.Marshal(pp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sto.Write(&store.Record{
|
||||
Key: pp.ID,
|
||||
Value: b,
|
||||
})
|
||||
}
|
||||
|
||||
func (pps *PostprocessingService) handleResumePPEvent(ctx context.Context, ev events.ResumePostprocessing) error {
|
||||
ids := []string{ev.UploadID}
|
||||
if ev.Step != "" {
|
||||
ids = pps.findUploadsByStep(ev.Step)
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
if err := pps.resumePP(ctx, id); err != nil {
|
||||
pps.log.Error().Str("uploadID", id).Err(err).Msg("cannot resume upload")
|
||||
return fmt.Errorf("%w: cannot resume upload", errEvent)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pps *PostprocessingService) resumePP(ctx context.Context, uploadID string) error {
|
||||
pp, err := pps.getPP(pps.store, uploadID)
|
||||
if err != nil {
|
||||
if err == store.ErrNotFound {
|
||||
if err := events.Publish(ctx, pps.pub, events.RestartPostprocessing{
|
||||
UploadID: uploadID,
|
||||
Timestamp: utils.TSNow(),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("%w: cannot get upload", errEvent)
|
||||
}
|
||||
|
||||
return events.Publish(ctx, pps.pub, pp.CurrentStep())
|
||||
}
|
||||
|
||||
func (pps *PostprocessingService) findUploadsByStep(step events.Postprocessingstep) []string {
|
||||
var ids []string
|
||||
|
||||
keys, err := pps.store.List()
|
||||
if err != nil {
|
||||
pps.log.Error().Err(err).Msg("cannot list uploads")
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
rec, err := pps.store.Read(k)
|
||||
if err != nil {
|
||||
pps.log.Error().Err(err).Msg("cannot read upload")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(rec) != 1 {
|
||||
pps.log.Error().Err(err).Msg("expected only one result")
|
||||
continue
|
||||
}
|
||||
|
||||
pp := &postprocessing.Postprocessing{}
|
||||
err = json.Unmarshal(rec[0].Value, pp)
|
||||
if err != nil {
|
||||
pps.log.Error().Err(err).Msg("cannot unmarshal upload")
|
||||
continue
|
||||
}
|
||||
|
||||
if pp.Status.CurrentStep == step {
|
||||
ids = append(ids, pp.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return ids
|
||||
}
|
||||
|
||||
@@ -36,10 +36,13 @@ When using Infinite Scale as user storage, a directory named `storage/users/uplo

Example cases for expired uploads

* When a user uploads a big file but the file exceeds the user quota, the upload can't be moved to the target after it has finished. The file stays at the upload location until it is manually cleared.
* In the final step the upload blob is moved from the upload area to the final blobstore (e.g. S3).

* If the bandwidth is limited and the file to transfer can't be transferred completely before the upload expiration time is reached, the file expires and can't be processed.

There are two commands available to manage unfinished uploads
The admin can restart the postprocessing for these uploads with the postprocessing CLI.

The storage-users service can only list and clean upload sessions:

```bash
ocis storage-users uploads <command>
@@ -47,21 +50,38 @@ ocis storage-users uploads <command>

```plaintext
COMMANDS:
list Print a list of all incomplete uploads
clean Clean up leftovers from expired uploads
sessions Print a list of upload sessions
clean Clean up leftovers from expired uploads
list Print a list of all incomplete uploads (deprecated)
```

#### Command Examples

Command to identify incomplete uploads
Command to list ongoing upload sessions

```bash
ocis storage-users uploads list
ocis storage-users uploads sessions --expired=false
```

```plaintext
Incomplete uploads:
- 455bd640-cd08-46e8-a5a0-9304908bd40a (file_example_PPT_1MB.ppt, Size: 1028608, Expires: 2022-08-17T12:35:34+02:00)
Not expired sessions:
+--------------------------------------+--------------------------------------+---------+--------+------+--------------------------------------+--------------------------------------+---------------------------+------------+
| Space | Upload Id | Name | Offset | Size | Executant | Owner | Expires | Processing |
+--------------------------------------+--------------------------------------+---------+--------+------+--------------------------------------+--------------------------------------+---------------------------+------------+
| f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | 5e387954-7313-4223-a904-bf996da6ec0b | foo.txt | 0 | 1234 | f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | 2024-01-26T13:04:31+01:00 | false |
| f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | f066244d-97b2-48e7-a30d-b40fcb60cec6 | bar.txt | 0 | 4321 | f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c | 2024-01-26T13:18:47+01:00 | false |
+--------------------------------------+--------------------------------------+---------+--------+------+--------------------------------------+--------------------------------------+---------------------------+------------+
```

The sessions command can also output JSON

```bash
ocis storage-users uploads sessions --expired=false --json
```

```json
{"id":"5e387954-7313-4223-a904-bf996da6ec0b","space":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","filename":"foo.txt","offset":0,"size":1234,"executant":{"idp":"https://cloud.ocis.test","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"spaceowner":{"opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"expires":"2024-01-26T13:04:31+01:00","processing":false}
{"id":"f066244d-97b2-48e7-a30d-b40fcb60cec6","space":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","filename":"bar.txt","offset":0,"size":4321,"executant":{"idp":"https://cloud.ocis.test","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"spaceowner":{"opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c"},"expires":"2024-01-26T13:18:47+01:00","processing":false}
```
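Because each session is printed as one JSON object per line, the output can be post-processed with standard tooling. A sketch, assuming `jq` is available (it is not part of oCIS):

```bash
# print only the IDs of sessions that are no longer in postprocessing
ocis storage-users uploads sessions --json | jq -r 'select(.processing == false) | .id'
```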

Command to clear expired uploads
@@ -74,6 +94,17 @@ Cleaned uploads:
- 455bd640-cd08-46e8-a5a0-9304908bd40a (Filename: file_example_PPT_1MB.ppt, Size: 1028608, Expires: 2022-08-17T12:35:34+02:00)
```

Deprecated list command to identify unfinished uploads

```bash
ocis storage-users uploads list
```

```plaintext
Incomplete uploads:
- 455bd640-cd08-46e8-a5a0-9304908bd40a (file_example_PPT_1MB.ppt, Size: 1028608, Expires: 2022-08-17T12:35:34+02:00)
```

### Purge Expired Space Trash-Bins Items

<!-- referencing: https://github.com/owncloud/ocis/pull/5500 -->
|
||||
|
||||
@@ -1,12 +1,18 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
tw "github.com/olekukonko/tablewriter"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
|
||||
"github.com/cs3org/reva/v2/pkg/storage"
|
||||
"github.com/cs3org/reva/v2/pkg/storage/fs/registry"
|
||||
"github.com/owncloud/ocis/v2/ocis-pkg/config/configlog"
|
||||
@@ -22,6 +28,7 @@ func Uploads(cfg *config.Config) *cli.Command {
|
||||
Usage: "manage unfinished uploads",
|
||||
Subcommands: []*cli.Command{
|
||||
ListUploads(cfg),
|
||||
ListUploadSessions(cfg),
|
||||
PurgeExpiredUploads(cfg),
|
||||
},
|
||||
}
|
||||
@@ -31,7 +38,7 @@ func Uploads(cfg *config.Config) *cli.Command {
|
||||
func ListUploads(cfg *config.Config) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "list",
|
||||
Usage: "Print a list of all incomplete uploads",
|
||||
Usage: "Print a list of all incomplete uploads (deprecated, use sessions)",
|
||||
Before: func(c *cli.Context) error {
|
||||
return configlog.ReturnFatal(parser.ParseConfig(cfg))
|
||||
},
|
||||
@@ -50,7 +57,7 @@ func ListUploads(cfg *config.Config) *cli.Command {
|
||||
|
||||
managingFS, ok := fs.(storage.UploadSessionLister)
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "'%s' storage does not support listing expired uploads\n", cfg.Driver)
|
||||
fmt.Fprintf(os.Stderr, "'%s' storage does not support listing upload sessions\n", cfg.Driver)
|
||||
os.Exit(1)
|
||||
}
|
||||
expired := false
|
||||
@@ -69,7 +76,162 @@ func ListUploads(cfg *config.Config) *cli.Command {
|
||||
}
|
||||
}
|
||||
|
||||
// PurgeExpiredUploads is the entry point for the server command.
|
||||
// ListUploadSessions prints a list of upload sessions
|
||||
func ListUploadSessions(cfg *config.Config) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "sessions",
|
||||
Usage: "Print a list of upload sessions",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "id",
|
||||
DefaultText: "unset",
|
||||
Usage: "filter sessions by upload session id",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "processing",
|
||||
DefaultText: "unset",
|
||||
Usage: "filter sessions by processing status",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "expired",
|
||||
DefaultText: "unset",
|
||||
Usage: "filter sessions by expired status",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "output as json",
|
||||
},
|
||||
},
|
||||
Before: func(c *cli.Context) error {
|
||||
return configlog.ReturnFatal(parser.ParseConfig(cfg))
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
f, ok := registry.NewFuncs[cfg.Driver]
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "Unknown filesystem driver '%s'\n", cfg.Driver)
|
||||
os.Exit(1)
|
||||
}
|
||||
drivers := revaconfig.StorageProviderDrivers(cfg)
|
||||
fs, err := f(drivers[cfg.Driver].(map[string]interface{}), nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to initialize filesystem driver '%s'\n", cfg.Driver)
|
||||
return err
|
||||
}
|
||||
|
||||
managingFS, ok := fs.(storage.UploadSessionLister)
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "'%s' storage does not support listing upload sessions\n", cfg.Driver)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
filter := storage.UploadSessionFilter{}
|
||||
if c.IsSet("processing") {
|
||||
processingValue := c.Bool("processing")
|
||||
filter.Processing = &processingValue
|
||||
if !processingValue {
|
||||
b.WriteString("Not ")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
b.WriteString("Processing ")
|
||||
} else {
|
||||
b.WriteString("processing ")
|
||||
}
|
||||
}
|
||||
if c.IsSet("expired") {
|
||||
expiredValue := c.Bool("expired")
|
||||
filter.Expired = &expiredValue
|
||||
if !expiredValue {
|
||||
if b.Len() == 0 {
|
||||
b.WriteString("Not ")
|
||||
} else {
|
||||
b.WriteString(", not ")
|
||||
}
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
b.WriteString("Expired ")
|
||||
} else {
|
||||
b.WriteString("expired ")
|
||||
}
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
b.WriteString("Sessions")
|
||||
} else {
|
||||
b.WriteString("sessions")
|
||||
}
|
||||
if c.IsSet("id") {
|
||||
idValue := c.String("id")
|
||||
filter.ID = &idValue
|
||||
b.WriteString(" with id '" + idValue + "'")
|
||||
}
|
||||
b.WriteString(":")
|
||||
uploads, err := managingFS.ListUploadSessions(c.Context, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var table *tw.Table
|
||||
if c.Bool("json") {
|
||||
for _, u := range uploads {
|
||||
ref := u.Reference()
|
||||
s := struct {
|
||||
ID string `json:"id"`
|
||||
Space string `json:"space"`
|
||||
Filename string `json:"filename"`
|
||||
Offset int64 `json:"offset"`
|
||||
Size int64 `json:"size"`
|
||||
Executant userpb.UserId `json:"executant"`
|
||||
SpaceOwner *userpb.UserId `json:"spaceowner,omitempty"`
|
||||
Expires time.Time `json:"expires"`
|
||||
Processing bool `json:"processing"`
|
||||
}{
|
||||
Space: ref.GetResourceId().GetSpaceId(),
|
||||
ID: u.ID(),
|
||||
Filename: u.Filename(),
|
||||
Offset: u.Offset(),
|
||||
Size: u.Size(),
|
||||
Executant: u.Executant(),
|
||||
SpaceOwner: u.SpaceOwner(),
|
||||
Expires: u.Expires(),
|
||||
Processing: u.IsProcessing(),
|
||||
}
|
||||
j, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
fmt.Println(string(j))
|
||||
}
|
||||
} else {
|
||||
|
||||
// Print what the user requested
|
||||
fmt.Println(b.String())
|
||||
|
||||
// start a table
|
||||
table = tw.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Space", "Upload Id", "Name", "Offset", "Size", "Executant", "Owner", "Expires", "Processing"})
|
||||
table.SetAutoFormatHeaders(false)
|
||||
|
||||
for _, u := range uploads {
|
||||
table.Append([]string{
|
||||
u.Reference().ResourceId.GetSpaceId(),
|
||||
u.ID(),
|
||||
u.Filename(),
|
||||
strconv.FormatInt(u.Offset(), 10),
|
||||
strconv.FormatInt(u.Size(), 10),
|
||||
u.Executant().OpaqueId,
|
||||
u.SpaceOwner().GetOpaqueId(),
|
||||
u.Expires().Format(time.RFC3339),
|
||||
strconv.FormatBool(u.IsProcessing()),
|
||||
})
|
||||
}
|
||||
table.Render()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// PurgeExpiredUploads is the entry point for the clean command
|
||||
func PurgeExpiredUploads(cfg *config.Config) *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "clean",
|
||||
|
||||
@@ -67,6 +67,27 @@ class GraphHelper {
|
||||
return self::getUUIDv4Regex() . ':' . self::getUUIDv4Regex() . ':' . self::getUUIDv4Regex();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return string
|
||||
*/
|
||||
public static function getFileIdRegex(): string {
|
||||
return self::getUUIDv4Regex() . '\\\$' . self::getUUIDv4Regex() . '!' . self::getUUIDv4Regex();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return string
|
||||
*/
|
||||
public static function getShareIdRegex(): string {
|
||||
return self::getUUIDv4Regex() . '\\\$' . self::getUUIDv4Regex() . '!' . self::getUUIDv4Regex() . ':' . self::getUUIDv4Regex() . ':' . self::getUUIDv4Regex();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return string
|
||||
*/
|
||||
public static function getEtagRegex(): string {
|
||||
return "^\\\"[a-f0-9:.]{1,32}\\\"$";
|
||||
}
|
||||
|
||||
/**
|
||||
* Key name can consist of @@@
* This function separates such a key and returns its actual value from the actual drive response, which can be used for assertion
|
||||
|
||||
@@ -149,6 +149,7 @@ default:
|
||||
- OCSContext:
|
||||
- GraphContext:
|
||||
- OcisConfigContext:
|
||||
- SettingsContext:
|
||||
|
||||
apiDepthInfinity:
|
||||
paths:
|
||||
|
||||
@@ -90,3 +90,13 @@ Feature: CORS headers
|
||||
And the following headers should be set
|
||||
| header | value |
|
||||
| Access-Control-Allow-Origin | https://aphno.badal |
|
||||
|
||||
|
||||
Scenario: CORS headers should be returned when setting CORS domain sending origin header in the settings api
|
||||
When user "Alice" lists values-list with headers using the Settings API
|
||||
| header | value |
|
||||
| Origin | https://aphno.badal |
|
||||
Then the HTTP status code should be "201"
|
||||
And the following headers should be set
|
||||
| header | value |
|
||||
| Access-Control-Allow-Origin | https://aphno.badal |
|
||||
|
||||
@@ -123,3 +123,109 @@ Feature: Reshare a share invitation
|
||||
| Editor | Viewer |
|
||||
| Editor | Editor |
|
||||
| Editor | Uploader |
|
||||
|
||||
|
||||
Scenario: try to reshare a resource to higher roles
|
||||
Given user "Alice" has uploaded file with content "to share" to "/textfile1.txt"
|
||||
And user "Alice" has sent the following share invitation:
|
||||
| resourceType | file |
|
||||
| resource | textfile1.txt |
|
||||
| space | Personal |
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
When user "Brian" sends the following share invitation using the Graph API:
|
||||
| resourceType | file |
|
||||
| resource | textfile1.txt |
|
||||
| space | Shares |
|
||||
| sharee | Carol |
|
||||
| shareType | user |
|
||||
| permissionsRole | File Editor |
|
||||
Then the HTTP status code should be "403"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"accessDenied"
|
||||
]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"insufficient permissions to create that kind of share"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
And for user "Carol" the space Shares should not contain these entries:
|
||||
| textfile1.txt |
|
||||
|
||||
|
||||
Scenario: user with role Uploader tries to reshare a folder
|
||||
Given user "Alice" has created folder "FolderToShare"
|
||||
And user "Alice" has sent the following share invitation:
|
||||
| resourceType | folder |
|
||||
| resource | FolderToShare |
|
||||
| space | Personal |
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | Uploader |
|
||||
When user "Brian" sends the following share invitation using the Graph API:
|
||||
| resourceType | folder |
|
||||
| resource | FolderToShare |
|
||||
| space | Shares |
|
||||
| sharee | Carol |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "403"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"accessDenied"
|
||||
]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"no permission to add grants on shared resource"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
And for user "Carol" the space Shares should not contain these entries:
|
||||
| FolderToShare |
|
||||
|
||||
@@ -1192,3 +1192,297 @@ Feature: Send a sharing invitations
|
||||
| Viewer | folder | FolderToShare |
|
||||
| Editor | folder | FolderToShare |
|
||||
| Uploader | folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation to wrong user id
|
||||
Given user "Alice" has uploaded file with content "to share" to "/textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| shareeId | a4c0c83e-ae24-4870-93c3-fcaf2a2228f7 |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["generalException"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"itemNotFound: not found"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation with empty user id
|
||||
Given user "Alice" has uploaded file with content "to share" to "/textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| shareeId | |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["invalidRequest"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Key: 'DriveItemInvite.Recipients[0].ObjectId' Error:Field validation for 'ObjectId' failed on the 'ne' tag"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation to user with wrong recipient type
|
||||
Given user "Alice" has uploaded file with content "to share" to "textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| sharee | Brian |
|
||||
| shareType | wrongShareType |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["invalidRequest"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Key: 'DriveItemInvite.Recipients[0].LibreGraphRecipientType' Error:Field validation for 'LibreGraphRecipientType' failed on the 'oneof' tag"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation to group with wrong recipient type
|
||||
Given user "Carol" has been created with default attributes and without skeleton files
|
||||
And user "Alice" has uploaded file with content "to share" to "textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
And group "grp1" has been created
|
||||
And the following users have been added to the following groups
|
||||
| username | groupname |
|
||||
| Brian | grp1 |
|
||||
| Carol | grp1 |
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| sharee | grp1 |
|
||||
| shareType | wrongShareType |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["invalidRequest"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Key: 'DriveItemInvite.Recipients[0].LibreGraphRecipientType' Error:Field validation for 'LibreGraphRecipientType' failed on the 'oneof' tag"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation to user with empty recipient type
|
||||
Given user "Alice" has uploaded file with content "to share" to "textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| sharee | Brian |
|
||||
| shareType | |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["invalidRequest"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Key: 'DriveItemInvite.Recipients[0].LibreGraphRecipientType' Error:Field validation for 'LibreGraphRecipientType' failed on the 'oneof' tag"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
|
||||
Scenario Outline: send share invitation to group with empty recipient type
|
||||
Given user "Carol" has been created with default attributes and without skeleton files
|
||||
And user "Alice" has uploaded file with content "to share" to "textfile1.txt"
|
||||
And user "Alice" has created folder "FolderToShare"
|
||||
And group "grp1" has been created
|
||||
And the following users have been added to the following groups
|
||||
| username | groupname |
|
||||
| Brian | grp1 |
|
||||
| Carol | grp1 |
|
||||
When user "Alice" tries to send the following share invitation using the Graph API:
|
||||
| resourceType | <resource-type> |
|
||||
| resource | <path> |
|
||||
| space | Personal |
|
||||
| sharee | grp1 |
|
||||
| shareType | |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "400"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"error"
|
||||
],
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"code",
|
||||
"message"
|
||||
],
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"enum": ["invalidRequest"]
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Key: 'DriveItemInvite.Recipients[0].LibreGraphRecipientType' Error:Field validation for 'LibreGraphRecipientType' failed on the 'oneof' tag"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
Examples:
|
||||
| resource-type | path |
|
||||
| file | /textfile1.txt |
|
||||
| folder | FolderToShare |
|
||||
|
||||
476
tests/acceptance/features/apiSharingNg/sharedWithMe.feature
Normal file
@@ -0,0 +1,476 @@
|
||||
Feature: a user gets the resources shared with them
|
||||
As a user
|
||||
I want to get resources shared with me
|
||||
So that I can know about what resources I have access to
|
||||
|
||||
https://owncloud.dev/libre-graph-api/#/me.drive/ListSharedWithMe
|
||||
|
||||
Background:
|
||||
Given these users have been created with default attributes and without skeleton files:
|
||||
| username |
|
||||
| Alice |
|
||||
| Brian |
|
||||
|
||||
|
||||
Scenario: user lists the file shared with them
|
||||
Given user "Alice" has uploaded file with content "hello world" to "/textfile0.txt"
|
||||
And user "Alice" has shared file "textfile0.txt" with user "Brian"
|
||||
When user "Brian" lists the resources shared with him using the Graph API
|
||||
Then the HTTP status code should be "200"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"value"
|
||||
],
|
||||
"properties": {
|
||||
"value": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"createdBy",
|
||||
"eTag",
|
||||
"file",
|
||||
"id",
|
||||
"lastModifiedDateTime",
|
||||
"name",
|
||||
"parentReference",
|
||||
"remoteItem"
|
||||
],
|
||||
"properties": {
|
||||
"createdBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": ["displayName", "id"],
|
||||
"properties": {
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": ["Alice Hansen"]
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"eTag": {
|
||||
"type": "string",
|
||||
"pattern": "%eTag%"
|
||||
},
|
||||
"file": {
|
||||
"type": "object",
|
||||
"required": ["mimeType"],
|
||||
"properties": {
|
||||
"mimeType": {
|
||||
"type": "string",
|
||||
"enum": ["text/plain"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%share_id_pattern%$"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"textfile0.txt"
|
||||
]
|
||||
},
|
||||
"parentReference": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"driveId",
|
||||
"driveType"
|
||||
],
|
||||
"properties": {
|
||||
"driveId": {
|
||||
"type": "string",
|
||||
"pattern": "^%space_id_pattern%$"
|
||||
},
|
||||
"driveType" : {
|
||||
"type": "string",
|
||||
"enum": ["virtual"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"remoteItem": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"createdBy",
|
||||
"eTag",
|
||||
"file",
|
||||
"id",
|
||||
"lastModifiedDateTime",
|
||||
"name",
|
||||
"parentReference",
|
||||
"permissions",
|
||||
"shared",
|
||||
"size"
|
||||
],
|
||||
"properties": {
|
||||
"createdBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"eTag": {
|
||||
"type": "string",
|
||||
"pattern": "%eTag%"
|
||||
},
|
||||
"file": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"mimeType"
|
||||
],
|
||||
"properties": {
|
||||
"mimeType": {
|
||||
"type": "string",
|
||||
"pattern": "text/plain"
|
||||
}
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%file_id_pattern%$"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"textfile0.txt"
|
||||
]
|
||||
},
|
||||
"shared": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"sharedBy",
|
||||
"owner"
|
||||
],
|
||||
"properties": {
|
||||
"owner": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sharedBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"size": {
|
||||
"type": "number",
|
||||
"enum": [
|
||||
11
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
Scenario: user lists the folder shared with them
|
||||
Given user "Alice" has created folder "folder"
|
||||
And user "Alice" has shared folder "folder" with user "Brian"
|
||||
When user "Brian" lists the resources shared with him using the Graph API
|
||||
Then the HTTP status code should be "200"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
{
|
||||
"type": "object",
|
||||
"required": [
|
||||
"value"
|
||||
],
|
||||
"properties": {
|
||||
"value": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"createdBy",
|
||||
"eTag",
|
||||
"folder",
|
||||
"id",
|
||||
"lastModifiedDateTime",
|
||||
"name",
|
||||
"parentReference",
|
||||
"remoteItem"
|
||||
],
|
||||
"properties": {
|
||||
"createdBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": ["displayName", "id"],
|
||||
"properties": {
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": ["Alice Hansen"]
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"eTag": {
|
||||
"type": "string",
|
||||
"pattern": "%eTag%"
|
||||
},
|
||||
"folder": {
|
||||
"type": "object",
|
||||
"required": [],
|
||||
"properties": {}
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%share_id_pattern%$"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"folder"
|
||||
]
|
||||
},
|
||||
"parentReference": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"driveId",
|
||||
"driveType"
|
||||
],
|
||||
"properties": {
|
||||
"driveId": {
|
||||
"type": "string",
|
||||
"pattern": "^%space_id_pattern%$"
|
||||
},
|
||||
"driveType" : {
|
||||
"type": "string",
|
||||
"enum": ["virtual"]
|
||||
}
|
||||
}
|
||||
},
|
||||
"remoteItem": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"createdBy",
|
||||
"eTag",
|
||||
"folder",
|
||||
"id",
|
||||
"lastModifiedDateTime",
|
||||
"name",
|
||||
"parentReference",
|
||||
"permissions",
|
||||
"shared"
|
||||
],
|
||||
"properties": {
|
||||
"createdBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"eTag": {
|
||||
"type": "string",
|
||||
"pattern": "%eTag%"
|
||||
},
|
||||
"file": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"mimeType"
|
||||
],
|
||||
"properties": {
|
||||
"mimeType": {
|
||||
"type": "string",
|
||||
"pattern": "text/plain"
|
||||
}
|
||||
}
|
||||
},
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%file_id_pattern%$"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"folder"
|
||||
]
|
||||
},
|
||||
"shared": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"sharedBy",
|
||||
"owner"
|
||||
],
|
||||
"properties": {
|
||||
"owner": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sharedBy": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"user"
|
||||
],
|
||||
"properties": {
|
||||
"user": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"id",
|
||||
"displayName"
|
||||
],
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"pattern": "^%user_id_pattern%$"
|
||||
},
|
||||
"displayName": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"Alice Hansen"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
@@ -94,8 +94,8 @@ Feature: moving/renaming file using file id
|
||||
And user "Brian" has uploaded file with content "some data" to "/test.txt"
|
||||
And we save it into "FILEID"
|
||||
When user "Brian" moves a file "test.txt" into "folder" inside space "Shares" using file-id path "<dav-path>"
|
||||
Then the HTTP status code should be "403"
|
||||
And the value of the item "/d:error/s:message" in the response about user "Brian" should be "cross storage moves are not permitted, use copy and delete"
|
||||
Then the HTTP status code should be "502"
|
||||
And the value of the item "/d:error/s:message" in the response about user "Brian" should be "cross storage moves are not supported, use copy and delete"
|
||||
And for user "Brian" folder "/" of the space "Personal" should contain these files:
|
||||
| test.txt |
|
||||
But for user "Alice" folder "folder" of the space "Personal" should not contain these files:
|
||||
@@ -351,7 +351,7 @@ Feature: moving/renaming file using file id
|
||||
And user "Alice" has created folder "testshare"
|
||||
And user "Alice" has shared folder "testshare" with user "Brian" with permissions "<permissions>"
|
||||
When user "Brian" moves a file "textfile.txt" into "testshare" inside space "Shares" using file-id path "<dav-path>"
|
||||
Then the HTTP status code should be "403"
|
||||
Then the HTTP status code should be "502"
|
||||
And for user "Brian" folder "/" of the space "project-space" should contain these files:
|
||||
| textfile.txt |
|
||||
But for user "Brian" folder "testshare" of the space "Shares" should not contain these files:
|
||||
@@ -475,7 +475,7 @@ Feature: moving/renaming file using file id
|
||||
And we save it into "FILEID"
|
||||
And user "Alice" has shared folder "folder" with user "Brian" with permissions "read"
|
||||
When user "Brian" moves a file "Shares/folder/test.txt" into "folder/sub-folder" inside space "Shares" using file-id path "<dav-path>"
|
||||
Then the HTTP status code should be "403"
|
||||
Then the HTTP status code should be "502"
|
||||
And for user "Brian" folder "folder/sub-folder" of the space "Shares" should not contain these files:
|
||||
| test.txt |
|
||||
And for user "Alice" folder "folder/sub-folder" of the space "Personal" should not contain these files:
|
||||
@@ -499,7 +499,7 @@ Feature: moving/renaming file using file id
|
||||
And user "Alice" has shared folder "testshare1" with user "Brian" with permissions "<from_permissions>"
|
||||
And user "Alice" has shared folder "testshare2" with user "Brian" with permissions "<to_permissions>"
|
||||
When user "Brian" moves a file "Shares/testshare1/textfile.txt" into "testshare2" inside space "Shares" using file-id path "<dav-path>"
|
||||
Then the HTTP status code should be "403"
|
||||
Then the HTTP status code should be "502"
|
||||
And for user "Brian" folder "testshare1" of the space "Shares" should contain these files:
|
||||
| textfile.txt |
|
||||
But for user "Brian" folder "testshare2" of the space "Shares" should not contain these files:
|
||||
@@ -697,7 +697,7 @@ Feature: moving/renaming file using file id
|
||||
And we save it into "FILEID"
|
||||
And user "Alice" has shared folder "/folder" with user "Brian" with permissions "read"
|
||||
When user "Brian" renames a file "Shares/folder/test.txt" into "folder/sub-folder/renamed.txt" inside space "Shares" using file-id path "<dav-path>"
|
||||
Then the HTTP status code should be "403"
|
||||
Then the HTTP status code should be "502"
|
||||
And for user "Brian" folder "folder" of the space "Shares" should contain these files:
|
||||
| test.txt |
|
||||
But for user "Brian" folder "folder/sub-folder" of the space "Shares" should not contain these files:
|
||||
|
||||
@@ -125,7 +125,7 @@ Feature: move (rename) file
| role | <role> |
And user "Brian" has shared folder "/testshare" with user "Alice" with permissions "<permissions>"
When user "Alice" moves file "project.txt" from space "Project" to "/testshare/project.txt" inside space "Shares" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And for user "Alice" the space "Project" should contain these entries:
| project.txt |
But for user "Alice" folder "testshare" of the space "Shares" should not contain these entries:
@@ -168,7 +168,7 @@ Feature: move (rename) file
And user "Brian" has shared folder "/testshare" with user "Alice" with permissions "<permissions>"
And user "Alice" has uploaded file with content "personal content" to "personal.txt"
When user "Alice" moves file "personal.txt" from space "Personal" to "/testshare/personal.txt" inside space "Shares" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And for user "Alice" the space "Personal" should contain these entries:
| personal.txt |
But for user "Alice" folder "testshare" of the space "Shares" should not contain these entries:
@@ -185,7 +185,7 @@ Feature: move (rename) file
And user "Brian" has uploaded file with content "testshare content" to "/testshare/testshare.txt"
And user "Brian" has shared folder "/testshare" with user "Alice" with permissions "<permissions>"
When user "Alice" moves file "/testshare/testshare.txt" from space "Shares" to "testshare.txt" inside space "Personal" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And for user "Alice" the space "Personal" should not contain these entries:
| testshare.txt |
And for user "Alice" folder "testshare" of the space "Shares" should contain these entries:
@@ -207,7 +207,7 @@ Feature: move (rename) file
And user "Brian" has uploaded file with content "testshare content" to "/testshare/testshare.txt"
And user "Brian" has shared folder "/testshare" with user "Alice" with permissions "<permissions>"
When user "Alice" moves file "/testshare/testshare.txt" from space "Shares" to "testshare.txt" inside space "Project" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And for user "Alice" the space "Project" should not contain these entries:
| /testshare.txt |
And for user "Alice" folder "testshare" of the space "Shares" should contain these entries:
@@ -232,7 +232,7 @@ Feature: move (rename) file
And user "Brian" has shared folder "/testshare1" with user "Alice" with permissions "<from_permissions>"
And user "Brian" has shared folder "/testshare2" with user "Alice" with permissions "<to_permissions>"
When user "Alice" moves file "/testshare1/testshare1.txt" from space "Shares" to "/testshare2/testshare1.txt" inside space "Shares" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And for user "Alice" folder "testshare1" of the space "Shares" should contain these entries:
| testshare1.txt |
But for user "Alice" folder "testshare2" of the space "Shares" should not contain these entries:

@@ -2799,6 +2799,30 @@ class FeatureContext extends BehatVariablesContext {
"getPermissionsIdRegex"
],
"parameter" => []
],
[
"code" => "%file_id_pattern%",
"function" => [
__NAMESPACE__ . '\TestHelpers\GraphHelper',
"getFileIdRegex"
],
"parameter" => []
],
[
"code" => "%share_id_pattern%",
"function" => [
__NAMESPACE__ . '\TestHelpers\GraphHelper',
"getShareIdRegex"
],
"parameter" => []
],
[
"code" => "%eTag%",
"function" => [
__NAMESPACE__ . '\TestHelpers\GraphHelper',
"getEtagRegex"
],
"parameter" => []
]
];
if ($user !== null) {

@@ -2505,4 +2505,24 @@ class GraphContext implements Context {
)
);
}

/**
* @When user :user lists the resources shared with him/her using the Graph API
*
* @param string $user
*
* @return void
* @throws GuzzleException
*/
public function userListsTheResourcesSharedWithThemUsingGraphApi(string $user): void {
$credentials = $this->getAdminOrUserCredentials($user);
$this->featureContext->setResponse(
GraphHelper::getSharesSharedWithMe(
$this->featureContext->getBaseUrl(),
$this->featureContext->getStepLineRef(),
$credentials['username'],
$credentials['password']
)
);
}
}

@@ -15,6 +15,7 @@ use Behat\Behat\Hook\Scope\BeforeScenarioScope;
use PHPUnit\Framework\Assert;
use Psr\Http\Message\ResponseInterface;
use TestHelpers\HttpRequestHelper;
use Behat\Gherkin\Node\TableNode;

require_once 'bootstrap.php';

@@ -339,13 +340,14 @@ class SettingsContext implements Context {

/**
* @param string $user
* @param array $headers
*
* @return ResponseInterface
*
* @throws GuzzleException
* @throws Exception
*/
public function sendRequestGetSettingsValuesList(string $user): ResponseInterface {
public function sendRequestGetSettingsValuesList(string $user, array $headers = null): ResponseInterface {
$fullUrl = $this->baseUrl . $this->settingsUrl . "values-list";
$body = json_encode(["account_uuid" => "me"], JSON_THROW_ON_ERROR);
return HttpRequestHelper::post(
@@ -353,11 +355,33 @@ class SettingsContext implements Context {
$this->featureContext->getStepLineRef(),
$user,
$this->featureContext->getPasswordForUser($user),
null,
$headers,
$body
);
}

/**
* @When /^user "([^"]*)" lists values-list with headers using the Settings API$/
*
* @param string $user
* @param TableNode $headersTable
*
* @return void
*
* @throws GuzzleException
* @throws Exception
*/
public function theUserListsAllValuesListWithHeadersUsingSettingsApi(string $user, TableNode $headersTable): void {
$this->featureContext->verifyTableNodeColumns(
$headersTable,
['header', 'value']
);
foreach ($headersTable as $row) {
$headers[$row['header']] = $row ['value'];
}
$this->featureContext->setResponse($this->sendRequestGetSettingsValuesList($user, $headers));
}

/**
* @param string $user
*

@@ -156,16 +156,18 @@ class SharingNgContext implements Context {
: $this->spacesContext->getFileId($user, $rows['space'], $rows['resource']);
}

$sharees = array_map('trim', explode(',', $rows['sharee']));
$shareTypes = array_map('trim', explode(',', $rows['shareType']));
if (\array_key_exists('shareeId', $rows)) {
$shareeIds[] = $rows['shareeId'];
$shareTypes[] = $rows['shareType'];
} else {
$sharees = array_map('trim', explode(',', $rows['sharee']));
$shareTypes = array_map('trim', explode(',', $rows['shareType']));

$shareeIds = [];
foreach ($sharees as $index => $sharee) {
$shareType = $shareTypes[$index];
// for non-exiting group or user, generate random id
$shareeIds[] = (($shareType === 'user')
? $this->featureContext->getAttributeOfCreatedUser($sharee, 'id')
: $this->featureContext->getAttributeOfCreatedGroup($sharee, 'id')) ?: WebDavHelper::generateUUIDv4();
foreach ($sharees as $sharee) {
// for non-exiting group or user, generate random id
$shareeIds[] = $this->featureContext->getAttributeOfCreatedUser($sharee, 'id')
?: ($this->featureContext->getAttributeOfCreatedGroup($sharee, 'id') ?: WebDavHelper::generateUUIDv4());
}
}

$permissionsRole = $rows['permissionsRole'] ?? null;
@@ -208,6 +210,7 @@ class SharingNgContext implements Context {

/**
* @When /^user "([^"]*)" sends the following share invitation using the Graph API:$/
* @When /^user "([^"]*)" tries to send the following share invitation using the Graph API:$/
*
* @param string $user
* @param TableNode $table

@@ -1720,7 +1720,7 @@ trait WebDav {
string $entry = "file",
?string $path = null,
string $type = "files"
):ResponseInterface {
):void {
$user = $this->getActualUsername($user);
$path = $this->substituteInLineCodes($path);
$response = $this->listFolder(
@@ -1733,7 +1733,7 @@ trait WebDav {
$statusCode = $response->getStatusCode();
if ($statusCode < 401 || $statusCode > 404) {
try {
$responseXml = $this->featureContext->getResponseXml(
$responseXml = HttpRequestHelper::getResponseXml(
$response,
__METHOD__
);
@@ -1756,7 +1756,6 @@ trait WebDav {
"$entry '$path' should not exist. But it does exist and is a $actualResourceType"
);
}
return $response;
}

/**

@@ -20,7 +20,7 @@ Feature: moving a share inside another share

Scenario: share receiver cannot move a whole share inside another share
When user "Brian" moves folder "Shares/folderB" to "Shares/folderA/folderB" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" folder "/folderB" should exist
And as "Brian" folder "/Shares/folderB" should exist
And as "Alice" file "/folderB/fileB.txt" should exist
@@ -43,7 +43,7 @@ Feature: moving a share inside another share
And user "Brian" has created folder "localFolder/subFolder"
And user "Brian" has uploaded file with content "local text" to "/localFolder/localFile.txt"
When user "Brian" moves folder "localFolder" to "Shares/folderA/localFolder" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Brian" folder "/Shares/folderA/localFolder" should not exist
And as "Alice" folder "/folderA/localFolder" should not exist
And as "Brian" folder "/localFolder" should exist
@@ -52,8 +52,7 @@ Feature: moving a share inside another share
Scenario: share receiver tries to move a whole share inside a local folder
Given user "Brian" has created folder "localFolder"
And user "Brian" has uploaded file with content "local text" to "/localFolder/localFile.txt"
# On oCIS you cannot move received shares out of the "Shares" folder
When user "Brian" moves folder "Shares/folderB" to "localFolder/folderB" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" file "/folderB/fileB.txt" should exist
And as "Brian" file "/Shares/folderB/fileB.txt" should exist

@@ -20,7 +20,7 @@ Feature: sharing
And user "Alice" has shared folder "/share1" with user "Brian"
And user "Alice" has shared folder "/share2" with user "Brian"
When user "Brian" moves file "/Shares/share1/textfile0.txt" to "/Shares/share2/textfile0.txt" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Brian" file "/Shares/share1/textfile0.txt" should exist
And as "Alice" file "share1/textfile0.txt" should exist
But as "Brian" file "/Shares/share2/textfile0.txt" should not exist

@@ -45,7 +45,7 @@ Feature: move (rename) file
| shareWith | Alice |
And user "Alice" has uploaded file with content "test data" to "/testfile.txt"
When user "Alice" moves file "/testfile.txt" to "Shares/testshare/testfile.txt" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" file "Shares/testshare/testfile.txt" should not exist
And as "Brian" file "testshare/testfile.txt" should not exist
But as "Alice" file "/testfile.txt" should exist
@@ -95,7 +95,7 @@ Feature: move (rename) file
| permissions | <permissions> |
| shareWith | Alice |
When user "Alice" moves file "/Shares/testshare/testfile.txt" to "/testfile.txt" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" file "/Shares/testshare/testfile.txt" should exist
And as "Brian" file "/testshare/testfile.txt" should exist
Examples:
@@ -146,7 +146,7 @@ Feature: move (rename) file
And user "Alice" has created folder "/testsubfolder"
And user "Alice" has uploaded file with content "test data" to "/testsubfolder/testfile.txt"
When user "Alice" moves folder "/testsubfolder" to "Shares/testshare/testsubfolder" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" folder "/Shares/testshare/testsubfolder" should not exist
And as "Brian" folder "/testshare/testsubfolder" should not exist
But as "Alice" folder "/testsubfolder" should exist
@@ -202,7 +202,7 @@ Feature: move (rename) file
| permissions | <permissions> |
| shareWith | Alice |
When user "Alice" moves folder "/Shares/testshare/testsubfolder" to "/testsubfolder" using the WebDAV API
Then the HTTP status code should be "403"
Then the HTTP status code should be "502"
And as "Alice" folder "/Shares/testshare/testsubfolder" should exist
And as "Brian" folder "/testshare/testsubfolder" should exist
Examples:

2
vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/storageprovider.go
generated
vendored
2
vendor/github.com/cs3org/reva/v2/internal/grpc/services/gateway/storageprovider.go
generated
vendored
@@ -709,7 +709,7 @@ func (s *svc) Move(ctx context.Context, req *provider.MoveRequest) (*provider.Mo

if sourceProviderInfo.Address != destProviderInfo.Address {
return &provider.MoveResponse{
Status: status.NewPermissionDenied(ctx, nil, "cross storage moves are not permitted, use copy and delete"),
Status: status.NewUnimplemented(ctx, nil, "cross storage moves are not supported, use copy and delete"),
}, nil
}

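Note on the hunk above: the gateway now answers a cross-storage MOVE with an Unimplemented status instead of PermissionDenied, which is what turns the expected WebDAV status from 403 into 502 in the feature files earlier in this diff. The sketch below shows one way a WebDAV client could react to that answer by falling back to copy-and-delete, as the status message suggests. It is only an illustration; the function name, URLs and error handling are assumptions, not part of the ocis or reva APIs.

package webdavfallback

import (
	"fmt"
	"net/http"
)

// moveOrCopy first tries a WebDAV MOVE. If the server answers 502 Bad Gateway
// ("cross storage moves are not supported, use copy and delete"), it retries
// as COPY followed by DELETE. Error handling is kept minimal on purpose.
func moveOrCopy(client *http.Client, srcURL, dstURL string) error {
	do := func(method, url, destination string) (int, error) {
		req, err := http.NewRequest(method, url, nil)
		if err != nil {
			return 0, err
		}
		if destination != "" {
			req.Header.Set("Destination", destination)
		}
		resp, err := client.Do(req)
		if err != nil {
			return 0, err
		}
		resp.Body.Close()
		return resp.StatusCode, nil
	}

	code, err := do("MOVE", srcURL, dstURL)
	if err != nil {
		return err
	}
	if code != http.StatusBadGateway { // anything but 502: the MOVE was handled
		if code >= 400 {
			return fmt.Errorf("MOVE failed with status %d", code)
		}
		return nil
	}

	// 502: the two resources live on different storage providers
	if code, err = do("COPY", srcURL, dstURL); err != nil || code >= 400 {
		return fmt.Errorf("COPY fallback failed: %v (status %d)", err, code)
	}
	if code, err = do("DELETE", srcURL, ""); err != nil || code >= 400 {
		return fmt.Errorf("DELETE fallback failed: %v (status %d)", err, code)
	}
	return nil
}
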
@@ -68,7 +68,6 @@ type config struct {
}

type passwordPolicy struct {
Disabled bool `mapstructure:"disabled"`
MinCharacters int `mapstructure:"min_characters"`
MinLowerCaseCharacters int `mapstructure:"min_lowercase_characters"`
MinUpperCaseCharacters int `mapstructure:"min_uppercase_characters"`
@@ -174,10 +173,9 @@ func New(m map[string]interface{}, ss *grpc.Server) (rgrpc.Service, error) {

func newPasswordPolicy(c *passwordPolicy) password.Validator {
if c == nil {
return password.NewPasswordPolicy(true, 0, 0, 0, 0, 0, nil)
return password.NewPasswordPolicy(0, 0, 0, 0, 0, nil)
}
return password.NewPasswordPolicy(
c.Disabled,
c.MinCharacters,
c.MinLowerCaseCharacters,
c.MinUpperCaseCharacters,

@@ -667,7 +667,7 @@ func (s *service) Move(ctx context.Context, req *provider.MoveRequest) (*provide

if dstReceivedShare.Share.Id.OpaqueId != srcReceivedShare.Share.Id.OpaqueId {
return &provider.MoveResponse{
Status: status.NewPermissionDenied(ctx, nil, "cross storage moves are not permitted, use copy and delete"),
Status: status.NewUnimplemented(ctx, nil, "cross storage moves are not supported, use copy and delete"),
}, nil
}

3
vendor/github.com/cs3org/reva/v2/internal/http/services/appprovider/appprovider.go
generated
vendored
3
vendor/github.com/cs3org/reva/v2/internal/http/services/appprovider/appprovider.go
generated
vendored
@@ -276,9 +276,8 @@ func (s *svc) handleNew(w http.ResponseWriter, r *http.Request) {
return
}
defer httpRes.Body.Close()
if httpRes.StatusCode == http.StatusForbidden {
if httpRes.StatusCode == http.StatusBadRequest {
// the file upload was already finished since it is a zero byte file
// TODO: why do we get a 401 then!?
} else if httpRes.StatusCode != http.StatusOK {
writeError(w, r, appErrorServerError, "failed to create the file", nil)
return

@@ -117,7 +117,6 @@ type CapabilitiesGraph struct {

// CapabilitiesPasswordPolicy hold the password policy capabilities
type CapabilitiesPasswordPolicy struct {
Disabled bool `json:"disabled" xml:"disabled" mapstructure:"disabled"`
MinCharacters int `json:"min_characters" xml:"min_characters" mapstructure:"min_characters"`
MaxCharacters int `json:"max_characters" xml:"max_characters" mapstructure:"max_characters"`
MinLowerCaseCharacters int `json:"min_lowercase_characters" xml:"min_lowercase_characters" mapstructure:"min_lowercase_characters"`

@@ -1714,10 +1714,9 @@ func publicPwdEnforced(c *config.Config) passwordEnforced {

func passwordPolicies(c *config.Config) password.Validator {
if c.Capabilities.Capabilities == nil || c.Capabilities.Capabilities.PasswordPolicy == nil {
return password.NewPasswordPolicy(true, 0, 0, 0, 0, 0, nil)
return password.NewPasswordPolicy(0, 0, 0, 0, 0, nil)
}
return password.NewPasswordPolicy(
c.Capabilities.Capabilities.PasswordPolicy.Disabled,
c.Capabilities.Capabilities.PasswordPolicy.MinCharacters,
c.Capabilities.Capabilities.PasswordPolicy.MinLowerCaseCharacters,
c.Capabilities.Capabilities.PasswordPolicy.MinUpperCaseCharacters,

3
vendor/github.com/cs3org/reva/v2/pkg/events/postprocessing.go
generated
vendored
3
vendor/github.com/cs3org/reva/v2/pkg/events/postprocessing.go
generated
vendored
@@ -42,6 +42,8 @@ var (
PPStepPolicies Postprocessingstep = "policies"
// PPStepDelay is the step that processing. Useful for testing or user annoyment
PPStepDelay Postprocessingstep = "delay"
// PPStepFinished is the step that signals that postprocessing is finished, but storage provider hasn't acknowledged it yet
PPStepFinished Postprocessingstep = "finished"

// PPOutcomeDelete means that the file and the upload should be deleted
PPOutcomeDelete PostprocessingOutcome = "delete"
@@ -193,6 +195,7 @@ func (UploadReady) Unmarshal(v []byte) (interface{}, error) {
// ResumePostprocessing can be emitted to repair broken postprocessing
type ResumePostprocessing struct {
UploadID string
Step Postprocessingstep
Timestamp *types.Timestamp
}

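The new Step field on ResumePostprocessing lines up with the "Allow restarting multiple uploads with one command" changelog entry: instead of naming a single UploadID, an event can now target every upload sitting in a given postprocessing step. A minimal sketch of building such an event follows; how the event gets published (the event stream setup and the timestamp) is deployment specific, is not shown in this diff, and is deliberately left out.

package main

import (
	"fmt"

	"github.com/cs3org/reva/v2/pkg/events"
)

func main() {
	// Target every upload currently parked in the "delay" step.
	// UploadID stays empty on purpose; publishing the event onto the
	// postprocessing stream is left to the surrounding service.
	ev := events.ResumePostprocessing{
		Step: events.PPStepDelay,
	}
	fmt.Printf("would publish: %+v\n", ev)
}
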
7
vendor/github.com/cs3org/reva/v2/pkg/password/password_policies.go
generated
vendored
7
vendor/github.com/cs3org/reva/v2/pkg/password/password_policies.go
generated
vendored
@@ -18,7 +18,6 @@ type Validator interface {

// Policies represents a password validation rules
type Policies struct {
disabled bool
minCharacters int
minLowerCaseCharacters int
minUpperCaseCharacters int
@@ -30,9 +29,8 @@ type Policies struct {
}

// NewPasswordPolicy returns a new NewPasswordPolicy instance
func NewPasswordPolicy(disabled bool, minCharacters, minLowerCaseCharacters, minUpperCaseCharacters, minDigits, minSpecialCharacters int, bannedPasswordsList map[string]struct{}) Validator {
func NewPasswordPolicy(minCharacters, minLowerCaseCharacters, minUpperCaseCharacters, minDigits, minSpecialCharacters int, bannedPasswordsList map[string]struct{}) Validator {
p := &Policies{
disabled: disabled,
minCharacters: minCharacters,
minLowerCaseCharacters: minLowerCaseCharacters,
minUpperCaseCharacters: minUpperCaseCharacters,
@@ -48,9 +46,6 @@ func NewPasswordPolicy(disabled bool, minCharacters, minLowerCaseCharacters, min

// Validate implements a password validation regarding the policy
func (s Policies) Validate(str string) error {
if s.disabled {
return nil
}
var allErr error
if !utf8.ValidString(str) {
return fmt.Errorf("the password contains invalid characters")

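With the disabled switch removed from NewPasswordPolicy and from Policies.Validate, the decision to skip validation entirely now has to be made by the caller, as the updated call sites earlier in this diff show (they simply stop passing the flag). The sketch below is one way code outside this repository could keep the old "disabled means accept everything" behaviour; the nilValidator type and buildValidator helper are illustrative assumptions, not part of the reva API, and they presume the Validator interface only requires Validate(string) error.

package main

import (
	"fmt"

	"github.com/cs3org/reva/v2/pkg/password"
)

// nilValidator accepts every password; used when the policy is switched off.
type nilValidator struct{}

func (nilValidator) Validate(string) error { return nil }

// buildValidator decides up front whether to validate at all, since the
// constructor no longer takes a disabled flag.
func buildValidator(disabled bool) password.Validator {
	if disabled {
		return nilValidator{}
	}
	return password.NewPasswordPolicy(8, 1, 1, 1, 1, nil)
}

func main() {
	v := buildValidator(false)
	fmt.Println(v.Validate("Sup3r!Secret"))
}
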
7
vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go
generated
vendored
7
vendor/github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/simple/simple.go
generated
vendored
@@ -93,6 +93,13 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
defer func() {
metrics.UploadsActive.Sub(1)
}()

if r.ContentLength == 0 {
sublog.Info().Msg("received invalid 0-byte PUT request")
w.WriteHeader(http.StatusBadRequest)
return
}

fn := r.URL.Path
defer r.Body.Close()

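The simple datatx handler now rejects a PUT without a body up front, and the appprovider hunk above treats that 400 as "the zero byte file was already finished". Below is a small self-contained sketch of the same guard, using a stand-in handler and httptest rather than the real reva manager (which needs a storage.FS), so the behaviour can be exercised in isolation.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// Stand-in for the datatx PUT handler: only the new ContentLength guard.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ContentLength == 0 {
			w.WriteHeader(http.StatusBadRequest) // 0-byte upload: nothing to transfer
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	srv := httptest.NewServer(h)
	defer srv.Close()

	empty, _ := http.NewRequest(http.MethodPut, srv.URL+"/file.txt", nil)
	res, _ := http.DefaultClient.Do(empty)
	res.Body.Close()
	fmt.Println("0-byte PUT:", res.StatusCode) // 400

	filled, _ := http.NewRequest(http.MethodPut, srv.URL+"/file.txt", strings.NewReader("hello"))
	res, _ = http.DefaultClient.Do(filled)
	res.Body.Close()
	fmt.Println("5-byte PUT:", res.StatusCode) // 200
}
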
10
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go
generated
vendored
10
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/decomposedfs.go
generated
vendored
@@ -286,8 +286,9 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}

var (
failed bool
keepUpload bool
failed bool
revertNodeMetadata bool
keepUpload bool
)
unmarkPostprocessing := true

@@ -297,12 +298,14 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
fallthrough
case events.PPOutcomeAbort:
failed = true
revertNodeMetadata = true
keepUpload = true
metrics.UploadSessionsAborted.Inc()
case events.PPOutcomeContinue:
if err := session.Finalize(); err != nil {
log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not finalize upload")
failed = true
revertNodeMetadata = false
keepUpload = true
// keep postprocessing status so the upload is not deleted during housekeeping
unmarkPostprocessing = false
@@ -311,6 +314,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
case events.PPOutcomeDelete:
failed = true
revertNodeMetadata = true
metrics.UploadSessionsDeleted.Inc()
}

@@ -337,7 +341,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
}
}

fs.sessionStore.Cleanup(ctx, session, failed, keepUpload, unmarkPostprocessing)
fs.sessionStore.Cleanup(ctx, session, revertNodeMetadata, keepUpload, unmarkPostprocessing)

// remove cache entry in gateway
fs.cache.RemoveStatContext(ctx, ev.ExecutingUser.GetId(), &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID})

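The hunk above splits the old catch-all failed flag: revertNodeMetadata now decides whether Cleanup rolls the node back, while failed only feeds the logging and metrics path. The helper below is just a reading aid that restates the switch as a flag table; it is not an exported reva function, and the case that falls through into PPOutcomeAbort is assumed to be PPOutcomeRetry since the hunk does not show it.

package ppsketch

import "github.com/cs3org/reva/v2/pkg/events"

// ppFlags mirrors the variables set in Decomposedfs.Postprocessing above.
type ppFlags struct {
	failed               bool
	revertNodeMetadata   bool
	keepUpload           bool
	unmarkPostprocessing bool
}

// flagsFor restates the outcome switch: abort/retry and delete revert the node
// metadata, while a failed "continue" keeps the upload and its postprocessing
// status (so housekeeping does not delete it) but leaves the metadata alone.
func flagsFor(outcome events.PostprocessingOutcome, finalizeErr error) ppFlags {
	f := ppFlags{unmarkPostprocessing: true}
	switch outcome {
	case events.PPOutcomeRetry: // assumed: the fallthrough case not shown in the hunk
		fallthrough
	case events.PPOutcomeAbort:
		f.failed, f.revertNodeMetadata, f.keepUpload = true, true, true
	case events.PPOutcomeContinue:
		if finalizeErr != nil {
			f.failed, f.keepUpload = true, true
			f.unmarkPostprocessing = false
		}
	case events.PPOutcomeDelete:
		f.failed, f.revertNodeMetadata = true, true
	}
	return f
}
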
8
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload/session.go
generated
vendored
8
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload/session.go
generated
vendored
@@ -295,13 +295,9 @@ func (s *OcisSession) MTime() time.Time {
return t
}

// IsProcessing returns true if the node has entered postprocessing state
// IsProcessing returns true if all bytes have been received. The session then has entered postprocessing state.
func (s *OcisSession) IsProcessing() bool {
n, err := s.Node(context.Background())
if err != nil {
return false
}
return n.IsProcessing(context.Background())
return s.info.Size == s.info.Offset
}

// binPath returns the path to the file storing the binary data.

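IsProcessing no longer stats the node; a session now counts as "in postprocessing" as soon as every announced byte has arrived. A tiny stand-alone illustration of that check, where size and offset stand in for the unexported s.info.Size and s.info.Offset:

package main

import "fmt"

// isProcessing mirrors the new check above: postprocessing starts once the
// received offset has caught up with the announced upload size.
func isProcessing(size, offset int64) bool {
	return size == offset
}

func main() {
	fmt.Println(isProcessing(1024, 512))  // false: bytes still missing
	fmt.Println(isProcessing(1024, 1024)) // true: upload body complete
}
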
2
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload/upload.go
generated
vendored
2
vendor/github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload/upload.go
generated
vendored
@@ -191,7 +191,7 @@ func (session *OcisSession) FinishUpload(ctx context.Context) error {

n, err := session.store.CreateNodeForUpload(session, attrs)
if err != nil {
session.store.Cleanup(ctx, session, true, false, true)
session.store.Cleanup(ctx, session, true, false, false)
return err
}

10
vendor/github.com/egirna/icap-client/client.go
generated
vendored
10
vendor/github.com/egirna/icap-client/client.go
generated
vendored
@@ -31,9 +31,7 @@ func NewClient(options ...ConfigOption) (Client, error) {
}

// Do is the main function of the client that makes the ICAP request
func (c *Client) Do(req Request) (Response, error) {
var err error

func (c *Client) Do(req Request) (res Response, err error) {
// establish connection to the icap server
err = c.conn.Connect(req.ctx, req.URL.Host)
if err != nil {
@@ -57,15 +55,15 @@ func (c *Client) Do(req Request) (Response, error) {
return Response{}, err
}

resp, err := toClientResponse(bufio.NewReader(strings.NewReader(string(dataRes))))
res, err = toClientResponse(bufio.NewReader(strings.NewReader(string(dataRes))))
if err != nil {
return Response{}, err
}

// check if the message is fully done scanning or if it needs to be sent another chunk
done := !(resp.StatusCode == http.StatusContinue && !req.bodyFittedInPreview && req.previewSet)
done := !(res.StatusCode == http.StatusContinue && !req.bodyFittedInPreview && req.previewSet)
if done {
return resp, nil
return res, nil
}

// get the remaining body bytes

3
vendor/github.com/egirna/icap-client/request.go
generated
vendored
3
vendor/github.com/egirna/icap-client/request.go
generated
vendored
@@ -55,10 +55,9 @@ func NewRequest(ctx context.Context, method, urlStr string, httpReq *http.Reques

// SetPreview sets the preview bytes in the icap header
// todo: defer close error
func (r *Request) SetPreview(maxBytes int) error {
func (r *Request) SetPreview(maxBytes int) (err error) {
var bodyBytes []byte
var previewBytes int
var err error

// receiving the body bites to determine the preview bytes depending on the request ICAP method
if r.Method == MethodREQMOD {

@@ -459,7 +459,7 @@ func (c *cache) Stop() {
}

func (c *cache) String() string {
return "cache"
return "cached"
}

// New returns a new cache

6
vendor/modules.txt
vendored
6
vendor/modules.txt
vendored
@@ -362,7 +362,7 @@ github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1
github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1
github.com/cs3org/go-cs3apis/cs3/tx/v1beta1
github.com/cs3org/go-cs3apis/cs3/types/v1beta1
# github.com/cs3org/reva/v2 v2.18.1-0.20240124094635-6eec406c0be7
# github.com/cs3org/reva/v2 v2.18.1-0.20240126141248-c9e4a3bcd0da
## explicit; go 1.21
github.com/cs3org/reva/v2/cmd/revad/internal/grace
github.com/cs3org/reva/v2/cmd/revad/runtime
@@ -763,7 +763,7 @@ github.com/dutchcoders/go-clamd
# github.com/egirna/icap v0.0.0-20181108071049-d5ee18bd70bc
## explicit
github.com/egirna/icap
# github.com/egirna/icap-client v0.1.1 => github.com/fschade/icap-client v0.0.0-20240105150744-9c2d8aff3ef2
# github.com/egirna/icap-client v0.1.1 => github.com/fschade/icap-client v0.0.0-20240123094924-5af178158eaf
## explicit; go 1.21
github.com/egirna/icap-client
# github.com/emirpasic/gods v1.18.1
@@ -2320,4 +2320,4 @@ stash.kopano.io/kgol/oidc-go
stash.kopano.io/kgol/rndm
# github.com/go-micro/plugins/v4/store/nats-js-kv => github.com/kobergj/plugins/v4/store/nats-js-kv v0.0.0-20231207143248-4d424e3ae348
# github.com/studio-b12/gowebdav => github.com/aduffeck/gowebdav v0.0.0-20231215102054-212d4a4374f6
# github.com/egirna/icap-client => github.com/fschade/icap-client v0.0.0-20240105150744-9c2d8aff3ef2
# github.com/egirna/icap-client => github.com/fschade/icap-client v0.0.0-20240123094924-5af178158eaf