mirror of https://github.com/opencloud-eu/opencloud.git
synced 2025-12-24 14:50:39 -05:00

Compare commits: v2.0.2...stable-2.0 (20 commits)
Commits:

59e3e081da
4d2bf41274
4cab81a857
63b201b2a7
da64699172
723284193d
ddaf1f86b0
1e6ee49221
db04f851ca
a8fc417e18
a240389a06
195e9ffdac
d76d5cd49c
bf7fabddf7
0160118996
0935c44c1c
f60e36e8d7
4828c5c4bc
5dfd52aa32
d1a8463036
@@ -1,3 +1,3 @@
 # The test runner source for UI tests
-WEB_COMMITID=59e329cc5fe288cf247cc5272547a77ac59cc000
+WEB_COMMITID=3cc779ddb45b52134fa986d21587f18316e2135a
 WEB_BRANCH=stable-2.1
@@ -1,12 +1,6 @@
 """OpenCloud CI definition
 """
 
-# Production release tags
-# NOTE: need to be updated if new production releases are determined
-# - follow semver
-# - omit 'v' prefix
-PRODUCTION_RELEASE_TAGS = ["2.0", "3.0"]
-
 # Repository
 
 repo_slug = "opencloud-eu/opencloud"
@@ -52,6 +46,8 @@ READY_RELEASE_GO = "woodpeckerci/plugin-ready-release-go:latest"
 DEFAULT_PHP_VERSION = "8.2"
 DEFAULT_NODEJS_VERSION = "20"
 
+CACHE_S3_SERVER = "https://s3.ci.opencloud.eu"
+
 dirs = {
     "base": "/woodpecker/src/github.com/opencloud-eu/opencloud",
     "web": "/woodpecker/src/github.com/opencloud-eu/opencloud/webTestRunner",
@@ -341,6 +337,20 @@ config = {
     },
     "dockerReleases": {
         "architectures": ["arm64", "amd64"],
+        "production": {
+            # NOTE: need to be updated if new production releases are determined
+            "tags": ["2.0"],
+            "repo": docker_repo_slug,
+            "build_type": "production",
+        },
+        "rolling": {
+            "repo": docker_repo_slug + "-rolling",
+            "build_type": "rolling",
+        },
+        "daily": {
+            "repo": docker_repo_slug + "-rolling",
+            "build_type": "daily",
+        },
     },
     "litmus": True,
     "codestyle": True,
@@ -361,9 +371,7 @@ MINIO_MC_ENV = {
     "CACHE_BUCKET": {
         "from_secret": "cache_s3_bucket",
     },
-    "MC_HOST": {
-        "from_secret": "cache_s3_server",
-    },
+    "MC_HOST": CACHE_S3_SERVER,
     "AWS_ACCESS_KEY_ID": {
         "from_secret": "cache_s3_access_key",
     },
@@ -661,9 +669,7 @@ def testOpencloud(ctx):
         "name": "scan-result-cache",
         "image": PLUGINS_S3,
         "settings": {
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "bucket": "cache",
             "source": "cache/**/*",
             "target": "%s/%s" % (repo_slug, ctx.build.commit + "-${CI_PIPELINE_NUMBER}"),
@@ -1465,9 +1471,7 @@ def uploadTracingResult(ctx):
         "bucket": {
             "from_secret": "cache_public_s3_bucket",
         },
-        "endpoint": {
-            "from_secret": "cache_public_s3_server",
-        },
+        "endpoint": CACHE_S3_SERVER,
         "path_style": True,
         "source": "webTestRunner/reports/e2e/playwright/tracing/**/*",
         "strip_prefix": "webTestRunner/reports/e2e/playwright/tracing",
@@ -1515,26 +1519,31 @@ def logTracingResults():
 def dockerReleases(ctx):
     pipelines = []
     docker_repos = []
-    build_type = "daily"
+    build_type = ""
 
-    # dockerhub repo
-    # - "opencloudeu/opencloud-rolling"
-    repo = docker_repo_slug + "-rolling"
-    docker_repos.append(repo)
-
-    # production release repo
     if ctx.build.event == "tag":
         tag = ctx.build.ref.replace("refs/tags/v", "").lower()
-        for prod_tag in PRODUCTION_RELEASE_TAGS:
+
+        is_production = False
+        for prod_tag in config["dockerReleases"]["production"]["tags"]:
             if tag.startswith(prod_tag):
-                docker_repos.append(docker_repo_slug)
+                is_production = True
                 break
 
+        if is_production:
+            docker_repos.append(config["dockerReleases"]["production"]["repo"])
+            build_type = config["dockerReleases"]["production"]["build_type"]
+        else:
+            docker_repos.append(config["dockerReleases"]["rolling"]["repo"])
+            build_type = config["dockerReleases"]["rolling"]["build_type"]
+
+    else:
+        docker_repos.append(config["dockerReleases"]["daily"]["repo"])
+        build_type = config["dockerReleases"]["daily"]["build_type"]
+
     for repo in docker_repos:
         repo_pipelines = []
-        if ctx.build.event == "tag":
-            build_type = "rolling" if "rolling" in repo else "production"
 
         repo_pipelines.append(dockerRelease(ctx, repo, build_type))
 
         # manifest = releaseDockerManifest(ctx, repo, build_type)
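The refactor above moves the release metadata out of the module-level PRODUCTION_RELEASE_TAGS constant and into the config["dockerReleases"] map, so repo and build-type selection come from one place. As an aside (not part of the changeset), here is a minimal Go sketch of the same classification: tag events whose tag starts with a known production prefix build production images, other tag events build rolling images, and everything else is a daily build. The slug and function names are illustrative, not taken from the pipeline.

package main

import (
	"fmt"
	"strings"
)

// classifyRelease mirrors the pipeline's decision tree for picking a
// Docker repo and build type from the build event and git ref.
func classifyRelease(event, ref string, productionTags []string) (repo, buildType string) {
	const slug = "opencloudeu/opencloud" // illustrative repo slug
	if event != "tag" {
		return slug + "-rolling", "daily"
	}
	tag := strings.ToLower(strings.TrimPrefix(ref, "refs/tags/v"))
	for _, p := range productionTags {
		if strings.HasPrefix(tag, p) {
			return slug, "production"
		}
	}
	return slug + "-rolling", "rolling"
}

func main() {
	fmt.Println(classifyRelease("tag", "refs/tags/v2.0.5", []string{"2.0"}))  // production image
	fmt.Println(classifyRelease("push", "refs/heads/main", []string{"2.0"})) // daily image
}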
@@ -1550,10 +1559,10 @@ def dockerReleases(ctx):
     return pipelines
 
 def dockerRelease(ctx, repo, build_type):
-    build_args = [
-        "REVISION=%s" % (ctx.build.commit),
-        "VERSION=%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
-    ]
+    build_args = {
+        "REVISION": "%s" % (ctx.build.commit),
+        "VERSION": "%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
+    }
 
     depends_on = getPipelineNames(getGoBinForTesting(ctx))
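This hunk matches the changelog entry "[Backport] fix: build_args is now an object": the Docker plugin expects build arguments as a key/value mapping rather than a list of "KEY=VALUE" strings. For illustration only (not part of the changeset), a small Go sketch showing the two shapes and a conversion between them; the function name is hypothetical.

package main

import "fmt"

// buildArgsAsPairs renders a build-args map in the older list form
// ("KEY=VALUE" strings), for comparison with the map form used above.
func buildArgsAsPairs(args map[string]string) []string {
	pairs := make([]string, 0, len(args))
	for k, v := range args {
		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
	}
	return pairs
}

func main() {
	// map form, as the plugin now expects
	args := map[string]string{
		"REVISION": "a240389a06",
		"VERSION":  "v2.0.5",
	}
	fmt.Println(buildArgsAsPairs(args)) // order not guaranteed, e.g. [REVISION=a240389a06 VERSION=v2.0.5]
}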
@@ -2310,9 +2319,7 @@ def genericCache(name, action, mounts, cache_path):
         "name": "%s_%s" % (action, name),
         "image": PLUGINS_S3_CACHE,
         "settings": {
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "rebuild": rebuild,
             "restore": restore,
             "mount": mounts,
@@ -2343,9 +2350,7 @@ def genericCachePurge(flush_path):
             "secret_key": {
                 "from_secret": "cache_s3_secret_key",
             },
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "flush": True,
             "flush_age": 1,
             "flush_path": flush_path,
CHANGELOG.md (31 changed lines)

@@ -1,5 +1,36 @@
 # Changelog
 
+## [2.0.5](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.5) - 2025-10-29
+
+### ❤️ Thanks to all contributors! ❤️
+
+@ScharfViktor
+
+### ✅ Tests
+
+- [full-ci] Bump reva 2.29.5 [[#1738](https://github.com/opencloud-eu/opencloud/pull/1738)]
+- Fix tests in the stable branch [[#1731](https://github.com/opencloud-eu/opencloud/pull/1731)]
+
+## [2.0.4](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.4) - 2025-07-11
+
+### ❤️ Thanks to all contributors! ❤️
+
+@micbar
+
+### 🐛 Bug Fixes
+
+- [Backport] fix: build_args is now an object [[#1213](https://github.com/opencloud-eu/opencloud/pull/1213)]
+
+## [2.0.3](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.3) - 2025-07-10
+
+### ❤️ Thanks to all contributors! ❤️
+
+@ScharfViktor
+
+### 📦️ Dependencies
+
+- [full-ci] Reva bump 2.29.4 [[#1202](https://github.com/opencloud-eu/opencloud/pull/1202)]
+
 ## [2.0.2](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.2) - 2025-05-02
 
 ### ❤️ Thanks to all contributors! ❤️
go.mod (2 changed lines)

@@ -63,7 +63,7 @@ require (
 	github.com/onsi/ginkgo/v2 v2.23.3
 	github.com/onsi/gomega v1.36.3
 	github.com/open-policy-agent/opa v1.2.0
-	github.com/opencloud-eu/reva/v2 v2.29.3
+	github.com/opencloud-eu/reva/v2 v2.29.5
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
 	github.com/pkg/errors v0.9.1
go.sum (4 changed lines)

@@ -865,8 +865,8 @@ github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
 github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
 github.com/open-policy-agent/opa v1.2.0 h1:88NDVCM0of1eO6Z4AFeL3utTEtMuwloFmWWU7dRV1z0=
 github.com/open-policy-agent/opa v1.2.0/go.mod h1:30euUmOvuBoebRCcJ7DMF42bRBOPznvt0ACUMYDUGVY=
-github.com/opencloud-eu/reva/v2 v2.29.3 h1:y0vfye984kOIB9nib4LkN3wEnJ2vR0/1CKoVcIRQ7JI=
-github.com/opencloud-eu/reva/v2 v2.29.3/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM=
+github.com/opencloud-eu/reva/v2 v2.29.5 h1:T4RjTSDk650PVn0hAL8HpF+61ChqQ/UwNoWMYYAMOGU=
+github.com/opencloud-eu/reva/v2 v2.29.5/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -16,7 +16,7 @@ var (
 	// LatestTag is the latest released version plus the dev meta version.
 	// Will be overwritten by the release pipeline
 	// Needs a manual change for every tagged release
-	LatestTag = "2.0.1+dev"
+	LatestTag = "2.0.5+dev"
 
 	// Date indicates the build date.
 	// This has been removed, it looks like you can only replace static strings with recent go versions
@@ -472,7 +472,10 @@ module.exports = function(webpackEnv) {
             // its runtime that would otherwise be processed through "file" loader.
             // Also exclude `html` and `json` extensions so they get processed
             // by webpacks internal loaders.
-            exclude: [/\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
+            //
+            // html-webpack-plugin has a known bug,
+            // fixed by /^$/ https://github.com/jantimon/html-webpack-plugin/issues/1589
+            exclude: [/^$/, /\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
             options: {
               name: 'static/media/[name].[hash:8].[ext]',
             },
@@ -120,3 +120,26 @@ Feature: Propfind test
       | Manager      | RDNVWZP |
       | Space Editor | DNVW    |
      | Space Viewer |         |
+
+  @issue-1523
+  Scenario: propfind response contains a restored folder with correct name
+    Given user "Alice" has created a folder "folderMain" in space "Personal"
+    And user "Alice" has deleted folder "folderMain"
+    And user "Alice" has created a folder "folderMain" in space "Personal"
+    When user "Alice" restores the folder with original path "/folderMain" to "/folderMain (1)" using the trashbin API
+    And user "Alice" sends PROPFIND request to space "Personal" using the WebDAV API
+    Then the HTTP status code should be "207"
+    And as user "Alice" the PROPFIND response should contain a resource "folderMain" with these key and value pairs:
+      | key            | value             |
+      | oc:fileid      | %file_id_pattern% |
+      | oc:file-parent | %file_id_pattern% |
+      | oc:name        | folderMain        |
+      | oc:permissions | RDNVCKZP          |
+      | oc:size        | 0                 |
+    And as user "Alice" the PROPFIND response should contain a resource "folderMain (1)" with these key and value pairs:
+      | key            | value             |
+      | oc:fileid      | %file_id_pattern% |
+      | oc:file-parent | %file_id_pattern% |
+      | oc:name        | folderMain (1)    |
+      | oc:permissions | RDNVCKZP          |
+      | oc:size        | 0                 |
@@ -23,6 +23,7 @@ Feature: Send a sharing invitations
       | shareType       | user               |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
     And the JSON data of the response should match
       """
@@ -113,7 +114,9 @@ Feature: Send a sharing invitations
       | shareType       | group              |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
+    And user "Carol" has a share "<resource>" synced
     And user "Carol" should have a share "<resource>" shared by user "Alice" from space "Personal"
     And the JSON data of the response should match
       """
@@ -1998,6 +2001,7 @@ Feature: Send a sharing invitations
       | shareType       | user               |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
     And the JSON data of the response should match
       """
@@ -2086,7 +2090,9 @@ Feature: Send a sharing invitations
       | shareType       | group              |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
+    And user "Carol" has a share "<resource>" synced
     And user "Carol" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
     And the JSON data of the response should match
       """
@@ -3166,6 +3172,7 @@ Feature: Send a sharing invitations
       | shareType       | user   |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "textfile.txt" synced
     And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
     When user "Alice" sends the following resource share invitation using the Graph API:
       | resource | textfile.txt |
@@ -3174,6 +3181,7 @@ Feature: Send a sharing invitations
       | shareType       | group  |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Carol" has a share "textfile.txt" synced
     And user "Carol" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
 
@@ -3193,4 +3201,5 @@ Feature: Send a sharing invitations
       | shareType       | group  |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "textfile.txt" synced
     And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
@@ -19,7 +19,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | Viewer               |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last resource share with the following properties using the Graph API:
       | space    | Personal     |
       | resource | testfile.txt |
@@ -91,7 +91,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | Viewer               |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last resource share with the following properties using the Graph API:
       | space    | Personal   |
       | resource | <resource> |
@@ -394,7 +394,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | permissionsRole | <new-permissions-role> |
       | space           | NewSpace               |
@@ -474,7 +474,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |          |
       | space              | NewSpace |
@@ -554,7 +554,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z |
       | space              | NewSpace             |
@@ -636,7 +636,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |                        |
       | permissionsRole    | <new-permissions-role> |
@@ -717,7 +717,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -799,7 +799,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | permissionsRole | <new-permissions-role> |
       | space           | NewSpace               |
@@ -875,7 +875,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |          |
       | space              | NewSpace |
@@ -951,7 +951,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -1033,7 +1033,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |                        |
       | permissionsRole    | <new-permissions-role> |
@@ -1110,7 +1110,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -21,6 +21,7 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
     When user "Brian" gets all the shares shared with him using the sharing API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -47,6 +48,8 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
+    And user "Brian" has a share "textfile1.txt" synced
     When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile1.txt" using the provisioning API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -73,6 +76,8 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
+    And user "Brian" has a share "textfile1.txt" synced
     When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile0.txt" using the provisioning API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -94,6 +99,7 @@ Feature: sharing
       | shareType       | group  |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
     When user "Brian" gets all the shares shared with him using the sharing API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -567,3 +567,52 @@ Feature: restore deleted files/folders
       | dav-path-version |
       | spaces           |
       | new              |
+
+  @issue-1523
+  Scenario Outline: restore deleted folder when folder with same name exists
+    Given using <dav-path-version> DAV path
+    And user "Alice" has created folder "new"
+    And user "Alice" has uploaded file with content "content" to "new/test.txt"
+    And user "Alice" has deleted folder "new"
+    And user "Alice" has created folder "new"
+    And user "Alice" has uploaded file with content "new content" to "new/new-file.txt"
+    When user "Alice" restores the folder with original path "/new" to "/new (1)" using the trashbin API
+    Then the HTTP status code should be "201"
+    And as "Alice" the following folders should exist
+      | path     |
+      | /new     |
+      | /new (1) |
+    And as "Alice" the following files should exist
+      | path              |
+      | /new/new-file.txt |
+      | /new (1)/test.txt |
+    Examples:
+      | dav-path-version |
+      | spaces           |
+      | new              |
+
+  @issue-1523
+  Scenario Outline: restore deleted folder with files when folder with same name exists
+    Given using <dav-path-version> DAV path
+    And user "Alice" has created folder "folder-a"
+    And user "Alice" has uploaded file with content "content b" to "folder-a/b.txt"
+    And user "Alice" has uploaded file with content "content c" to "folder-a/c.txt"
+    And user "Alice" has deleted file "folder-a/b.txt"
+    And user "Alice" has deleted folder "folder-a"
+    And user "Alice" has created folder "folder-a"
+    When user "Alice" restores the file with original path "folder-a/b.txt" using the trashbin API
+    Then the HTTP status code should be "201"
+    When user "Alice" restores the folder with original path "/folder-a" to "/folder-a (1)" using the trashbin API
+    Then the HTTP status code should be "201"
+    And as "Alice" the following folders should exist
+      | path          |
+      | /folder-a     |
+      | /folder-a (1) |
+    And as "Alice" the following files should exist
+      | path                |
+      | /folder-a/b.txt     |
+      | /folder-a (1)/c.txt |
+    Examples:
+      | dav-path-version |
+      | spaces           |
+      | new              |
@@ -261,10 +261,11 @@ function run_behat_tests() {
 		FAILED_SCENARIO_PATHS_COLORED=`awk '/Failed scenarios:/',0 ${TEST_LOG_FILE} | grep -a feature`
 		# There will be some ANSI escape codes for color in the FEATURE_COLORED var.
 		# Strip them out so we can pass just the ordinary feature details to Behat.
+		# Also strip everything after ".feature:XX", including text such as "(on line xx)" added by Behat indicating the failing step's line number.
 		# Thanks to https://en.wikipedia.org/wiki/Tee_(command) and
 		# https://stackoverflow.com/questions/23416278/how-to-strip-ansi-escape-sequences-from-a-variable
 		# for ideas.
-		FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g")
+		FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g" | sed 's/\(\.feature:[0-9]\+\).*/\1/')
 
 		# If something else went wrong, and there were no failed scenarios,
 		# then the awk, grep, sed command sequence above ends up with an empty string.
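The extra sed stage above truncates each failing scenario path at ".feature:NN" so the trailing "(on line xx)" text Behat appends can't break the re-run. For illustration only (not part of the changeset), a rough Go equivalent of the whole normalization, ANSI strip plus truncation; the sample input format follows the comment above and the function name is hypothetical.

package main

import (
	"fmt"
	"regexp"
)

var (
	// matches ANSI color escape sequences such as "\x1b[31m"
	ansiRE = regexp.MustCompile(`\x1b\[[0-9;]*m`)
	// keeps everything up to and including ".feature:<line>" and drops the rest
	featureRE = regexp.MustCompile(`(\.feature:[0-9]+).*`)
)

func normalizeScenarioPath(raw string) string {
	s := ansiRE.ReplaceAllString(raw, "")
	return featureRE.ReplaceAllString(s, "$1")
}

func main() {
	in := "\x1b[31mtests/acceptance/features/apiTrashbin/trashbinRestore.feature:570 (on line 582)\x1b[0m"
	fmt.Println(normalizeScenarioPath(in))
	// tests/acceptance/features/apiTrashbin/trashbinRestore.feature:570
}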
@@ -8,7 +8,7 @@
         }
     },
     "require": {
-        "behat/behat": "^3.13",
+        "behat/behat": "^3.24",
         "behat/gherkin": "^4.9",
         "behat/mink": "1.7.1",
         "friends-of-behat/mink-extension": "^2.7",
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go (generated, vendored; 149 changed lines)
@@ -20,13 +20,21 @@
 package blobstore
 
 import (
-	"bufio"
+	"context"
+	"fmt"
	"io"
 	"os"
 	"path/filepath"
+	"time"
 
+	"github.com/pkg/errors"
+	"github.com/pkg/xattr"
+
 	"github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
-	"github.com/pkg/errors"
 )
 
+const (
+	TMPDir = ".oc-tmp"
+)
+
 // Blobstore provides an interface to an filesystem based blobstore
@@ -41,61 +49,106 @@ func New(root string) (*Blobstore, error) {
 	}, nil
 }
 
-// Upload stores some data in the blobstore under the given key
-func (bs *Blobstore) Upload(node *node.Node, source, copyTarget string) error {
-	path := node.InternalPath()
+// Upload is responsible for transferring data from a source file (upload) to its final location;
+// the file operation is done atomically using a temporary file followed by a rename
+func (bs *Blobstore) Upload(n *node.Node, source, copyTarget string) error {
+	tempName := filepath.Join(n.SpaceRoot.InternalPath(), TMPDir, filepath.Base(source))
 
-	// preserve the mtime of the file
-	fi, _ := os.Stat(path)
-
-	file, err := os.Open(source)
-	if err != nil {
-		return errors.Wrap(err, "Decomposedfs: posix blobstore: Can not open source file to upload")
-	}
-	defer file.Close()
-
-	f, err := os.OpenFile(node.InternalPath(), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0700)
-	if err != nil {
-		return errors.Wrapf(err, "could not open blob '%s' for writing", node.InternalPath())
-	}
-	defer f.Close()
-
-	w := bufio.NewWriter(f)
-	_, err = w.ReadFrom(file)
-	if err != nil {
-		return errors.Wrapf(err, "could not write blob '%s'", node.InternalPath())
-	}
-	err = w.Flush()
-	if err != nil {
-		return err
-	}
-	err = os.Chtimes(path, fi.ModTime(), fi.ModTime())
-	if err != nil {
+	// there is no guarantee that the space root TMPDir exists at this point, so we create the directory if needed
+	if err := os.MkdirAll(filepath.Dir(tempName), 0700); err != nil {
 		return err
 	}
 
-	if copyTarget != "" {
-		// also "upload" the file to a local path, e.g. for keeping the "current" version of the file
-		err := os.MkdirAll(filepath.Dir(copyTarget), 0700)
-		if err != nil {
-			return err
-		}
+	sourceFile, err := os.Open(source)
+	if err != nil {
+		return fmt.Errorf("failed to open source file '%s': %v", source, err)
+	}
+	defer func() {
+		_ = sourceFile.Close()
+	}()
 
-		_, err = file.Seek(0, 0)
-		if err != nil {
-			return err
-		}
+	tempFile, err := os.OpenFile(tempName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0700)
+	if err != nil {
+		return fmt.Errorf("unable to create temp file '%s': %v", tempName, err)
+	}
+
+	if _, err := tempFile.ReadFrom(sourceFile); err != nil {
+		return fmt.Errorf("failed to write data from source file '%s' to temp file '%s' - %v", source, tempName, err)
+	}
+
+	if err := tempFile.Sync(); err != nil {
+		return fmt.Errorf("failed to sync temp file '%s' - %v", tempName, err)
+	}
+
+	if err := tempFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file '%s' - %v", tempName, err)
+	}
+
+	nodeAttributes, err := n.Xattrs(context.Background())
+	if err != nil {
+		return fmt.Errorf("failed to get xattrs for node '%s': %v", n.InternalPath(), err)
+	}
+
+	var mtime *time.Time
+	for k, v := range nodeAttributes {
+		if err := xattr.Set(tempName, k, v); err != nil {
+			return fmt.Errorf("failed to set xattr '%s' on temp file '%s' - %v", k, tempName, err)
+		}
+
+		if k == "user.oc.mtime" {
+			tv, err := time.Parse(time.RFC3339Nano, string(v))
+			if err == nil {
+				mtime = &tv
+			}
+		}
+	}
+
+	// the extended attributes should always contain a mtime, but in case they don't, we fetch it from the node
+	if mtime == nil {
+		switch nodeMtime, err := n.GetMTime(context.Background()); {
+		case err != nil:
+			return fmt.Errorf("failed to get mtime for node '%s' - %v", n.InternalPath(), err)
+		default:
+			mtime = &nodeMtime
+		}
+	}
 
-		copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
-		if err != nil {
-			return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget)
-		}
-		defer copyFile.Close()
+	// etags rely on the id and the mtime, so we need to ensure the mtime is set correctly
+	if err := os.Chtimes(tempName, *mtime, *mtime); err != nil {
+		return fmt.Errorf("failed to set mtime on temp file '%s' - %v", tempName, err)
+	}
 
-		_, err = copyFile.ReadFrom(file)
-		if err != nil {
-			return errors.Wrapf(err, "could not write blob copy of '%s' to '%s'", node.InternalPath(), copyTarget)
-		}
-	}
+	// atomically move the file to its final location,
+	// on Windows systems (unsupported oc os) os.Rename is not atomic
+	if err := os.Rename(tempName, n.InternalPath()); err != nil {
+		return fmt.Errorf("failed to move temp file '%s' to node '%s' - %v", tempName, n.InternalPath(), err)
+	}
+
+	// upload successfully, now handle the copy target if set
+	if copyTarget == "" {
+		return nil
+	}
+
+	// also "upload" the file to a local path, e.g., for keeping the "current" version of the file
+	if err := os.MkdirAll(filepath.Dir(copyTarget), 0700); err != nil {
+		return err
+	}
+
+	if _, err := sourceFile.Seek(0, 0); err != nil {
+		return err
+	}
+
+	copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
+	if err != nil {
+		return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget)
+	}
+	defer func() {
+		_ = copyFile.Close()
+	}()
+
+	if _, err := copyFile.ReadFrom(sourceFile); err != nil {
+		return errors.Wrapf(err, "could not write blob copy of '%s' to '%s'", n.InternalPath(), copyTarget)
+	}
 
 	return nil
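The rewritten Upload above replaces the in-place buffered write with the classic crash-safe sequence: write to a temp file on the same filesystem, fsync, set metadata, then rename over the final path. As a standalone illustration (not part of the changeset), a stripped-down Go sketch of that idiom, independent of the reva types; paths and names are illustrative.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite stores data under target by writing a sibling temp file,
// syncing it to disk, and renaming it into place. On POSIX filesystems
// the rename is atomic, so readers never observe a half-written blob.
func atomicWrite(target string, data []byte) error {
	tmp := filepath.Join(filepath.Dir(target), ".tmp-"+filepath.Base(target))
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0700)
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := f.Sync(); err != nil { // flush to stable storage before the rename
		f.Close()
		return fmt.Errorf("sync temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}
	return os.Rename(tmp, target) // atomic on the same filesystem
}

func main() {
	if err := atomicWrite("/tmp/blob-example", []byte("content")); err != nil {
		fmt.Println("upload failed:", err)
	}
}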
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go (generated, vendored; 11 changed lines)
@@ -32,6 +32,7 @@ import (
 
 	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
 	typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
+
 	"github.com/opencloud-eu/reva/v2/pkg/errtypes"
 	"github.com/opencloud-eu/reva/v2/pkg/storage"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
@@ -333,10 +334,12 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key,
 		return nil, fmt.Errorf("trashbin: parent id not found for %s", restorePath)
 	}
 
-	trashNode := &trashNode{spaceID: spaceID, id: id, path: trashPath}
-	err = tb.lu.MetadataBackend().Set(ctx, trashNode, prefixes.ParentidAttr, []byte(parentID))
-	if err != nil {
-		return nil, err
+	trashedNode := &trashNode{spaceID: spaceID, id: id, path: trashPath}
+	if err = tb.lu.MetadataBackend().SetMultiple(ctx, trashedNode, map[string][]byte{
+		prefixes.NameAttr:     []byte(filepath.Base(restorePath)),
+		prefixes.ParentidAttr: []byte(parentID),
+	}, true); err != nil {
+		return nil, fmt.Errorf("posixfs: failed to update trashed node metadata: %w", err)
 	}
 
 	// restore the item
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go (generated, vendored; 8 changed lines)
@@ -38,9 +38,11 @@ import (
 	"golang.org/x/sync/errgroup"
 
 	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+
 	"github.com/opencloud-eu/reva/v2/pkg/appctx"
 	"github.com/opencloud-eu/reva/v2/pkg/errtypes"
 	"github.com/opencloud-eu/reva/v2/pkg/events"
+	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
 	"github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin"
@@ -649,6 +651,10 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
 	return n.SetXattrsWithContext(ctx, attributes, false)
 }
 
+func (t *Tree) isTemporary(path string) bool {
+	return path == blobstore.TMPDir
+}
+
 func (t *Tree) isIgnored(path string) bool {
 	return isLockFile(path) || isTrash(path) || t.isUpload(path) || t.isInternal(path)
 }
@@ -664,7 +670,7 @@ func (t *Tree) isIndex(path string) bool {
 func (t *Tree) isInternal(path string) bool {
 	return path == t.options.Root ||
 		path == filepath.Join(t.options.Root, "users") ||
-		t.isIndex(path) || strings.Contains(path, lookup.MetadataDir)
+		t.isIndex(path) || strings.Contains(path, lookup.MetadataDir) || t.isTemporary(path)
 }
 
 func isLockFile(path string) bool {
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go (generated, vendored; 12 changed lines)
@@ -1096,22 +1096,22 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro
 			continue
 		}
 
-		if isGrantExpired(g) {
-			continue
-		}
-
 		switch {
 		case err == nil:
+			if isGrantExpired(g) {
+				continue
+			}
+
 			// If all permissions are set to false we have a deny grant
 			if grants.PermissionsEqual(g.Permissions, &provider.ResourcePermissions{}) {
 				return NoPermissions(), true, nil
 			}
 			AddPermissions(ap, g.GetPermissions())
 		case metadata.IsAttrUnset(err):
-			appctx.GetLogger(ctx).Error().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
+			appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
 			// continue with next segment
 		default:
-			appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Msg("error reading permissions")
+			appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Msg("error reading permissions")
 			// continue with next segment
 		}
 	}
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go (generated, vendored; 11 changed lines)
@@ -286,6 +286,17 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) {
 	revisionNode := node.New(session.SpaceID(), session.NodeID(), "", "", session.Size(), session.ID(),
 		provider.ResourceType_RESOURCE_TYPE_FILE, session.SpaceOwner(), session.store.lu)
 
+	switch spaceRoot, err := session.store.lu.NodeFromSpaceID(ctx, session.SpaceID()); {
+	case err != nil:
+		return fmt.Errorf("failed to get space root for space id %s: %v", session.SpaceID(), err)
+	case spaceRoot == nil:
+		return fmt.Errorf("space root for space id %s not found", session.SpaceID())
+	case spaceRoot.InternalPath() == "":
+		return fmt.Errorf("space root for space id %s has no valid internal path", session.SpaceID())
+	default:
+		revisionNode.SpaceRoot = spaceRoot
+	}
+
 	// upload the data to the blobstore
 	_, subspan := tracer.Start(ctx, "WriteBlob")
 	err = session.store.tp.WriteBlob(revisionNode, session.binPath())
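The added switch above validates the space-root lookup before wiring it into the revision node; an expressionless switch with an init statement keeps the guard cases flat instead of nesting if blocks. For illustration only (not part of the changeset), a minimal Go sketch of the same validation pattern with a hypothetical lookup function and types.

package main

import (
	"errors"
	"fmt"
)

type spaceRoot struct{ internalPath string }

// lookupSpaceRoot stands in for the store's space-root lookup.
func lookupSpaceRoot(spaceID string) (*spaceRoot, error) {
	if spaceID == "" {
		return nil, errors.New("empty space id")
	}
	return &spaceRoot{internalPath: "/var/lib/opencloud/" + spaceID}, nil
}

// resolveSpaceRoot rejects every invalid outcome of the lookup in one
// flat switch before handing the result to the caller.
func resolveSpaceRoot(spaceID string) (*spaceRoot, error) {
	switch root, err := lookupSpaceRoot(spaceID); {
	case err != nil:
		return nil, fmt.Errorf("failed to get space root for space id %s: %v", spaceID, err)
	case root == nil:
		return nil, fmt.Errorf("space root for space id %s not found", spaceID)
	case root.internalPath == "":
		return nil, fmt.Errorf("space root for space id %s has no valid internal path", spaceID)
	default:
		return root, nil
	}
}

func main() {
	root, err := resolveSpaceRoot("space-1")
	fmt.Println(root, err)
}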
vendor/modules.txt (vendored; 2 changed lines)
@@ -1198,7 +1198,7 @@ github.com/open-policy-agent/opa/v1/types
 github.com/open-policy-agent/opa/v1/util
 github.com/open-policy-agent/opa/v1/util/decoding
 github.com/open-policy-agent/opa/v1/version
-# github.com/opencloud-eu/reva/v2 v2.29.3
+# github.com/opencloud-eu/reva/v2 v2.29.5
 ## explicit; go 1.24.1
 github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
 github.com/opencloud-eu/reva/v2/cmd/revad/runtime