Mirror of https://github.com/opencloud-eu/opencloud.git (synced 2025-12-24 14:50:39 -05:00)

Compare commits: remove-wor ... v2.0.5 (48 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 59e3e081da | |
| | 4d2bf41274 | |
| | 4cab81a857 | |
| | 63b201b2a7 | |
| | da64699172 | |
| | 723284193d | |
| | ddaf1f86b0 | |
| | 1e6ee49221 | |
| | db04f851ca | |
| | a8fc417e18 | |
| | a240389a06 | |
| | 195e9ffdac | |
| | d76d5cd49c | |
| | bf7fabddf7 | |
| | 0160118996 | |
| | 0935c44c1c | |
| | f60e36e8d7 | |
| | 4828c5c4bc | |
| | 5dfd52aa32 | |
| | d1a8463036 | |
| | 25d5dab696 | |
| | e0f05003ef | |
| | 1cdd21591a | |
| | e730c6ed3a | |
| | 7076185c0f | |
| | 87ea852a21 | |
| | 4988709fd7 | |
| | f1ba964365 | |
| | 7608ae482c | |
| | abe653ed49 | |
| | b67ce7cd34 | |
| | 3263c16652 | |
| | c8df6a27b6 | |
| | a3a5a5a047 | |
| | a80ecf0b88 | |
| | 17604171fe | |
| | df4d427e66 | |
| | c1ddc61f2b | |
| | fa9282bec2 | |
| | 9759a2dee2 | |
| | 1bea4b7471 | |
| | f266857cf1 | |
| | d15c961c31 | |
| | 9238639753 | |
| | 0718947b0c | |
| | 2fc33db3aa | |
| | 05f0a8dc96 | |
| | 8697e6a4c8 | |
.github/workflows/labels.yml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
+name: Require Pull Request Labels
+on:
+  pull_request:
+    types: [opened, labeled, unlabeled, synchronize]
+jobs:
+  label:
+    # Only run if PR is not from a fork
+    if: github.event.pull_request.head.repo.full_name == github.repository
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: mheap/github-action-required-labels@v5
+        with:
+          mode: minimum
+          count: 1
+          labels: |
+            Type:Bug
+            Type:Enhancement
+            Type:Feature
+            Type:Breaking-Change
+            Type:Test
+            Type:Documentation
+            Type:Maintenance
+            Type:Security
+            Type:Dependencies
+            Type:DevOps
+            dependencies
+          add_comment: true
@@ -1,3 +1,3 @@
 # The test runner source for UI tests
-WEB_COMMITID=4f68c1e1dbcc88839e42c17c57f31dec243d7bd0
-WEB_BRANCH=main
+WEB_COMMITID=3cc779ddb45b52134fa986d21587f18316e2135a
+WEB_BRANCH=stable-2.1
.woodpecker.star (176 lines changed)
@@ -1,17 +1,14 @@
 """OpenCloud CI definition
 """
 
-# Production release tags
-# NOTE: need to be updated if new production releases are determined
-# - follow semver
-# - omit 'v' prefix
-PRODUCTION_RELEASE_TAGS = ["2.0", "3.0"]
 
 # Repository
 
 repo_slug = "opencloud-eu/opencloud"
 docker_repo_slug = "opencloudeu/opencloud"
 
+# Active base branch for this code stream
+branch = "stable-2.0"
 
 # images
 ALPINE_GIT = "alpine/git:latest"
 APACHE_TIKA = "apache/tika:2.8.0.0"
@@ -49,6 +46,8 @@ READY_RELEASE_GO = "woodpeckerci/plugin-ready-release-go:latest"
 DEFAULT_PHP_VERSION = "8.2"
 DEFAULT_NODEJS_VERSION = "20"
 
+CACHE_S3_SERVER = "https://s3.ci.opencloud.eu"
+
 dirs = {
     "base": "/woodpecker/src/github.com/opencloud-eu/opencloud",
     "web": "/woodpecker/src/github.com/opencloud-eu/opencloud/webTestRunner",
@@ -338,6 +337,20 @@ config = {
     },
     "dockerReleases": {
         "architectures": ["arm64", "amd64"],
+        "production": {
+            # NOTE: need to be updated if new production releases are determined
+            "tags": ["2.0"],
+            "repo": docker_repo_slug,
+            "build_type": "production",
+        },
+        "rolling": {
+            "repo": docker_repo_slug + "-rolling",
+            "build_type": "rolling",
+        },
+        "daily": {
+            "repo": docker_repo_slug + "-rolling",
+            "build_type": "daily",
+        },
     },
     "litmus": True,
     "codestyle": True,
@@ -358,9 +371,7 @@ MINIO_MC_ENV = {
     "CACHE_BUCKET": {
        "from_secret": "cache_s3_bucket",
    },
-    "MC_HOST": {
-        "from_secret": "cache_s3_server",
-    },
+    "MC_HOST": CACHE_S3_SERVER,
    "AWS_ACCESS_KEY_ID": {
        "from_secret": "cache_s3_access_key",
    },
@@ -419,8 +430,7 @@ def main(ctx):
     pipelines = []
 
     build_release_helpers = \
-        readyReleaseGo() + \
-        docs()
+        readyReleaseGo()
 
     build_release_helpers.append(
         pipelineDependsOn(
@@ -659,9 +669,7 @@ def testOpencloud(ctx):
         "name": "scan-result-cache",
         "image": PLUGINS_S3,
         "settings": {
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "bucket": "cache",
             "source": "cache/**/*",
             "target": "%s/%s" % (repo_slug, ctx.build.commit + "-${CI_PIPELINE_NUMBER}"),
@@ -682,7 +690,7 @@ def testOpencloud(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -713,7 +721,7 @@ def scanOpencloud(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -736,7 +744,7 @@ def buildOpencloudBinaryForTesting(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -871,7 +879,7 @@ def codestyle(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -941,7 +949,7 @@ def localApiTestPipeline(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1007,7 +1015,7 @@ def cs3ApiTests(ctx, storage, accounts_hash_difficulty = 4):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1124,7 +1132,7 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1178,7 +1186,7 @@ def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = Fa
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1228,7 +1236,7 @@ def e2eTestPipeline(ctx):
     e2e_trigger = [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1337,7 +1345,7 @@ def multiServiceE2ePipeline(ctx):
     e2e_trigger = [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1463,9 +1471,7 @@ def uploadTracingResult(ctx):
             "bucket": {
                 "from_secret": "cache_public_s3_bucket",
             },
-            "endpoint": {
-                "from_secret": "cache_public_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "path_style": True,
             "source": "webTestRunner/reports/e2e/playwright/tracing/**/*",
             "strip_prefix": "webTestRunner/reports/e2e/playwright/tracing",
@@ -1513,26 +1519,31 @@ def logTracingResults():
 def dockerReleases(ctx):
     pipelines = []
     docker_repos = []
-    build_type = "daily"
+    build_type = ""
 
-    # dockerhub repo
-    # - "opencloudeu/opencloud-rolling"
-    repo = docker_repo_slug + "-rolling"
-    docker_repos.append(repo)
-
-    # production release repo
     if ctx.build.event == "tag":
         tag = ctx.build.ref.replace("refs/tags/v", "").lower()
-        for prod_tag in PRODUCTION_RELEASE_TAGS:
+
+        is_production = False
+        for prod_tag in config["dockerReleases"]["production"]["tags"]:
             if tag.startswith(prod_tag):
-                docker_repos.append(docker_repo_slug)
+                is_production = True
                 break
 
+        if is_production:
+            docker_repos.append(config["dockerReleases"]["production"]["repo"])
+            build_type = config["dockerReleases"]["production"]["build_type"]
+
+        else:
+            docker_repos.append(config["dockerReleases"]["rolling"]["repo"])
+            build_type = config["dockerReleases"]["rolling"]["build_type"]
+
+    else:
+        docker_repos.append(config["dockerReleases"]["daily"]["repo"])
+        build_type = config["dockerReleases"]["daily"]["build_type"]
+
     for repo in docker_repos:
         repo_pipelines = []
-        if ctx.build.event == "tag":
-            build_type = "rolling" if "rolling" in repo else "production"
 
         repo_pipelines.append(dockerRelease(ctx, repo, build_type))
 
         # manifest = releaseDockerManifest(ctx, repo, build_type)
@@ -1548,10 +1559,10 @@ def dockerReleases(ctx):
     return pipelines
 
 def dockerRelease(ctx, repo, build_type):
-    build_args = [
-        "REVISION=%s" % (ctx.build.commit),
-        "VERSION=%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
-    ]
+    build_args = {
+        "REVISION": "%s" % (ctx.build.commit),
+        "VERSION": "%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
+    }
 
     depends_on = getPipelineNames(getGoBinForTesting(ctx))
 
@@ -1628,10 +1639,6 @@ def dockerRelease(ctx, repo, build_type):
             ],
         },
         "when": [
-            {
-                "event": ["push", "manual"],
-                "branch": "main",
-            },
            {
                 "event": "tag",
             },
@@ -1642,7 +1649,7 @@ def dockerRelease(ctx, repo, build_type):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1673,7 +1680,15 @@ def binaryRelease(ctx, arch, depends_on = []):
         {
             "name": "build",
             "image": OC_CI_GOLANG,
-            "environment": CI_HTTP_PROXY_ENV,
+            "environment": {
+                "VERSION": (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
+                "HTTP_PROXY": {
+                    "from_secret": "ci_http_proxy",
+                },
+                "HTTPS_PROXY": {
+                    "from_secret": "ci_http_proxy",
+                },
+            },
             "commands": [
                 "make -C opencloud release-%s" % arch,
             ],
@@ -1686,10 +1701,6 @@ def binaryRelease(ctx, arch, depends_on = []):
                 "make -C opencloud release-finish",
             ],
             "when": [
-                {
-                    "event": ["push", "manual"],
-                    "branch": "main",
-                },
                 {
                     "event": "tag",
                 },
@@ -1706,7 +1717,6 @@ def binaryRelease(ctx, arch, depends_on = []):
                     "opencloud/dist/release/*",
                 ],
                 "title": ctx.build.ref.replace("refs/tags/v", ""),
-                "overwrite": True,
                 "prerelease": len(ctx.build.ref.split("-")) > 1,
             },
             "when": [
@@ -1720,7 +1730,7 @@ def binaryRelease(ctx, arch, depends_on = []):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1786,7 +1796,6 @@ def licenseCheck(ctx):
                     "third-party-licenses.tar.gz",
                 ],
                 "title": ctx.build.ref.replace("refs/tags/v", ""),
-                "overwrite": True,
                 "prerelease": len(ctx.build.ref.split("-")) > 1,
             },
             "when": [
@@ -1799,7 +1808,7 @@ def licenseCheck(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1824,13 +1833,14 @@ def readyReleaseGo():
                     "forge_token": {
                         "from_secret": "github_token",
                     },
+                    "release_branch": branch,
                 },
             },
         ],
         "when": [
             {
                 "event": ["push", "manual"],
-                "branch": "main",
+                "branch": ["main", "stable-*"],
             },
         ],
     }]
@@ -1872,40 +1882,9 @@ def releaseDockerReadme(repo, build_type):
             "event": ["push", "manual"],
             "branch": "main",
         },
         {
             "event": "tag",
         },
     ],
 }
 
-def docs():
-    return [
-        {
-            "name": "dev-docs",
-            "steps": [
-                {
-                    "name": "devdocs",
-                    "image": "codeberg.org/xfix/plugin-codeberg-pages-deploy:1",
-                    "settings": {
-                        "folder": "docs",
-                        "branch": "docs",
-                        "git_config_email": "${CI_COMMIT_AUTHOR_EMAIL}",
-                        "git_config_name": "${CI_COMMIT_AUTHOR}",
-                        "ssh_key": {
-                            "from_secret": "ssh_key",
-                        },
-                    },
-                },
-            ],
-            "when": [
-                {
-                    "event": "push",
-                    "branch": "main",
-                },
-            ],
-        },
-    ]
-
 def makeNodeGenerate(module):
     if module == "":
         make = "make"
@@ -1965,7 +1944,7 @@ def notify(ctx):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": ["main", "release-*"],
+            "branch": ["main", "stable-*", "release-*"],
         },
         {
             "event": "tag",
@@ -2179,11 +2158,6 @@ def build():
     ]
 
 def skipIfUnchanged(ctx, type):
-    ## FIXME: the 'exclude' feature (https://woodpecker-ci.org/docs/usage/workflow-syntax#path) does not seem to provide
-    # what we need. It seems to skip the build as soon as one of the changed files matches an exclude pattern, we only
-    # want to skip of ALL changed files match. So skip this condition for now:
-    return []
-
     if "full-ci" in ctx.build.title.lower() or ctx.build.event == "tag" or ctx.build.event == "cron":
         return []
 
@@ -2345,9 +2319,7 @@ def genericCache(name, action, mounts, cache_path):
         "name": "%s_%s" % (action, name),
         "image": PLUGINS_S3_CACHE,
         "settings": {
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "rebuild": rebuild,
             "restore": restore,
             "mount": mounts,
@@ -2378,9 +2350,7 @@ def genericCachePurge(flush_path):
             "secret_key": {
                 "from_secret": "cache_s3_secret_key",
             },
-            "endpoint": {
-                "from_secret": "cache_s3_server",
-            },
+            "endpoint": CACHE_S3_SERVER,
             "flush": True,
             "flush_age": 1,
             "flush_path": flush_path,
@@ -2390,7 +2360,7 @@ def genericCachePurge(flush_path):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -2576,7 +2546,7 @@ def litmus(ctx, storage):
     "when": [
         {
             "event": ["push", "manual"],
-            "branch": "main",
+            "branch": ["main", "stable-*"],
         },
         {
             "event": "pull_request",
@@ -1,16 +0,0 @@
----
-
-when:
-  - event: ["push", "manual"]
-    branch: main
-
-steps:
-  - name: devdocs
-    image: codeberg.org/xfix/plugin-codeberg-pages-deploy:1
-    settings:
-      folder: docs
-      branch: docs
-      git_config_email: ${CI_COMMIT_AUTHOR_EMAIL}
-      git_config_name: ${CI_COMMIT_AUTHOR}
-      ssh_key:
-        from_secret: ssh_key
CHANGELOG.md (60 lines changed)
@@ -1,5 +1,65 @@
 # Changelog
 
+## [2.0.5](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.5) - 2025-10-29
+
+### ❤️ Thanks to all contributors! ❤️
+
+@ScharfViktor
+
+### ✅ Tests
+
+- [full-ci] Bump reva 2.29.5 [[#1738](https://github.com/opencloud-eu/opencloud/pull/1738)]
+- Fix tests in the stable branch [[#1731](https://github.com/opencloud-eu/opencloud/pull/1731)]
+
+## [2.0.4](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.4) - 2025-07-11
+
+### ❤️ Thanks to all contributors! ❤️
+
+@micbar
+
+### 🐛 Bug Fixes
+
+- [Backport] fix: build_args is now an object [[#1213](https://github.com/opencloud-eu/opencloud/pull/1213)]
+
+## [2.0.3](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.3) - 2025-07-10
+
+### ❤️ Thanks to all contributors! ❤️
+
+@ScharfViktor
+
+### 📦️ Dependencies
+
+- [full-ci] Reva bump 2.29.4 [[#1202](https://github.com/opencloud-eu/opencloud/pull/1202)]
+
+## [2.0.2](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.2) - 2025-05-02
+
+### ❤️ Thanks to all contributors! ❤️
+
+@ScharfViktor
+
+### 🐛 Bug Fixes
+
+- Abort when the space root has already been created [[#766](https://github.com/opencloud-eu/opencloud/pull/766)]
+
+## [2.0.1](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.1) - 2025-04-28
+
+### ❤️ Thanks to all contributors! ❤️
+
+@JammingBen, @ScharfViktor, @fschade, @micbar
+
+### 🐛 Bug Fixes
+
+- fix(decomposeds3): enable async-uploads by default (#686) [[#694](https://github.com/opencloud-eu/opencloud/pull/694)]
+- fix(antivirus | backport): introduce a default max scan size for the full example deployment [[#620](https://github.com/opencloud-eu/opencloud/pull/620)]
+- [full-ci] chore(web): bump web to v2.1.1 [[#638](https://github.com/opencloud-eu/opencloud/pull/638)]
+
+### 📦️ Dependencies
+
+- chore: prepare release, bump version [[#731](https://github.com/opencloud-eu/opencloud/pull/731)]
+- Port #567 [[#689](https://github.com/opencloud-eu/opencloud/pull/689)]
+- chore: bump reva to v2.29.2 [[#681](https://github.com/opencloud-eu/opencloud/pull/681)]
+- build(deps): bump github.com/nats-io/nats-server/v2 [[#683](https://github.com/opencloud-eu/opencloud/pull/683)]
+
 ## [2.0.0](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.0) - 2025-03-26
 
 ### ❤️ Thanks to all contributors! ❤️
 
@@ -1,6 +1,6 @@
 
 
-[![status-badge](https://ci.opencloud.eu/api/badges/3/status.svg)](https://ci.opencloud.eu/repos/3)
+[![status-badge](https://ci.opencloud.eu/api/badges/3/status.svg?branch=stable-2.0)](https://ci.opencloud.eu/repos/3/branches/stable-2.0)
 [![Matrix](https://img.shields.io/matrix/opencloud%3Amatrix.org?logo=matrix)](https://app.element.io/#/room/#opencloud:matrix.org)
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
 
@@ -99,7 +99,7 @@ MINIO_DOMAIN=
 #DECOMPOSED=:decomposed.yml
 
 # Define SMTP settings if you would like to send OpenCloud email notifications.
 #
-#
+# NOTE: when configuring Inbucket, these settings have no effect, see inbucket.yml for details.
 # SMTP host to connect to.
 SMTP_HOST=
@@ -210,6 +210,10 @@ COLLABORA_SSL_VERIFICATION=false
 # envvar in the OpenCloud Settings above by adding 'antivirus' to the list.
 # Note: the leading colon is required to enable the service.
 #CLAMAV=:clamav.yml
+# The maximum scan size the virus scanner can handle, needs adjustment in the scanner config as well.
+# Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB.
+# Defaults to "100MB"
+#ANTIVIRUS_MAX_SCAN_SIZE=
 # Image version of the ClamAV container.
 # Defaults to "latest"
 CLAMAV_DOCKER_TAG=
@@ -4,6 +4,7 @@ services:
     environment:
       ANTIVIRUS_SCANNER_TYPE: "clamav"
       ANTIVIRUS_CLAMAV_SOCKET: "/var/run/clamav/clamd.sock"
+      ANTIVIRUS_MAX_SCAN_SIZE: ${ANTIVIRUS_MAX_SCAN_SIZE:-100MB}
       # the antivirus service needs manual startup, see .env and opencloud.yaml for START_ADDITIONAL_SERVICES
       # configure the antivirus service
       POSTPROCESSING_STEPS: "virusscan"
go.mod (6 lines changed)
@@ -55,15 +55,15 @@ require (
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/mna/pigeon v1.3.0
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
-	github.com/nats-io/nats-server/v2 v2.11.0
-	github.com/nats-io/nats.go v1.39.1
+	github.com/nats-io/nats-server/v2 v2.11.1
+	github.com/nats-io/nats.go v1.41.0
 	github.com/oklog/run v1.1.0
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/onsi/ginkgo v1.16.5
 	github.com/onsi/ginkgo/v2 v2.23.3
 	github.com/onsi/gomega v1.36.3
 	github.com/open-policy-agent/opa v1.2.0
-	github.com/opencloud-eu/reva/v2 v2.29.1
+	github.com/opencloud-eu/reva/v2 v2.29.5
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
 	github.com/pkg/errors v0.9.1
go.sum (12 lines changed)
@@ -827,10 +827,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
 github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8=
 github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
 github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
-github.com/nats-io/nats-server/v2 v2.11.0 h1:fdwAT1d6DZW/4LUz5rkvQUe5leGEwjjOQYntzVRKvjE=
-github.com/nats-io/nats-server/v2 v2.11.0/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI=
-github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk=
-github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
+github.com/nats-io/nats-server/v2 v2.11.1 h1:LwdauqMqMNhTxTN3+WFTX6wGDOKntHljgZ+7gL5HCnk=
+github.com/nats-io/nats-server/v2 v2.11.1/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI=
+github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
+github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
 github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
 github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -865,8 +865,8 @@ github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
 github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
 github.com/open-policy-agent/opa v1.2.0 h1:88NDVCM0of1eO6Z4AFeL3utTEtMuwloFmWWU7dRV1z0=
 github.com/open-policy-agent/opa v1.2.0/go.mod h1:30euUmOvuBoebRCcJ7DMF42bRBOPznvt0ACUMYDUGVY=
-github.com/opencloud-eu/reva/v2 v2.29.1 h1:SgB2zn8d/3UWwFiJ0pUs85aDKJJ36JoKnyRM+iW+VoI=
-github.com/opencloud-eu/reva/v2 v2.29.1/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM=
+github.com/opencloud-eu/reva/v2 v2.29.5 h1:T4RjTSDk650PVn0hAL8HpF+61ChqQ/UwNoWMYYAMOGU=
+github.com/opencloud-eu/reva/v2 v2.29.5/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -16,7 +16,7 @@ var (
 	// LatestTag is the latest released version plus the dev meta version.
 	// Will be overwritten by the release pipeline
 	// Needs a manual change for every tagged release
-	LatestTag = "1.1.0+dev"
+	LatestTag = "2.0.5+dev"
 
 	// Date indicates the build date.
 	// This has been removed, it looks like you can only replace static strings with recent go versions
@@ -1,49 +1,124 @@
 export default {
-  changeTypes: [
-    {
-      title: '💥 Breaking changes',
-      labels: ['breaking', 'Type:Breaking-Change'],
-      bump: 'major',
-      weight: 3
-    },
-    {
-      title: '🔒 Security',
-      labels: ['security', 'Type:Security'],
-      bump: 'patch',
-      weight: 2
-    },
-    {
-      title: '✨ Features',
-      labels: ['feature', 'Type:Feature'],
-      bump: 'minor',
-      weight: 1
-    },
-    {
-      title: '📈 Enhancement',
-      labels: ['enhancement', 'refactor', 'Type:Enhancement'],
-      bump: 'minor'
-    },
-    {
-      title: '🐛 Bug Fixes',
-      labels: ['bug', 'Type:Bug'],
-      bump: 'patch'
-    },
-    {
-      title: '📚 Documentation',
-      labels: ['docs', 'documentation', 'Type:Documentation'],
-      bump: 'patch'
-    },
-    {
-      title: '✅ Tests',
-      labels: ['test', 'tests', 'Type:Test'],
-      bump: 'patch'
-    },
-    {
-      title: '📦️ Dependencies',
-      labels: ['dependency', 'dependencies', 'Type:Dependencies'],
-      bump: 'patch',
-      weight: -1
-    }
-  ],
-  useVersionPrefixV: true,
-}
+  changeTypes: [
+    {
+      title: "💥 Breaking changes",
+      labels: ["breaking", "Type:Breaking-Change"],
+      bump: "major",
+      weight: 3,
+    },
+    {
+      title: "🔒 Security",
+      labels: ["security", "Type:Security"],
+      bump: "patch",
+      weight: 2,
+    },
+    {
+      title: "✨ Features",
+      labels: ["feature", "Type:Feature"],
+      bump: "minor",
+      weight: 1,
+    },
+    {
+      title: "📈 Enhancement",
+      labels: ["enhancement", "refactor", "Type:Enhancement"],
+      bump: "minor",
+    },
+    {
+      title: "🐛 Bug Fixes",
+      labels: ["bug", "Type:Bug"],
+      bump: "patch",
+    },
+    {
+      title: "📚 Documentation",
+      labels: ["docs", "documentation", "Type:Documentation"],
+      bump: "patch",
+    },
+    {
+      title: "✅ Tests",
+      labels: ["test", "tests", "Type:Test"],
+      bump: "patch",
+    },
+    {
+      title: "📦️ Dependencies",
+      labels: ["dependency", "dependencies", "Type:Dependencies"],
+      bump: "patch",
+      weight: -1,
+    },
+  ],
+  useVersionPrefixV: true,
+  getLatestTag: ({ exec }) => {
+    // the plugin uses the latest tag to determine the next version
+    // and the changes that are included in the upcoming release.
+    const branch = getBranch(exec);
+    let tags = getTags(exec);
+
+    if (branch.startsWith("stable-")) {
+      const [_, majorAndMinor] = branch.split("-");
+      // we only care about tags that are within the range of the current stable branch.
+      // e.g. if the branch is stable-1.2, we only care about tags that are v1.2.x.
+      const matchingTags = tags.filter((t) =>
+        t.startsWith(`v${majorAndMinor}`)
+      );
+
+      if (matchingTags.length) {
+        tags = matchingTags;
+      }
+    }
+
+    return tags.pop() || "v0.0.0";
+  },
+  useLatestRelease: ({ exec, nextVersion }) => {
+    // check if the release should be marked as latest release on GitHub.
+    const tags = getTags(exec);
+    const latestTag = tags.pop() || "v0.0.0";
+    return compareVersions(latestTag, nextVersion) === -1;
+  },
+};
+
+const parseVersion = (tag: string) => {
+  const version = tag.startsWith("v") ? tag.slice(1) : tag;
+  const [main, pre] = version.split("-");
+  const [major, minor, patch] = main.split(".").map(Number);
+  return { major, minor, patch, pre };
+};
+
+const getBranch = (exec: any): string => {
+  return exec("git rev-parse --abbrev-ref HEAD", {
+    silent: true,
+  }).stdout.trim();
+};
+
+const getTags = (exec: any) => {
+  exec("git fetch --tags", { silent: true });
+  const tagsOutput = exec("git tag", { silent: true }).stdout.trim();
+  const tags: string[] = tagsOutput ? tagsOutput.split("\n") : [];
+  return tags.filter((tag) => tag.startsWith("v")).sort(compareVersions);
+};
+
+const compareVersions = (a: string, b: string) => {
+  const va = parseVersion(a);
+  const vb = parseVersion(b);
+
+  if (va.major !== vb.major) {
+    return va.major - vb.major;
+  }
+  if (va.minor !== vb.minor) {
+    return va.minor - vb.minor;
+  }
+  if (va.patch !== vb.patch) {
+    return va.patch - vb.patch;
+  }
+
+  if (va.pre && !vb.pre) {
+    return -1;
+  }
+  if (!va.pre && vb.pre) {
+    return 1;
+  }
+
+  if (va.pre && vb.pre) {
+    return va.pre.localeCompare(vb.pre);
+  }
+
+  return 0;
+};
@@ -4,7 +4,10 @@ The `antivirus` service is responsible for scanning files for viruses.
 
 ## Memory Considerations
 
-The antivirus service can consume considerably amounts of memory. This is relevant to provide or define sufficient memory for the deployment selected. To avoid out of memory (OOM) situations, the following equation gives a rough overview based on experiences made. The memory calculation comes without any guarantee, is intended as overview only and subject of change.
+The antivirus service can consume considerable amounts of memory.
+This is relevant to provide or define sufficient memory for the deployment selected.
+To avoid out of memory (OOM) situations, the following equation gives a rough overview based on experiences made.
+The memory calculation comes without any guarantee, is intended as overview only and subject of change.
 
 `memory limit` = `max file size` x `workers` x `factor 8 - 14`
 
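To make the equation concrete, here is a minimal sketch in Go; the 100 MB max file size and 10 workers are assumed example values for illustration, not project defaults:

```go
package main

import "fmt"

func main() {
	// Illustrative assumptions only: a 100 MB ANTIVIRUS_MAX_SCAN_SIZE
	// and 10 ANTIVIRUS_WORKERS. Neither value is a project default.
	maxFileSizeMB := 100.0
	workers := 10.0
	for _, factor := range []float64{8, 14} {
		fmt.Printf("factor %2.0f: ~%.0f MB\n", factor, maxFileSizeMB*workers*factor)
	}
	// Output:
	// factor  8: ~8000 MB
	// factor 14: ~14000 MB
}
```

Under those assumptions the service should be budgeted roughly 8 to 14 GB of memory.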
@@ -19,17 +22,25 @@ With:
 
 ### Antivirus Scanner Type
 
-The antivirus service currently supports [ICAP](https://tools.ietf.org/html/rfc3507) and [ClamAV](http://www.clamav.net/index.html) as antivirus scanners. The `ANTIVIRUS_SCANNER_TYPE` environment variable is used to select the scanner. The detailed configuration for each scanner heavily depends on the scanner type selected. See the environment variables for more details.
+The antivirus service currently supports [ICAP](https://tools.ietf.org/html/rfc3507) and [ClamAV](http://www.clamav.net/index.html) as antivirus scanners.
+The `ANTIVIRUS_SCANNER_TYPE` environment variable is used to select the scanner.
+The detailed configuration for each scanner heavily depends on the scanner type selected.
+See the environment variables for more details.
 
 - For `icap`, only scanners using the `X-Infection-Found` header are currently supported.
 - For `clamav` only local sockets can currently be configured.
 
 ### Maximum Scan Size
 
-Several factors can make it necessary to limit the maximum filesize the antivirus service will use for scanning. Use the `ANTIVIRUS_MAX_SCAN_SIZE` environment variable to scan only a given amount of bytes. Obviously, it is recommended to scan the whole file, but several factors like scanner type and version, bandwidth, performance issues, etc. might make a limit necessary.
+Several factors can make it necessary to limit the maximum filesize the antivirus service uses for scanning.
+Use the `ANTIVIRUS_MAX_SCAN_SIZE` environment variable to specify the maximum file size to be scanned.
+
+Even if it's recommended to scan each file, several factors like scanner type and version,
+bandwidth, performance issues, etc. might make a limit necessary.
 
 **IMPORTANT**
-> Streaming of files to the virus scan service still [needs to be implemented](https://github.com/owncloud/ocis/issues/6803). To prevent OOM errors `ANTIVIRUS_MAX_SCAN_SIZE` needs to be set lower than available ram.
+> Streaming of files to the virus scan service still [needs to be implemented](https://github.com/owncloud/ocis/issues/6803).
+> To prevent OOM errors `ANTIVIRUS_MAX_SCAN_SIZE` needs to be set lower than available ram and or the maximum file size that can be scanned by the virus scanner.
 
 ### Antivirus Workers
 
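For illustration, a value such as `2GB` has to be converted to bytes before it can be compared against an upload's size. The sketch below is an assumed stand-in for that conversion, not the service's actual parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseScanSize is a minimal sketch of turning a size string in the style
// accepted by ANTIVIRUS_MAX_SCAN_SIZE ("2GB", "100MiB", ...) into bytes.
// It is illustrative only; the real service may parse differently.
func parseScanSize(s string) (uint64, error) {
	units := map[string]uint64{
		"KB": 1000, "MB": 1000 * 1000, "GB": 1000 * 1000 * 1000,
		"KiB": 1024, "MiB": 1024 * 1024, "GiB": 1024 * 1024 * 1024,
	}
	s = strings.TrimSpace(s)
	for suffix, mult := range units {
		if strings.HasSuffix(s, suffix) {
			n, err := strconv.ParseUint(strings.TrimSuffix(s, suffix), 10, 64)
			if err != nil {
				return 0, err
			}
			return n * mult, nil
		}
	}
	return strconv.ParseUint(s, 10, 64) // bare number of bytes
}

func main() {
	n, _ := parseScanSize("100MB")
	fmt.Println(n) // 100000000
}
```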
@@ -41,7 +52,7 @@ The antivirus service allows three different ways of handling infected files. T
 
 - `delete`: (default): Infected files will be deleted immediately, further postprocessing is cancelled.
 - `abort`: (advanced option): Infected files will be kept, further postprocessing is cancelled. Files can be manually retrieved and inspected by an admin. To identify the file for further investigation, the antivirus service logs the abort/infected state including the file ID. The file is located in the `storage/users/uploads` folder of the OpenCloud data directory and persists until it is manually deleted by the admin via the [Manage Unfinished Uploads](https://github.com/opencloud-eu/opencloud/tree/main/services/storage-users#manage-unfinished-uploads) command.
-- `continue`: (obviously not recommended): Infected files will be marked via metadata as infected but postprocessing continues normally. Note: Infected Files are moved to their final destination and therefore not prevented from download which includes the risk of spreading viruses.
+- `continue`: (not recommended): Infected files will be marked via metadata as infected, but postprocessing continues normally. Note: Infected Files are moved to their final destination and therefore not prevented from download, which includes the risk of spreading viruses.
 
 In all cases, a log entry is added declaring the infection and handling method and a notification via the `userlog` service sent.
 
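A rough sketch of how the three modes could branch; the `handling` type and `handleInfected` function are hypothetical names for illustration, not the service's internal API:

```go
package main

import "fmt"

type handling string

const (
	handlingDelete   handling = "delete"   // remove upload, cancel postprocessing
	handlingAbort    handling = "abort"    // keep upload for inspection, cancel postprocessing
	handlingContinue handling = "continue" // mark as infected, let postprocessing finish
)

// handleInfected is a hypothetical dispatcher mirroring the documented modes.
func handleInfected(mode handling, fileID string) {
	switch mode {
	case handlingAbort:
		fmt.Printf("infection found, aborting postprocessing, upload kept: %s\n", fileID)
	case handlingContinue:
		fmt.Printf("infection found, continuing postprocessing: %s\n", fileID)
	default: // delete is the documented default
		fmt.Printf("infection found, deleting upload: %s\n", fileID)
	}
}

func main() {
	handleInfected(handlingDelete, "file-123")
}
```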
@@ -21,7 +21,7 @@ type Config struct {
 	Workers int `yaml:"workers" env:"ANTIVIRUS_WORKERS" desc:"The number of concurrent go routines that fetch events from the event queue." introductionVersion:"1.0.0"`
 
 	Scanner Scanner
-	MaxScanSize string `yaml:"max-scan-size" env:"ANTIVIRUS_MAX_SCAN_SIZE" desc:"The maximum scan size the virus scanner can handle. Only this many bytes of a file will be scanned. 0 means unlimited and is the default. Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB." introductionVersion:"1.0.0"`
+	MaxScanSize string `yaml:"max-scan-size" env:"ANTIVIRUS_MAX_SCAN_SIZE" desc:"The maximum scan size the virus scanner can handle. 0 means unlimited. Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB." introductionVersion:"1.0.0"`
 
 	Context context.Context `json:"-" yaml:"-"`
 
@@ -472,7 +472,10 @@ module.exports = function(webpackEnv) {
               // its runtime that would otherwise be processed through "file" loader.
               // Also exclude `html` and `json` extensions so they get processed
               // by webpacks internal loaders.
-              exclude: [/\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
+              //
+              // html-webpack-plugin has a known bug,
+              // fixed by /^$/ https://github.com/jantimon/html-webpack-plugin/issues/1589
+              exclude: [/^$/, /\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
               options: {
                 name: 'static/media/[name].[hash:8].[ext]',
               },
@@ -123,6 +123,7 @@ func DefaultConfig() *config.Config {
 			MaxConcurrency:          5,
 			LockCycleDurationFactor: 30,
 			DisableMultipart:        true,
+			AsyncUploads:            true,
 		},
 		Decomposed: config.DecomposedDriver{
 			Propagator: "sync",
@@ -1,6 +1,6 @@
 SHELL := bash
 NAME := web
-WEB_ASSETS_VERSION = v2.1.0
+WEB_ASSETS_VERSION = v2.1.1
 WEB_ASSETS_BRANCH = main
 
 ifneq (, $(shell command -v go 2> /dev/null)) # suppress `command not found warnings` for non go targets in CI
@@ -120,3 +120,26 @@ Feature: Propfind test
       | Manager      | RDNVWZP |
       | Space Editor | DNVW    |
      | Space Viewer |         |
+
+  @issue-1523
+  Scenario: propfind response contains a restored folder with correct name
+    Given user "Alice" has created a folder "folderMain" in space "Personal"
+    And user "Alice" has deleted folder "folderMain"
+    And user "Alice" has created a folder "folderMain" in space "Personal"
+    When user "Alice" restores the folder with original path "/folderMain" to "/folderMain (1)" using the trashbin API
+    And user "Alice" sends PROPFIND request to space "Personal" using the WebDAV API
+    Then the HTTP status code should be "207"
+    And as user "Alice" the PROPFIND response should contain a resource "folderMain" with these key and value pairs:
+      | key            | value             |
+      | oc:fileid      | %file_id_pattern% |
+      | oc:file-parent | %file_id_pattern% |
+      | oc:name        | folderMain        |
+      | oc:permissions | RDNVCKZP          |
+      | oc:size        | 0                 |
+    And as user "Alice" the PROPFIND response should contain a resource "folderMain (1)" with these key and value pairs:
+      | key            | value             |
+      | oc:fileid      | %file_id_pattern% |
+      | oc:file-parent | %file_id_pattern% |
+      | oc:name        | folderMain (1)    |
+      | oc:permissions | RDNVCKZP          |
+      | oc:size        | 0                 |
@@ -23,6 +23,7 @@ Feature: Send a sharing invitations
       | shareType       | user               |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
     And the JSON data of the response should match
     """
@@ -113,7 +114,9 @@ Feature: Send a sharing invitations
       | shareType       | group              |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
+    And user "Carol" has a share "<resource>" synced
     And user "Carol" should have a share "<resource>" shared by user "Alice" from space "Personal"
     And the JSON data of the response should match
     """
@@ -1998,6 +2001,7 @@ Feature: Send a sharing invitations
       | shareType       | user               |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
     And the JSON data of the response should match
     """
@@ -2086,7 +2090,9 @@ Feature: Send a sharing invitations
       | shareType       | group              |
       | permissionsRole | <permissions-role> |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "<resource>" synced
     And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
+    And user "Carol" has a share "<resource>" synced
     And user "Carol" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
     And the JSON data of the response should match
     """
@@ -3166,6 +3172,7 @@ Feature: Send a sharing invitations
       | shareType       | user   |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "textfile.txt" synced
     And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
     When user "Alice" sends the following resource share invitation using the Graph API:
       | resource        | textfile.txt |
@@ -3174,6 +3181,7 @@ Feature: Send a sharing invitations
       | shareType       | group  |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Carol" has a share "textfile.txt" synced
     And user "Carol" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
 
@@ -3193,4 +3201,5 @@ Feature: Send a sharing invitations
       | shareType       | group  |
       | permissionsRole | Viewer |
     Then the HTTP status code should be "200"
+    And user "Brian" has a share "textfile.txt" synced
     And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
@@ -19,7 +19,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | Viewer               |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last resource share with the following properties using the Graph API:
       | space    | Personal     |
       | resource | testfile.txt |
@@ -91,7 +91,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | Viewer               |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last resource share with the following properties using the Graph API:
       | space    | Personal   |
       | resource | <resource> |
@@ -394,7 +394,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | permissionsRole | <new-permissions-role> |
       | space           | NewSpace               |
@@ -474,7 +474,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |          |
       | space              | NewSpace |
@@ -554,7 +554,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z |
       | space              | NewSpace             |
@@ -636,7 +636,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |                        |
       | permissionsRole    | <new-permissions-role> |
@@ -717,7 +717,7 @@ Feature: Update permission of a share
       | sharee             | grp1                 |
       | shareType          | group                |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -799,7 +799,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | permissionsRole | <new-permissions-role> |
       | space           | NewSpace               |
@@ -875,7 +875,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |          |
       | space              | NewSpace |
@@ -951,7 +951,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -1033,7 +1033,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime |                        |
      | permissionsRole    | <new-permissions-role> |
@@ -1110,7 +1110,7 @@ Feature: Update permission of a share
       | sharee             | Brian                |
       | shareType          | user                 |
       | permissionsRole    | <permissions-role>   |
-      | expirationDateTime | 2025-07-15T14:00:00Z |
+      | expirationDateTime | 2027-07-15T14:00:00Z |
     When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
       | expirationDateTime | 2200-07-15T14:00:00Z   |
       | permissionsRole    | <new-permissions-role> |
@@ -21,6 +21,7 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
     When user "Brian" gets all the shares shared with him using the sharing API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -47,6 +48,8 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
+    And user "Brian" has a share "textfile1.txt" synced
     When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile1.txt" using the provisioning API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -73,6 +76,8 @@ Feature: sharing
       | shareType       | user   |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
+    And user "Brian" has a share "textfile1.txt" synced
     When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile0.txt" using the provisioning API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -94,6 +99,7 @@ Feature: sharing
       | shareType       | group  |
       | permissionsRole | Viewer |
     And using SharingNG
+    And user "Brian" has a share "textfile0.txt" synced
     When user "Brian" gets all the shares shared with him using the sharing API
     Then the OCS status code should be "<ocs-status-code>"
     And the HTTP status code should be "200"
@@ -567,3 +567,52 @@ Feature: restore deleted files/folders
       | dav-path-version |
       | spaces           |
       | new              |
+
+  @issue-1523
+  Scenario Outline: restore deleted folder when folder with same name exists
+    Given using <dav-path-version> DAV path
+    And user "Alice" has created folder "new"
+    And user "Alice" has uploaded file with content "content" to "new/test.txt"
+    And user "Alice" has deleted folder "new"
+    And user "Alice" has created folder "new"
+    And user "Alice" has uploaded file with content "new content" to "new/new-file.txt"
+    When user "Alice" restores the folder with original path "/new" to "/new (1)" using the trashbin API
+    Then the HTTP status code should be "201"
+    And as "Alice" the following folders should exist
+      | path     |
+      | /new     |
+      | /new (1) |
+    And as "Alice" the following files should exist
+      | path              |
+      | /new/new-file.txt |
+      | /new (1)/test.txt |
+    Examples:
+      | dav-path-version |
+      | spaces           |
+      | new              |
+
+  @issue-1523
+  Scenario Outline: restore deleted folder with files when folder with same name exists
+    Given using <dav-path-version> DAV path
+    And user "Alice" has created folder "folder-a"
+    And user "Alice" has uploaded file with content "content b" to "folder-a/b.txt"
+    And user "Alice" has uploaded file with content "content c" to "folder-a/c.txt"
+    And user "Alice" has deleted file "folder-a/b.txt"
+    And user "Alice" has deleted folder "folder-a"
+    And user "Alice" has created folder "folder-a"
+    When user "Alice" restores the file with original path "folder-a/b.txt" using the trashbin API
+    Then the HTTP status code should be "201"
+    When user "Alice" restores the folder with original path "/folder-a" to "/folder-a (1)" using the trashbin API
+    Then the HTTP status code should be "201"
+    And as "Alice" the following folders should exist
+      | path          |
+      | /folder-a     |
+      | /folder-a (1) |
+    And as "Alice" the following files should exist
+      | path                |
+      | /folder-a/b.txt     |
+      | /folder-a (1)/c.txt |
+    Examples:
+      | dav-path-version |
+      | spaces           |
+      | new              |
@@ -261,10 +261,11 @@ function run_behat_tests() {
     FAILED_SCENARIO_PATHS_COLORED=`awk '/Failed scenarios:/',0 ${TEST_LOG_FILE} | grep -a feature`
     # There will be some ANSI escape codes for color in the FEATURE_COLORED var.
     # Strip them out so we can pass just the ordinary feature details to Behat.
+    # Also strip everything after ".feature:XX", including text such as "(on line xx)" added by Behat indicating the failing step's line number.
     # Thanks to https://en.wikipedia.org/wiki/Tee_(command) and
     # https://stackoverflow.com/questions/23416278/how-to-strip-ansi-escape-sequences-from-a-variable
     # for ideas.
-    FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g")
+    FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g" | sed 's/\(\.feature:[0-9]\+\).*/\1/')
 
     # If something else went wrong, and there were no failed scenarios,
     # then the awk, grep, sed command sequence above ends up with an empty string.
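The same two-stage cleanup can be sketched in Go to show what the added sed expression does; the sample input string below is invented for the demonstration:

```go
package main

import (
	"fmt"
	"regexp"
)

// First drop ANSI color codes, then cut everything after ".feature:<line>",
// mirroring the two sed invocations in the script.
func main() {
	ansi := regexp.MustCompile("\x1b[^m]*m")
	tail := regexp.MustCompile(`(\.feature:[0-9]+).*`)

	in := "\x1b[31mtests/acceptance/features/apiTrashbin/trashbinRestore.feature:570\x1b[0m (on line 575)"
	out := tail.ReplaceAllString(ansi.ReplaceAllString(in, ""), "$1")
	fmt.Println(out) // tests/acceptance/features/apiTrashbin/trashbinRestore.feature:570
}
```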
@@ -8,7 +8,7 @@
         }
     },
     "require": {
-        "behat/behat": "^3.13",
+        "behat/behat": "^3.24",
         "behat/gherkin": "^4.9",
         "behat/mink": "1.7.1",
         "friends-of-behat/mink-extension": "^2.7",
vendor/github.com/nats-io/nats-server/v2/server/const.go (generated, vendored, 2 lines changed)
@@ -58,7 +58,7 @@ func init() {
 
 const (
 	// VERSION is the current version for the server.
-	VERSION = "2.11.0"
+	VERSION = "2.11.1"
 
 	// PROTO is the currently supported protocol.
 	// 0 was the original
vendor/github.com/nats-io/nats-server/v2/server/jetstream.go (generated, vendored, 4 lines changed)
@@ -2315,7 +2315,7 @@ func (js *jetStream) checkBytesLimits(selectedLimits *JetStreamAccountLimits, ad
 			return NewJSMemoryResourcesExceededError()
 		}
 		// Check if this server can handle request.
-		if checkServer && js.memReserved+addBytes > js.config.MaxMemory {
+		if checkServer && js.memReserved+totalBytes > js.config.MaxMemory {
 			return NewJSMemoryResourcesExceededError()
 		}
 	case FileStorage:
@@ -2324,7 +2324,7 @@ func (js *jetStream) checkBytesLimits(selectedLimits *JetStreamAccountLimits, ad
 			return NewJSStorageResourcesExceededError()
 		}
 		// Check if this server can handle request.
-		if checkServer && js.storeReserved+addBytes > js.config.MaxStore {
+		if checkServer && js.storeReserved+totalBytes > js.config.MaxStore {
 			return NewJSStorageResourcesExceededError()
 		}
 	}
vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go (generated, vendored, 14 lines changed)
@@ -2526,6 +2526,9 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Ac
 		s.Warnf(badAPIRequestT, msg)
 		return
 	}
+	if acc != s.SystemAccount() {
+		return
+	}
 
 	js, cc := s.getJetStreamCluster()
 	if js == nil || cc == nil || cc.meta == nil {
@@ -2650,6 +2653,10 @@ func (s *Server) jsLeaderServerStreamMoveRequest(sub *subscription, c *client, _
 	accName := tokenAt(subject, 6)
 	streamName := tokenAt(subject, 7)
 
+	if acc.GetName() != accName && acc != s.SystemAccount() {
+		return
+	}
+
 	var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
 
 	var req JSApiMetaServerStreamMoveRequest
@@ -2807,6 +2814,10 @@ func (s *Server) jsLeaderServerStreamCancelMoveRequest(sub *subscription, c *cli
 	accName := tokenAt(subject, 6)
 	streamName := tokenAt(subject, 7)
 
+	if acc.GetName() != accName && acc != s.SystemAccount() {
+		return
+	}
+
 	targetAcc, ok := s.accounts.Load(accName)
 	if !ok {
 		resp.Error = NewJSNoAccountError()
@@ -2893,6 +2904,9 @@ func (s *Server) jsLeaderAccountPurgeRequest(sub *subscription, c *client, _ *Ac
 		s.Warnf(badAPIRequestT, msg)
 		return
 	}
+	if acc != s.SystemAccount() {
+		return
+	}
 
 	js := s.getJetStream()
 	if js == nil {
vendor/github.com/nats-io/nats-server/v2/server/stream.go (generated, vendored, 27 lines changed)
@@ -6303,6 +6303,10 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error
 	if err != nil {
 		return nil, err
 	}
+	js := jsa.js
+	if js == nil {
+		return nil, NewJSNotEnabledForAccountError()
+	}
 
 	cfg, apiErr := s.checkStreamCfg(ncfg, a, false)
 	if apiErr != nil {
@@ -6337,6 +6341,22 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error
 	}
 	sdirCheck := filepath.Clean(sdir) + string(os.PathSeparator)
 
+	_, isClustered := jsa.jetStreamAndClustered()
+	jsa.usageMu.RLock()
+	selected, tier, hasTier := jsa.selectLimits(cfg.Replicas)
+	jsa.usageMu.RUnlock()
+	reserved := int64(0)
+	if hasTier {
+		if isClustered {
+			js.mu.RLock()
+			_, reserved = tieredStreamAndReservationCount(js.cluster.streams[a.Name], tier, &cfg)
+			js.mu.RUnlock()
+		} else {
+			reserved = jsa.tieredReservation(tier, &cfg)
+		}
+	}
+
+	var bc int64
 	tr := tar.NewReader(s2.NewReader(r))
 	for {
 		hdr, err := tr.Next()
@@ -6349,6 +6369,13 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error
 		if hdr.Typeflag != tar.TypeReg {
 			return nil, logAndReturnError()
 		}
+		bc += hdr.Size
+		js.mu.RLock()
+		err = js.checkAllLimits(&selected, &cfg, reserved, bc)
+		js.mu.RUnlock()
+		if err != nil {
+			return nil, err
+		}
 		fpath := filepath.Join(sdir, filepath.Clean(hdr.Name))
 		if !strings.HasPrefix(fpath, sdirCheck) {
 			return nil, logAndReturnError()
vendor/github.com/nats-io/nats.go/README.md (generated, vendored, 2 lines changed)
@@ -23,7 +23,7 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io
 go get github.com/nats-io/nats.go@latest
 
 # To get a specific version:
-go get github.com/nats-io/nats.go@v1.39.1
+go get github.com/nats-io/nats.go@v1.41.0
 
 # Note that the latest major version for NATS Server is v2:
 go get github.com/nats-io/nats-server/v2@latest
vendor/github.com/nats-io/nats.go/go_test.mod (generated, vendored, 17 changes)
@@ -1,23 +1,24 @@
module github.com/nats-io/nats.go

go 1.22.0
go 1.23.0

require (
    github.com/golang/protobuf v1.4.2
    github.com/klauspost/compress v1.17.11
    github.com/klauspost/compress v1.18.0
    github.com/nats-io/jwt v1.2.2
    github.com/nats-io/nats-server/v2 v2.10.24
    github.com/nats-io/nkeys v0.4.9
    github.com/nats-io/nats-server/v2 v2.11.0
    github.com/nats-io/nkeys v0.4.10
    github.com/nats-io/nuid v1.0.1
    go.uber.org/goleak v1.3.0
    golang.org/x/text v0.21.0
    golang.org/x/text v0.23.0
    google.golang.org/protobuf v1.23.0
)

require (
    github.com/google/go-tpm v0.9.3 // indirect
    github.com/minio/highwayhash v1.0.3 // indirect
    github.com/nats-io/jwt/v2 v2.7.3 // indirect
    golang.org/x/crypto v0.31.0 // indirect
    golang.org/x/sys v0.28.0 // indirect
    golang.org/x/time v0.8.0 // indirect
    golang.org/x/crypto v0.36.0 // indirect
    golang.org/x/sys v0.31.0 // indirect
    golang.org/x/time v0.11.0 // indirect
)
vendor/github.com/nats-io/nats.go/go_test.sum (generated, vendored, 36 changes)
@@ -1,3 +1,5 @@
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0=
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -9,21 +11,24 @@ github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-tpm v0.9.3 h1:+yx0/anQuGzi+ssRqeD6WpXjW2L/V0dItUayO0i9sRc=
github.com/google/go-tpm v0.9.3/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
github.com/nats-io/nats-server/v2 v2.10.24 h1:KcqqQAD0ZZcG4yLxtvSFJY7CYKVYlnlWoAiVZ6i/IY4=
github.com/nats-io/nats-server/v2 v2.10.24/go.mod h1:olvKt8E5ZlnjyqBGbAXtxvSQKsPodISK5Eo/euIta4s=
github.com/nats-io/nats-server/v2 v2.11.0 h1:fdwAT1d6DZW/4LUz5rkvQUe5leGEwjjOQYntzVRKvjE=
github.com/nats-io/nats-server/v2 v2.11.0/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -34,20 +39,19 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
vendor/github.com/nats-io/nats.go/js.go (generated, vendored, 175 changes)
@@ -1,4 +1,4 @@
// Copyright 2020-2024 The NATS Authors
// Copyright 2020-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -273,6 +273,8 @@ type jsOpts struct {
    aecb MsgErrHandler
    // Max async pub ack in flight
    maxpa int
    // ackTimeout is the max time to wait for an ack in async publish.
    ackTimeout time.Duration
    // the domain that produced the pre
    domain string
    // enables protocol tracing
@@ -466,13 +468,14 @@ func (opt pubOptFn) configurePublish(opts *pubOpts) error {
}

type pubOpts struct {
    ctx context.Context
    ttl time.Duration
    id  string
    lid string  // Expected last msgId
    str string  // Expected stream name
    seq *uint64 // Expected last sequence
    lss *uint64 // Expected last sequence per subject
    ctx    context.Context
    ttl    time.Duration
    id     string
    lid    string        // Expected last msgId
    str    string        // Expected stream name
    seq    *uint64       // Expected last sequence
    lss    *uint64       // Expected last sequence per subject
    msgTTL time.Duration // Message TTL

    // Publish retries for NoResponders err.
    rwait time.Duration // Retry wait between attempts
@@ -507,6 +510,7 @@ const (
    ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence"
    ExpectedLastMsgIdHdr   = "Nats-Expected-Last-Msg-Id"
    MsgRollup              = "Nats-Rollup"
    MsgTTLHdr              = "Nats-TTL"
)

// Headers for republished messages and direct gets.
@@ -566,6 +570,9 @@ func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
    if o.lss != nil {
        m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
    }
    if o.msgTTL > 0 {
        m.Header.Set(MsgTTLHdr, o.msgTTL.String())
    }

    var resp *Msg
    var err error
@@ -648,6 +655,7 @@ type pubAckFuture struct {
    maxRetries int
    retryWait  time.Duration
    reply      string
    timeout    *time.Timer
}

func (paf *pubAckFuture) Ok() <-chan *PubAck {
@@ -712,13 +720,19 @@ func (js *js) newAsyncReply() string {
    }
    var sb strings.Builder
    sb.WriteString(js.rpre)
    rn := js.rr.Int63()
    var b [aReplyTokensize]byte
    for i, l := 0, rn; i < len(b); i++ {
        b[i] = rdigits[l%base]
        l /= base
    for {
        rn := js.rr.Int63()
        var b [aReplyTokensize]byte
        for i, l := 0, rn; i < len(b); i++ {
            b[i] = rdigits[l%base]
            l /= base
        }
        if _, ok := js.pafs[string(b[:])]; ok {
            continue
        }
        sb.Write(b[:])
        break
    }
    sb.Write(b[:])
    js.mu.Unlock()
    return sb.String()
}
@@ -894,6 +908,10 @@ func (js *js) handleAsyncReply(m *Msg) {
        }
    }

    if paf.timeout != nil {
        paf.timeout.Stop()
    }

    // Process no responders etc.
    if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
        if paf.retries < paf.maxRetries {
@@ -975,6 +993,15 @@ func PublishAsyncMaxPending(max int) JSOpt {
    })
}

// PublishAsyncTimeout sets the timeout for async message publish.
// If not provided, timeout is disabled.
func PublishAsyncTimeout(dur time.Duration) JSOpt {
    return jsOptFn(func(opts *jsOpts) error {
        opts.ackTimeout = dur
        return nil
    })
}

// PublishAsync publishes a message to JetStream and returns a PubAckFuture
func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) {
    return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...)
@@ -1024,6 +1051,9 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
    if o.lss != nil {
        m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
    }
    if o.msgTTL > 0 {
        m.Header.Set(MsgTTLHdr, o.msgTTL.String())
    }

    // Reply
    paf := o.pafRetry
@@ -1050,11 +1080,52 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
            case <-js.asyncStall():
            case <-time.After(stallWait):
                js.clearPAF(id)
                return nil, errors.New("nats: stalled with too many outstanding async published messages")
                return nil, ErrTooManyStalledMsgs
            }
        }
        if js.opts.ackTimeout > 0 {
            paf.timeout = time.AfterFunc(js.opts.ackTimeout, func() {
                js.mu.Lock()
                defer js.mu.Unlock()

                if _, ok := js.pafs[id]; !ok {
                    // paf has already been resolved
                    // while waiting for the lock
                    return
                }

                // ack timed out, remove from pending acks
                delete(js.pafs, id)

                // check on anyone stalled and waiting.
                if js.stc != nil && len(js.pafs) < js.opts.maxpa {
                    close(js.stc)
                    js.stc = nil
                }

                // send error to user
                paf.err = ErrAsyncPublishTimeout
                if paf.errCh != nil {
                    paf.errCh <- paf.err
                }

                // call error callback if set
                if js.opts.aecb != nil {
                    js.opts.aecb(js, paf.msg, ErrAsyncPublishTimeout)
                }

                // check on anyone one waiting on done status.
                if js.dch != nil && len(js.pafs) == 0 {
                    close(js.dch)
                    js.dch = nil
                }
            })
        }
    } else {
        reply = paf.reply
        if paf.timeout != nil {
            paf.timeout.Reset(js.opts.ackTimeout)
        }
        id = reply[js.replyPrefixLen:]
    }
    hdr, err := m.headerBytes()
@@ -1151,6 +1222,15 @@ func StallWait(ttl time.Duration) PubOpt {
    })
}

// MsgTTL sets per msg TTL.
// Requires [StreamConfig.AllowMsgTTL] to be enabled.
func MsgTTL(dur time.Duration) PubOpt {
    return pubOptFn(func(opts *pubOpts) error {
        opts.msgTTL = dur
        return nil
    })
}

type ackOpts struct {
    ttl time.Duration
    ctx context.Context
@@ -1361,6 +1441,9 @@ type jsSub struct {
    fciseq uint64
    csfct  *time.Timer

    // context set on js.Subscribe used e.g. to recreate ordered consumer
    ctx context.Context

    // Cancellation function to cancel context on drain/unsubscribe.
    cancel func()
}
@@ -1833,6 +1916,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
        psubj:   subj,
        cancel:  cancel,
        ackNone: o.cfg.AckPolicy == AckNonePolicy,
        ctx:     o.ctx,
    }

    // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy
@@ -1864,7 +1948,12 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
        } else if consName == "" {
            consName = getHash(nuid.Next())
        }
        info, err := js.upsertConsumer(stream, consName, ccreq.Config)
        var info *ConsumerInfo
        if o.ctx != nil {
            info, err = js.upsertConsumer(stream, consName, ccreq.Config, Context(o.ctx))
        } else {
            info, err = js.upsertConsumer(stream, consName, ccreq.Config)
        }
        if err != nil {
            var apiErr *APIError
            if ok := errors.As(err, &apiErr); !ok {
@@ -2196,7 +2285,13 @@ func (sub *Subscription) resetOrderedConsumer(sseq uint64) {
        jsi.consumer = ""
        sub.mu.Unlock()
        consName := getHash(nuid.Next())
        cinfo, err := js.upsertConsumer(jsi.stream, consName, cfg)
        var cinfo *ConsumerInfo
        var err error
        if js.opts.ctx != nil {
            cinfo, err = js.upsertConsumer(jsi.stream, consName, cfg, Context(js.opts.ctx))
        } else {
            cinfo, err = js.upsertConsumer(jsi.stream, consName, cfg)
        }
        if err != nil {
            var apiErr *APIError
            if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
@@ -2206,6 +2301,9 @@ func (sub *Subscription) resetOrderedConsumer(sseq uint64) {
                // retry for insufficient resources, as it may mean that client is connected to a running
                // server in cluster while the server hosting R1 JetStream resources is restarting
                return
            } else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeJetStreamNotAvailable {
                // retry if JetStream meta leader is temporarily unavailable
                return
            }
            pushErr(err)
            return
@@ -2975,6 +3073,11 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
        }
    }
    var hbTimer *time.Timer
    defer func() {
        if hbTimer != nil {
            hbTimer.Stop()
        }
    }()
    var hbErr error
    sub.mu.Lock()
    subClosed := sub.closed || sub.draining
@@ -2983,6 +3086,7 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
        err = errors.Join(ErrBadSubscription, ErrSubscriptionClosed)
    }
    hbLock := sync.Mutex{}
    var disconnected atomic.Bool
    if err == nil && len(msgs) < batch && !subClosed {
        // For batch real size of 1, it does not make sense to set no_wait in
        // the request.
@@ -3037,7 +3141,17 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
            }
            return nil
        }

        connStatusChanged := nc.StatusChanged()
        go func() {
            select {
            case <-ctx.Done():
                return
            case <-connStatusChanged:
                disconnected.Store(true)
                cancel()
                return
            }
        }()
        err = sendReq()
        for err == nil && len(msgs) < batch {
            // Ask for next message and wait if there are no messages
@@ -3064,9 +3178,6 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
                }
            }
        }
        if hbTimer != nil {
            hbTimer.Stop()
        }
    }
    // If there is at least a message added to msgs, then need to return OK and no error
    if err != nil && len(msgs) == 0 {
@@ -3075,6 +3186,9 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
        if hbErr != nil {
            return nil, hbErr
        }
        if disconnected.Load() {
            return nil, ErrFetchDisconnected
        }
        return nil, o.checkCtxErr(err)
    }
    return msgs, nil
@@ -3285,6 +3399,18 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
    }
    expires := ttl - expiresDiff

    connStatusChanged := nc.StatusChanged()
    var disconnected atomic.Bool
    go func() {
        select {
        case <-ctx.Done():
            return
        case <-connStatusChanged:
            disconnected.Store(true)
            cancel()
            return
        }
    }()
    requestBatch := batch - len(result.msgs)
    req := nextRequest{
        Expires: expires,
@@ -3309,6 +3435,11 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
        return result, nil
    }
    var hbTimer *time.Timer
    defer func() {
        if hbTimer != nil {
            hbTimer.Stop()
        }
    }()
    var hbErr error
    if o.hb > 0 {
        hbTimer = time.AfterFunc(2*o.hb, func() {
@@ -3355,6 +3486,8 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
        result.Lock()
        if hbErr != nil {
            result.err = hbErr
        } else if disconnected.Load() {
            result.err = ErrFetchDisconnected
        } else {
            result.err = o.checkCtxErr(err)
        }
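The new PublishAsyncTimeout option together with the pubAckFuture timeout handling bounds how long an async publish may sit unacked. A small usage sketch against the nats.go API shown in this diff; the server URL and the subject/stream name are assumptions:

package main

import (
    "fmt"
    "time"

    "github.com/nats-io/nats.go"
)

func main() {
    nc, err := nats.Connect(nats.DefaultURL)
    if err != nil {
        panic(err)
    }
    defer nc.Close()

    // Fail pending async publishes that receive no ack within 2 seconds.
    js, err := nc.JetStream(nats.PublishAsyncTimeout(2 * time.Second))
    if err != nil {
        panic(err)
    }

    paf, err := js.PublishAsync("ORDERS.new", []byte("hello"))
    if err != nil {
        panic(err)
    }
    select {
    case ack := <-paf.Ok():
        fmt.Println("acked:", ack.Stream, ack.Sequence)
    case err := <-paf.Err():
        // With the timeout set, this can now be ErrAsyncPublishTimeout.
        fmt.Println("publish failed:", err)
    }
}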
vendor/github.com/nats-io/nats.go/jserrors.go (generated, vendored, 14 changes)
@@ -1,4 +1,4 @@
// Copyright 2020-2023 The NATS Authors
// Copyright 2020-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -157,6 +157,17 @@ var (
    // Deprecated: ErrInvalidDurableName is no longer returned and will be removed in future releases.
    // Use ErrInvalidConsumerName instead.
    ErrInvalidDurableName = errors.New("nats: invalid durable name")

    // ErrAsyncPublishTimeout is returned when waiting for ack on async publish
    ErrAsyncPublishTimeout JetStreamError = &jsError{message: "timeout waiting for ack"}

    // ErrTooManyStalledMsgs is returned when too many outstanding async
    // messages are waiting for ack.
    ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"}

    // ErrFetchDisconnected is returned when the connection to the server is lost
    // while waiting for messages to be delivered on PullSubscribe.
    ErrFetchDisconnected = &jsError{message: "disconnected during fetch"}
)

// Error code represents JetStream error codes returned by the API
@@ -166,6 +177,7 @@ const (
    JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
    JSErrCodeJetStreamNotEnabled           ErrorCode = 10076
    JSErrCodeInsufficientResourcesErr      ErrorCode = 10023
    JSErrCodeJetStreamNotAvailable         ErrorCode = 10008

    JSErrCodeStreamNotFound  ErrorCode = 10059
    JSErrCodeStreamNameInUse ErrorCode = 10058
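Because the new sentinels are package-level values, callers can branch on them with errors.Is. A hedged sketch of how client code might tell the new failure modes apart:

package example

import (
    "errors"

    "github.com/nats-io/nats.go"
)

// classify maps the sentinels introduced in this diff to recovery hints.
func classify(err error) string {
    switch {
    case errors.Is(err, nats.ErrAsyncPublishTimeout):
        return "ack did not arrive within PublishAsyncTimeout"
    case errors.Is(err, nats.ErrTooManyStalledMsgs):
        return "too many unacked async publishes in flight; slow down"
    case errors.Is(err, nats.ErrFetchDisconnected):
        return "connection dropped mid-fetch; the Fetch can be retried"
    default:
        return "other error"
    }
}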
vendor/github.com/nats-io/nats.go/jsm.go (generated, vendored, 8 changes)
@@ -243,6 +243,14 @@ type StreamConfig struct {
    // Template identifies the template that manages the Stream. Deprecated:
    // This feature is no longer supported.
    Template string `json:"template_owner,omitempty"`

    // AllowMsgTTL allows header initiated per-message TTLs.
    // This feature requires nats-server v2.11.0 or later.
    AllowMsgTTL bool `json:"allow_msg_ttl"`

    // Enables and sets a duration for adding server markers for delete, purge and max age limits.
    // This feature requires nats-server v2.11.0 or later.
    SubjectDeleteMarkerTTL time.Duration `json:"subject_delete_marker_ttl,omitempty"`
}

// SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received.
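AllowMsgTTL and the MsgTTL publish option from js.go work as a pair: the stream opts in, then individual messages carry their own expiry via the Nats-TTL header. A usage sketch; the stream name and subject are assumptions, and the server must be nats-server 2.11 or later:

package main

import (
    "time"

    "github.com/nats-io/nats.go"
)

func main() {
    nc, err := nats.Connect(nats.DefaultURL)
    if err != nil {
        panic(err)
    }
    defer nc.Close()
    js, _ := nc.JetStream()

    // Opt the stream in to header-initiated per-message TTLs.
    if _, err := js.AddStream(&nats.StreamConfig{
        Name:        "EPHEMERAL",
        Subjects:    []string{"ephemeral.>"},
        AllowMsgTTL: true,
    }); err != nil {
        panic(err)
    }

    // MsgTTL sets the Nats-TTL header for this one message only.
    if _, err := js.Publish("ephemeral.token", []byte("expires soon"),
        nats.MsgTTL(30*time.Second)); err != nil {
        panic(err)
    }
}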
vendor/github.com/nats-io/nats.go/kv.go (generated, vendored, 36 changes)
@@ -826,6 +826,8 @@ func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error {
            deleteMarkers = append(deleteMarkers, entry)
        }
    }
    // Stop watcher here so as we purge we do not have the system continually updating numPending.
    watcher.Stop()

    var (
        pr StreamPurgeRequest
@@ -929,13 +931,14 @@ func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) {

// Implementation for Watch
type watcher struct {
    mu          sync.Mutex
    updates     chan KeyValueEntry
    sub         *Subscription
    initDone    bool
    initPending uint64
    received    uint64
    ctx         context.Context
    mu            sync.Mutex
    updates       chan KeyValueEntry
    sub           *Subscription
    initDone      bool
    initPending   uint64
    received      uint64
    ctx           context.Context
    initDoneTimer *time.Timer
}

// Context returns the context for the watcher if set.
@@ -1044,8 +1047,11 @@ func (kv *kvs) WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error
                w.initPending = delta
            }
            if w.received > w.initPending || delta == 0 {
                w.initDoneTimer.Stop()
                w.initDone = true
                w.updates <- nil
            } else if w.initDoneTimer != nil {
                w.initDoneTimer.Reset(kv.js.opts.wait)
            }
        }
    }
@@ -1088,6 +1094,16 @@ func (kv *kvs) WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error
        if sub.jsi != nil && sub.jsi.pending == 0 {
            w.initDone = true
            w.updates <- nil
        } else {
            // Set a timer to send the marker if we do not get any messages.
            w.initDoneTimer = time.AfterFunc(kv.js.opts.wait, func() {
                w.mu.Lock()
                defer w.mu.Unlock()
                if !w.initDone {
                    w.initDone = true
                    w.updates <- nil
                }
            })
        }
    } else {
        // if UpdatesOnly was used, mark initialization as complete
@@ -1095,8 +1111,14 @@ func (kv *kvs) WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error
    }
    // Set us up to close when the waitForMessages func returns.
    sub.pDone = func(_ string) {
        w.mu.Lock()
        defer w.mu.Unlock()
        if w.initDoneTimer != nil {
            w.initDoneTimer.Stop()
        }
        close(w.updates)
    }

    sub.mu.Unlock()

    w.sub = sub
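The initDoneTimer change fixes KV watchers that would otherwise never deliver their "initial values done" nil marker when no messages arrive. The underlying pattern, a mutex-guarded one-shot that fires either from the replay path or from a fallback timer, is reusable on its own; a hedged sketch:

package example

import (
    "sync"
    "time"
)

// initMarker delivers a single "initialization done" signal on updates,
// either when the expected backlog has been replayed or, failing that,
// when a fallback timer fires. This mirrors the initDoneTimer logic above.
type initMarker struct {
    mu       sync.Mutex
    done     bool
    updates  chan struct{}
    fallback *time.Timer
}

func newInitMarker(wait time.Duration) *initMarker {
    m := &initMarker{updates: make(chan struct{}, 1)}
    m.fallback = time.AfterFunc(wait, m.finish)
    return m
}

// finish is safe to call from both the replay path and the timer;
// whichever runs first wins, the other is a no-op.
func (m *initMarker) finish() {
    m.mu.Lock()
    defer m.mu.Unlock()
    if m.done {
        return
    }
    m.done = true
    m.fallback.Stop()
    m.updates <- struct{}{}
}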
vendor/github.com/nats-io/nats.go/nats.go (generated, vendored, 61 changes)
@@ -47,7 +47,7 @@ import (

// Default Constants
const (
    Version             = "1.39.1"
    Version             = "1.41.0"
    DefaultURL          = "nats://127.0.0.1:4222"
    DefaultPort         = 4222
    DefaultMaxReconnect = 60
@@ -274,7 +274,6 @@ type InProcessConnProvider interface {

// Options can be used to create a customized connection.
type Options struct {

    // Url represents a single NATS server url to which the client
    // will be connecting. If the Servers option is also set, it
    // then becomes the first server in the Servers array.
@@ -422,6 +421,10 @@ type Options struct {
    // AsyncErrorCB sets the async error handler (e.g. slow consumer errors)
    AsyncErrorCB ErrHandler

    // ReconnectErrCB sets the callback that is invoked whenever a
    // reconnect attempt failed
    ReconnectErrCB ConnErrHandler

    // ReconnectBufSize is the size of the backing bufio during reconnect.
    // Once this has been exhausted publish operations will return an error.
    // Defaults to 8388608 bytes (8MB).
@@ -1151,6 +1154,14 @@ func ReconnectHandler(cb ConnHandler) Option {
    }
}

// ReconnectErrHandler is an Option to set the reconnect error handler.
func ReconnectErrHandler(cb ConnErrHandler) Option {
    return func(o *Options) error {
        o.ReconnectErrCB = cb
        return nil
    }
}

// ClosedHandler is an Option to set the closed handler.
func ClosedHandler(cb ConnHandler) Option {
    return func(o *Options) error {
@@ -2213,6 +2224,7 @@ func (nc *Conn) ForceReconnect() error {
    // even if we're in the middle of a backoff
    if nc.rqch != nil {
        close(nc.rqch)
        nc.rqch = nil
    }
    return nil
}
@@ -2386,7 +2398,6 @@ func (nc *Conn) setup() {

// Process a connected connection and initialize properly.
func (nc *Conn) processConnectInit() error {

    // Set our deadline for the whole connect process
    nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout))
    defer nc.conn.SetDeadline(time.Time{})
@@ -2535,7 +2546,6 @@ func (nc *Conn) checkForSecure() error {
// processExpectedInfo will look for the expected first INFO message
// sent when a connection is established. The lock should be held entering.
func (nc *Conn) processExpectedInfo() error {

    c := &control{}

    // Read the protocol
@@ -2640,8 +2650,10 @@ func (nc *Conn) connectProto() (string, error) {

    // If our server does not support headers then we can't do them or no responders.
    hdrs := nc.info.Headers
    cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
        o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs}
    cinfo := connectInfo{
        o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
        o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs,
    }

    b, err := json.Marshal(cinfo)
    if err != nil {
@@ -2832,6 +2844,16 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) {
    var rt *time.Timer
    // Channel used to kick routine out of sleep when conn is closed.
    rqch := nc.rqch

    // if rqch is nil, we need to set it up to signal
    // the reconnect loop to reconnect immediately
    // this means that `ForceReconnect` was called
    // before entering doReconnect
    if rqch == nil {
        rqch = make(chan struct{})
        close(rqch)
    }

    // Counter that is increased when the whole list of servers has been tried.
    var wlf int
@@ -2911,10 +2933,13 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) {

        // Try to create a new connection
        err = nc.createConn()

        // Not yet connected, retry...
        // Continue to hold the lock
        if err != nil {
            // Perform appropriate callback for a failed connection attempt.
            if nc.Opts.ReconnectErrCB != nil {
                nc.ach.push(func() { nc.Opts.ReconnectErrCB(nc, err) })
            }
            nc.err = nil
            continue
        }
@@ -3259,7 +3284,7 @@ func (nc *Conn) processMsg(data []byte) {
    // It's possible that we end-up not using the message, but that's ok.

    // FIXME(dlc): Need to copy, should/can do COW?
    var msgPayload = data
    msgPayload := data
    if !nc.ps.msgCopied {
        msgPayload = make([]byte, len(data))
        copy(msgPayload, data)
@@ -3450,8 +3475,10 @@ slowConsumer:
    }
}

var permissionsRe = regexp.MustCompile(`Subscription to "(\S+)"`)
var permissionsQueueRe = regexp.MustCompile(`using queue "(\S+)"`)
var (
    permissionsRe      = regexp.MustCompile(`Subscription to "(\S+)"`)
    permissionsQueueRe = regexp.MustCompile(`using queue "(\S+)"`)
)

// processTransientError is called when the server signals a non terminal error
// which does not close the connection or trigger a reconnect.
@@ -3976,7 +4003,7 @@ func (nc *Conn) publish(subj, reply string, hdr, data []byte) error {
    // go 1.14 some values strconv faster, may be able to switch over.

    var b [12]byte
    var i = len(b)
    i := len(b)

    if hdr != nil {
        if len(hdr) > 0 {
@@ -4584,6 +4611,8 @@ func (s *Subscription) StatusChanged(statuses ...SubStatus) <-chan SubStatus {
        statuses = []SubStatus{SubscriptionActive, SubscriptionDraining, SubscriptionClosed, SubscriptionSlowConsumer}
    }
    ch := make(chan SubStatus, 10)
    s.mu.Lock()
    defer s.mu.Unlock()
    for _, status := range statuses {
        s.registerStatusChangeListener(status, ch)
        // initial status
@@ -4597,9 +4626,8 @@ func (s *Subscription) StatusChanged(statuses ...SubStatus) <-chan SubStatus {
// registerStatusChangeListener registers a channel waiting for a specific status change event.
// Status change events are non-blocking - if no receiver is waiting for the status change,
// it will not be sent on the channel. Closed channels are ignored.
// Lock should be held entering.
func (s *Subscription) registerStatusChangeListener(status SubStatus, ch chan SubStatus) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.statListeners == nil {
        s.statListeners = make(map[chan SubStatus][]SubStatus)
    }
@@ -5677,7 +5705,7 @@ func (nc *Conn) IsDraining() bool {
// caller must lock
func (nc *Conn) getServers(implicitOnly bool) []string {
    poolSize := len(nc.srvPool)
    var servers = make([]string, 0)
    servers := make([]string, 0)
    for i := 0; i < poolSize; i++ {
        if implicitOnly && !nc.srvPool[i].isImplicit {
            continue
@@ -5877,6 +5905,8 @@ func (nc *Conn) StatusChanged(statuses ...Status) chan Status {
        statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED}
    }
    ch := make(chan Status, 10)
    nc.mu.Lock()
    defer nc.mu.Unlock()
    for _, s := range statuses {
        nc.registerStatusChangeListener(s, ch)
    }
@@ -5886,9 +5916,8 @@ func (nc *Conn) StatusChanged(statuses ...Status) chan Status {
// registerStatusChangeListener registers a channel waiting for a specific status change event.
// Status change events are non-blocking - if no receiver is waiting for the status change,
// it will not be sent on the channel. Closed channels are ignored.
// The lock should be held entering.
func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) {
    nc.mu.Lock()
    defer nc.mu.Unlock()
    if nc.statListeners == nil {
        nc.statListeners = make(map[Status][]chan Status)
    }
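The new ReconnectErrHandler option surfaces every failed reconnect attempt, not just the eventual success (ReconnectHandler) or final closure (ClosedHandler). A usage sketch; the wait and retry settings are arbitrary choices for illustration:

package main

import (
    "log"
    "time"

    "github.com/nats-io/nats.go"
)

func main() {
    nc, err := nats.Connect(nats.DefaultURL,
        nats.MaxReconnects(-1),
        nats.ReconnectWait(time.Second),
        // Fires on each failed reconnect attempt with the attempt's error.
        nats.ReconnectErrHandler(func(nc *nats.Conn, err error) {
            log.Printf("reconnect attempt failed: %v", err)
        }),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer nc.Close()
}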
vendor/github.com/nats-io/nats.go/object.go (generated, vendored, 4 changes)
@@ -1127,6 +1127,10 @@ func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) {
    if err != nil {
        return nil, err
    }
    // Set us up to close when the waitForMessages func returns.
    sub.pDone = func(_ string) {
        close(w.updates)
    }
    w.sub = sub
    return w, nil
}
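The hunk ties the object watcher's updates channel to the subscription's internal done callback, so the channel is closed exactly once, when the delivery goroutine exits. The same idea in plain Go, with a hypothetical source type standing in for the subscription:

package example

// source is a hypothetical stand-in for a subscription whose delivery
// loop invokes onDone exactly once when it returns.
type source struct {
    onDone func()
}

// watch hands ownership of channel closure to the producer side, so
// consumers see normal range/recv termination instead of a leaked channel.
func watch(src *source) <-chan string {
    updates := make(chan string)
    src.onDone = func() {
        close(updates)
    }
    return updates
}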
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore/blobstore.go (generated, vendored, 149 changes)
@@ -20,13 +20,21 @@
package blobstore

import (
    "bufio"
    "context"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "time"

    "github.com/pkg/errors"
    "github.com/pkg/xattr"

    "github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node"
    "github.com/pkg/errors"
)

const (
    TMPDir = ".oc-tmp"
)

// Blobstore provides an interface to an filesystem based blobstore
@@ -41,61 +49,106 @@ func New(root string) (*Blobstore, error) {
    }, nil
}

// Upload stores some data in the blobstore under the given key
func (bs *Blobstore) Upload(node *node.Node, source, copyTarget string) error {
    path := node.InternalPath()
// Upload is responsible for transferring data from a source file (upload) to its final location;
// the file operation is done atomically using a temporary file followed by a rename
func (bs *Blobstore) Upload(n *node.Node, source, copyTarget string) error {
    tempName := filepath.Join(n.SpaceRoot.InternalPath(), TMPDir, filepath.Base(source))

    // preserve the mtime of the file
    fi, _ := os.Stat(path)

    file, err := os.Open(source)
    if err != nil {
        return errors.Wrap(err, "Decomposedfs: posix blobstore: Can not open source file to upload")
    }
    defer file.Close()

    f, err := os.OpenFile(node.InternalPath(), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0700)
    if err != nil {
        return errors.Wrapf(err, "could not open blob '%s' for writing", node.InternalPath())
    }
    defer f.Close()

    w := bufio.NewWriter(f)
    _, err = w.ReadFrom(file)
    if err != nil {
        return errors.Wrapf(err, "could not write blob '%s'", node.InternalPath())
    }
    err = w.Flush()
    if err != nil {
        return err
    }
    err = os.Chtimes(path, fi.ModTime(), fi.ModTime())
    if err != nil {
    // there is no guarantee that the space root TMPDir exists at this point, so we create the directory if needed
    if err := os.MkdirAll(filepath.Dir(tempName), 0700); err != nil {
        return err
    }

    if copyTarget != "" {
        // also "upload" the file to a local path, e.g. for keeping the "current" version of the file
        err := os.MkdirAll(filepath.Dir(copyTarget), 0700)
        if err != nil {
            return err
    sourceFile, err := os.Open(source)
    if err != nil {
        return fmt.Errorf("failed to open source file '%s': %v", source, err)
    }
    defer func() {
        _ = sourceFile.Close()
    }()

    tempFile, err := os.OpenFile(tempName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0700)
    if err != nil {
        return fmt.Errorf("unable to create temp file '%s': %v", tempName, err)
    }

    if _, err := tempFile.ReadFrom(sourceFile); err != nil {
        return fmt.Errorf("failed to write data from source file '%s' to temp file '%s' - %v", source, tempName, err)
    }

    if err := tempFile.Sync(); err != nil {
        return fmt.Errorf("failed to sync temp file '%s' - %v", tempName, err)
    }

    if err := tempFile.Close(); err != nil {
        return fmt.Errorf("failed to close temp file '%s' - %v", tempName, err)
    }

    nodeAttributes, err := n.Xattrs(context.Background())
    if err != nil {
        return fmt.Errorf("failed to get xattrs for node '%s': %v", n.InternalPath(), err)
    }

    var mtime *time.Time
    for k, v := range nodeAttributes {
        if err := xattr.Set(tempName, k, v); err != nil {
            return fmt.Errorf("failed to set xattr '%s' on temp file '%s' - %v", k, tempName, err)
        }

        _, err = file.Seek(0, 0)
        if err != nil {
            return err
        if k == "user.oc.mtime" {
            tv, err := time.Parse(time.RFC3339Nano, string(v))
            if err == nil {
                mtime = &tv
            }
        }
    }

    // the extended attributes should always contain a mtime, but in case they don't, we fetch it from the node
    if mtime == nil {
        switch nodeMtime, err := n.GetMTime(context.Background()); {
        case err != nil:
            return fmt.Errorf("failed to get mtime for node '%s' - %v", n.InternalPath(), err)
        default:
            mtime = &nodeMtime
        }

        copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
        if err != nil {
            return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget)
        }
        defer copyFile.Close()
    }

        _, err = copyFile.ReadFrom(file)
        if err != nil {
            return errors.Wrapf(err, "could not write blob copy of '%s' to '%s'", node.InternalPath(), copyTarget)
        }
    // etags rely on the id and the mtime, so we need to ensure the mtime is set correctly
    if err := os.Chtimes(tempName, *mtime, *mtime); err != nil {
        return fmt.Errorf("failed to set mtime on temp file '%s' - %v", tempName, err)
    }

    // atomically move the file to its final location,
    // on Windows systems (unsupported oc os) os.Rename is not atomic
    if err := os.Rename(tempName, n.InternalPath()); err != nil {
        return fmt.Errorf("failed to move temp file '%s' to node '%s' - %v", tempName, n.InternalPath(), err)
    }

    // upload successfully, now handle the copy target if set
    if copyTarget == "" {
        return nil
    }

    // also "upload" the file to a local path, e.g., for keeping the "current" version of the file
    if err := os.MkdirAll(filepath.Dir(copyTarget), 0700); err != nil {
        return err
    }

    if _, err := sourceFile.Seek(0, 0); err != nil {
        return err
    }

    copyFile, err := os.OpenFile(copyTarget, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
    if err != nil {
        return errors.Wrapf(err, "could not open copy target '%s' for writing", copyTarget)
    }
    defer func() {
        _ = copyFile.Close()
    }()

    if _, err := copyFile.ReadFrom(sourceFile); err != nil {
        return errors.Wrapf(err, "could not write blob copy of '%s' to '%s'", n.InternalPath(), copyTarget)
    }

    return nil
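The rewritten Upload follows the classic atomic-write recipe: stage the blob in a temp directory on the same filesystem, fsync it, set the metadata, then rename it into place so readers never observe a half-written blob. The pattern in isolation, as a hedged sketch (the .oc-tmp directory name is taken from the diff; everything else is generic):

package example

import (
    "os"
    "path/filepath"
)

// writeAtomic writes data to dst via a temp file plus rename, mirroring
// the upload flow above. The temp file must live on the same filesystem
// as dst for the rename to be atomic; as the diff notes, os.Rename is
// atomic on POSIX filesystems but not on Windows.
func writeAtomic(dst string, data []byte) error {
    tmpDir := filepath.Join(filepath.Dir(dst), ".oc-tmp")
    if err := os.MkdirAll(tmpDir, 0700); err != nil {
        return err
    }
    tmp, err := os.CreateTemp(tmpDir, "blob-*")
    if err != nil {
        return err
    }
    defer os.Remove(tmp.Name()) // no-op after a successful rename
    if _, err := tmp.Write(data); err != nil {
        tmp.Close()
        return err
    }
    if err := tmp.Sync(); err != nil { // flush to stable storage first
        tmp.Close()
        return err
    }
    if err := tmp.Close(); err != nil {
        return err
    }
    return os.Rename(tmp.Name(), dst) // readers see old or new, never partial
}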
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup/lookup.go (generated, vendored, 13 changes)
@@ -392,11 +392,14 @@ func (lu *Lookup) GenerateSpaceID(spaceType string, owner *user.User) (string, e
    case _spaceTypeProject:
        return uuid.New().String(), nil
    case _spaceTypePersonal:
        path := templates.WithUser(owner, lu.Options.PersonalSpacePathTemplate)
        relPath := templates.WithUser(owner, lu.Options.PersonalSpacePathTemplate)
        path := filepath.Join(lu.Options.Root, relPath)

        spaceID, _, err := lu.IDsForPath(context.TODO(), filepath.Join(lu.Options.Root, path))
        // do we already know about this space?
        spaceID, _, err := lu.IDsForPath(context.TODO(), path)
        if err != nil {
            _, err := os.Stat(filepath.Join(lu.Options.Root, path))
            // check if the space exists on disk incl. attributes
            spaceID, _, _, err := lu.metadataBackend.IdentifyPath(context.TODO(), path)
            if err != nil {
                if metadata.IsNotExist(err) || metadata.IsAttrUnset(err) {
                    return uuid.New().String(), nil
@@ -404,6 +407,10 @@ func (lu *Lookup) GenerateSpaceID(spaceType string, owner *user.User) (string, e
                return "", err
            }
        }
        if len(spaceID) == 0 {
            return "", errtypes.InternalError("encountered empty space id on disk")
        }
        return spaceID, nil
    }
    return spaceID, nil
    default:
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin/trashbin.go (generated, vendored, 11 changes)
@@ -32,6 +32,7 @@ import (

    provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
    typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"

    "github.com/opencloud-eu/reva/v2/pkg/errtypes"
    "github.com/opencloud-eu/reva/v2/pkg/storage"
    "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
@@ -333,10 +334,12 @@ func (tb *Trashbin) RestoreRecycleItem(ctx context.Context, spaceID string, key,
        return nil, fmt.Errorf("trashbin: parent id not found for %s", restorePath)
    }

    trashNode := &trashNode{spaceID: spaceID, id: id, path: trashPath}
    err = tb.lu.MetadataBackend().Set(ctx, trashNode, prefixes.ParentidAttr, []byte(parentID))
    if err != nil {
        return nil, err
    trashedNode := &trashNode{spaceID: spaceID, id: id, path: trashPath}
    if err = tb.lu.MetadataBackend().SetMultiple(ctx, trashedNode, map[string][]byte{
        prefixes.NameAttr:     []byte(filepath.Base(restorePath)),
        prefixes.ParentidAttr: []byte(parentID),
    }, true); err != nil {
        return nil, fmt.Errorf("posixfs: failed to update trashed node metadata: %w", err)
    }

    // restore the item
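The switch from Set to SetMultiple writes the displayed name and the parent pointer in one metadata call, so a restore into a different folder (or under a new name) cannot leave the two attributes out of sync. A hedged, self-contained sketch of that call shape; the interface and the attribute keys here are illustrative stand-ins, not the reva API verbatim:

package example

import (
    "fmt"
    "path/filepath"
)

// metadataBackend is a stand-in for the reva metadata backend; only the
// SetMultiple shape used in the hunk is assumed.
type metadataBackend interface {
    SetMultiple(node any, attrs map[string][]byte, acquireLock bool) error
}

// restoreAttrs updates name and parent together, in a single write.
func restoreAttrs(b metadataBackend, n any, restorePath, parentID string) error {
    attrs := map[string][]byte{
        "user.oc.name":     []byte(filepath.Base(restorePath)), // illustrative keys
        "user.oc.parentid": []byte(parentID),
    }
    if err := b.SetMultiple(n, attrs, true); err != nil {
        return fmt.Errorf("failed to update trashed node metadata: %w", err)
    }
    return nil
}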
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/tree/tree.go (generated, vendored, 8 changes)
@@ -38,9 +38,11 @@ import (
    "golang.org/x/sync/errgroup"

    provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"

    "github.com/opencloud-eu/reva/v2/pkg/appctx"
    "github.com/opencloud-eu/reva/v2/pkg/errtypes"
    "github.com/opencloud-eu/reva/v2/pkg/events"
    "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/blobstore"
    "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/lookup"
    "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/options"
    "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix/trashbin"
@@ -649,6 +651,10 @@ func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) {
    return n.SetXattrsWithContext(ctx, attributes, false)
}

func (t *Tree) isTemporary(path string) bool {
    return path == blobstore.TMPDir
}

func (t *Tree) isIgnored(path string) bool {
    return isLockFile(path) || isTrash(path) || t.isUpload(path) || t.isInternal(path)
}
@@ -664,7 +670,7 @@ func (t *Tree) isIndex(path string) bool {
func (t *Tree) isInternal(path string) bool {
    return path == t.options.Root ||
        path == filepath.Join(t.options.Root, "users") ||
        t.isIndex(path) || strings.Contains(path, lookup.MetadataDir)
        t.isIndex(path) || strings.Contains(path, lookup.MetadataDir) || t.isTemporary(path)
}

func isLockFile(path string) bool {

@@ -46,7 +46,12 @@ func (HybridBackend) Name() string { return "hybrid" }

// IdentifyPath returns the space id, node id and mtime of a file
func (b HybridBackend) IdentifyPath(_ context.Context, path string) (string, string, time.Time, error) {
    spaceID, _ := xattr.Get(path, prefixes.SpaceIDAttr)
    spaceID, err := xattr.Get(path, prefixes.SpaceIDAttr)
    if err != nil {
        if IsNotExist(err) {
            return "", "", time.Time{}, err
        }
    }
    id, _ := xattr.Get(path, prefixes.IDAttr)

    mtimeAttr, _ := xattr.Get(path, prefixes.MTimeAttr)
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/node/node.go (generated, vendored, 12 changes)
@@ -1096,22 +1096,22 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *pro
            continue
        }

        if isGrantExpired(g) {
            continue
        }

        switch {
        case err == nil:
            if isGrantExpired(g) {
                continue
            }

            // If all permissions are set to false we have a deny grant
            if grants.PermissionsEqual(g.Permissions, &provider.ResourcePermissions{}) {
                return NoPermissions(), true, nil
            }
            AddPermissions(ap, g.GetPermissions())
        case metadata.IsAttrUnset(err):
            appctx.GetLogger(ctx).Error().Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
            appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing")
            // continue with next segment
        default:
            appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("grant", grantees[i]).Msg("error reading permissions")
            appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("path", n.InternalPath()).Str("grant", grantees[i]).Msg("error reading permissions")
            // continue with next segment
        }
    }
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/spaces.go (generated, vendored, 6 changes)
@@ -129,7 +129,11 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr
    }

    // 770 permissions for the space
    if err := os.MkdirAll(rootPath, 0770); err != nil {
    if err := os.Mkdir(rootPath, 0770); err != nil {
        if os.IsExist(err) {
            // Someone has created the space in the meantime. Abort.
            return nil, errtypes.AlreadyExists(spaceID)
        }
        return nil, errors.Wrap(err, fmt.Sprintf("Decomposedfs: error creating space %s", rootPath))
    }
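The switch from MkdirAll to Mkdir is what makes the race visible: MkdirAll treats an existing directory as success and would silently reuse a space created concurrently, while Mkdir surfaces the existence error so it can be turned into an explicit conflict. A minimal sketch of the pattern:

package example

import (
    "errors"
    "fmt"
    "os"
)

// createOnce creates rootPath exactly once and turns a concurrent create
// into an explicit "already exists" error instead of silent reuse.
func createOnce(rootPath string) error {
    if err := os.Mkdir(rootPath, 0770); err != nil {
        if errors.Is(err, os.ErrExist) {
            return fmt.Errorf("space %s already exists", rootPath)
        }
        return err
    }
    return nil
}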
vendor/github.com/opencloud-eu/reva/v2/pkg/storage/pkg/decomposedfs/upload/upload.go (generated, vendored, 11 changes)
@@ -286,6 +286,17 @@ func (session *DecomposedFsSession) Finalize(ctx context.Context) (err error) {
    revisionNode := node.New(session.SpaceID(), session.NodeID(), "", "", session.Size(), session.ID(),
        provider.ResourceType_RESOURCE_TYPE_FILE, session.SpaceOwner(), session.store.lu)

    switch spaceRoot, err := session.store.lu.NodeFromSpaceID(ctx, session.SpaceID()); {
    case err != nil:
        return fmt.Errorf("failed to get space root for space id %s: %v", session.SpaceID(), err)
    case spaceRoot == nil:
        return fmt.Errorf("space root for space id %s not found", session.SpaceID())
    case spaceRoot.InternalPath() == "":
        return fmt.Errorf("space root for space id %s has no valid internal path", session.SpaceID())
    default:
        revisionNode.SpaceRoot = spaceRoot
    }

    // upload the data to the blobstore
    _, subspan := tracer.Start(ctx, "WriteBlob")
    err = session.store.tp.WriteBlob(revisionNode, session.binPath())
vendor/modules.txt (vendored, 8 changes)
@@ -993,7 +993,7 @@ github.com/munnerz/goautoneg
# github.com/nats-io/jwt/v2 v2.7.3
## explicit; go 1.22
github.com/nats-io/jwt/v2
# github.com/nats-io/nats-server/v2 v2.11.0
# github.com/nats-io/nats-server/v2 v2.11.1
## explicit; go 1.23.0
github.com/nats-io/nats-server/v2/conf
github.com/nats-io/nats-server/v2/internal/fastrand
@@ -1009,8 +1009,8 @@ github.com/nats-io/nats-server/v2/server/stree
github.com/nats-io/nats-server/v2/server/sysmem
github.com/nats-io/nats-server/v2/server/thw
github.com/nats-io/nats-server/v2/server/tpm
# github.com/nats-io/nats.go v1.39.1
## explicit; go 1.22.0
# github.com/nats-io/nats.go v1.41.0
## explicit; go 1.23.0
github.com/nats-io/nats.go
github.com/nats-io/nats.go/encoders/builtin
github.com/nats-io/nats.go/internal/parser
@@ -1198,7 +1198,7 @@ github.com/open-policy-agent/opa/v1/types
github.com/open-policy-agent/opa/v1/util
github.com/open-policy-agent/opa/v1/util/decoding
github.com/open-policy-agent/opa/v1/version
# github.com/opencloud-eu/reva/v2 v2.29.1
# github.com/opencloud-eu/reva/v2 v2.29.5
## explicit; go 1.24.1
github.com/opencloud-eu/reva/v2/cmd/revad/internal/grace
github.com/opencloud-eu/reva/v2/cmd/revad/runtime
||||
Reference in New Issue
Block a user