mirror of
https://github.com/opencloud-eu/opencloud.git
synced 2025-12-25 07:08:47 -05:00
Compare commits
132 Commits
runTestsIn
...
stable-2.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
59e3e081da | ||
|
|
4d2bf41274 | ||
|
|
4cab81a857 | ||
|
|
63b201b2a7 | ||
|
|
da64699172 | ||
|
|
723284193d | ||
|
|
ddaf1f86b0 | ||
|
|
1e6ee49221 | ||
|
|
db04f851ca | ||
|
|
a8fc417e18 | ||
|
|
a240389a06 | ||
|
|
195e9ffdac | ||
|
|
d76d5cd49c | ||
|
|
bf7fabddf7 | ||
|
|
0160118996 | ||
|
|
0935c44c1c | ||
|
|
f60e36e8d7 | ||
|
|
4828c5c4bc | ||
|
|
5dfd52aa32 | ||
|
|
d1a8463036 | ||
|
|
25d5dab696 | ||
|
|
e0f05003ef | ||
|
|
1cdd21591a | ||
|
|
e730c6ed3a | ||
|
|
7076185c0f | ||
|
|
87ea852a21 | ||
|
|
4988709fd7 | ||
|
|
f1ba964365 | ||
|
|
7608ae482c | ||
|
|
abe653ed49 | ||
|
|
b67ce7cd34 | ||
|
|
3263c16652 | ||
|
|
c8df6a27b6 | ||
|
|
a3a5a5a047 | ||
|
|
a80ecf0b88 | ||
|
|
17604171fe | ||
|
|
df4d427e66 | ||
|
|
c1ddc61f2b | ||
|
|
fa9282bec2 | ||
|
|
9759a2dee2 | ||
|
|
1bea4b7471 | ||
|
|
f266857cf1 | ||
|
|
d15c961c31 | ||
|
|
9238639753 | ||
|
|
0718947b0c | ||
|
|
2fc33db3aa | ||
|
|
05f0a8dc96 | ||
|
|
8697e6a4c8 | ||
|
|
1c9f7654d6 | ||
|
|
52ee25ea76 | ||
|
|
d18549e3fa | ||
|
|
82e63866f8 | ||
|
|
dbdc0b6d8c | ||
|
|
17b1c71f78 | ||
|
|
a298f90509 | ||
|
|
298043973b | ||
|
|
68ae16f5e1 | ||
|
|
7d0eee14c9 | ||
|
|
afb49f2e87 | ||
|
|
0f2fdc4f86 | ||
|
|
970526575a | ||
|
|
7dcdc53127 | ||
|
|
39544371f8 | ||
|
|
e4402d9b17 | ||
|
|
5561d5f354 | ||
|
|
10fb2d79e6 | ||
|
|
e37bedda1c | ||
|
|
0702b8bf9f | ||
|
|
a414a2015d | ||
|
|
292c8e5b63 | ||
|
|
51994d6398 | ||
|
|
9d72a7cbe6 | ||
|
|
88721d8d52 | ||
|
|
72038e5cea | ||
|
|
2d16e6e693 | ||
|
|
2344092b81 | ||
|
|
2277ba8ffe | ||
|
|
d9041f2f47 | ||
|
|
17267e899e | ||
|
|
c3be42950a | ||
|
|
3249d233f5 | ||
|
|
3eed688e5d | ||
|
|
ddf32022de | ||
|
|
24e5e19825 | ||
|
|
6375de8167 | ||
|
|
01755c5955 | ||
|
|
52c72c852f | ||
|
|
270a6b65ab | ||
|
|
cb700cafae | ||
|
|
659d246482 | ||
|
|
fae29f107c | ||
|
|
8bc17593cb | ||
|
|
7e71089ed6 | ||
|
|
f94eedaee3 | ||
|
|
8584184063 | ||
|
|
f0c4e383ca | ||
|
|
8b7f12126f | ||
|
|
f0e79468f2 | ||
|
|
de485a69d7 | ||
|
|
f788703c55 | ||
|
|
44dea094d7 | ||
|
|
248ff08c59 | ||
|
|
ac54b446a5 | ||
|
|
073da849e0 | ||
|
|
c2ce9a136d | ||
|
|
467fc2af0c | ||
|
|
73c179e447 | ||
|
|
ac8351bd3b | ||
|
|
00d49804cf | ||
|
|
5953f950ef | ||
|
|
858dd4110f | ||
|
|
7fc336d57b | ||
|
|
7be2e4693a | ||
|
|
544b354a42 | ||
|
|
99c814e45a | ||
|
|
6055c1476c | ||
|
|
213874dbec | ||
|
|
67ce1d51e2 | ||
|
|
cda94ce584 | ||
|
|
fa34a073fd | ||
|
|
036b3669c2 | ||
|
|
fe2e4b6c23 | ||
|
|
d58f066376 | ||
|
|
073e902626 | ||
|
|
0e63721c13 | ||
|
|
547cfaee75 | ||
|
|
4569064400 | ||
|
|
5aa51dcfda | ||
|
|
1d0fcfbca4 | ||
|
|
31f24a65b3 | ||
|
|
05c2b39281 | ||
|
|
8c9725b46e |
1
.github/settings.yml
vendored
1
.github/settings.yml
vendored
@@ -1,2 +1 @@
|
||||
_extends: gh-labels
|
||||
|
||||
|
||||
30
.github/workflows/labels.yml
vendored
Normal file
30
.github/workflows/labels.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Require Pull Request Labels
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, labeled, unlabeled, synchronize]
|
||||
jobs:
|
||||
label:
|
||||
# Only run if PR is not from a fork
|
||||
if: github.event.pull_request.head.repo.full_name == github.repository
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: mheap/github-action-required-labels@v5
|
||||
with:
|
||||
mode: minimum
|
||||
count: 1
|
||||
labels: |
|
||||
Type:Bug
|
||||
Type:Enhancement
|
||||
Type:Feature
|
||||
Type:Breaking-Change
|
||||
Type:Test
|
||||
Type:Documentation
|
||||
Type:Maintenance
|
||||
Type:Security
|
||||
Type:Dependencies
|
||||
Type:DevOps
|
||||
dependencies
|
||||
add_comment: true
|
||||
12
.make/go.mk
12
.make/go.mk
@@ -110,3 +110,15 @@ debug-linux-docker-amd64: release-dirs
|
||||
-ldflags '-extldflags "-static" $(DEBUG_LDFLAGS) $(DOCKER_LDFLAGS)' \
|
||||
-o '$(DIST)/binaries/$(EXECUTABLE)-linux-amd64' \
|
||||
./cmd/$(NAME)
|
||||
|
||||
debug-linux-docker-arm64: release-dirs
|
||||
GOOS=linux \
|
||||
GOARCH=arm64 \
|
||||
go build \
|
||||
-gcflags="all=-N -l" \
|
||||
-tags 'netgo $(TAGS)' \
|
||||
-buildmode=exe \
|
||||
-trimpath \
|
||||
-ldflags '-extldflags "-static" $(DEBUG_LDFLAGS) $(DOCKER_LDFLAGS)' \
|
||||
-o '$(DIST)/binaries/$(EXECUTABLE)-linux-arm64' \
|
||||
./cmd/$(NAME)
|
||||
|
||||
3
.vscode/launch.json
vendored
3
.vscode/launch.json
vendored
@@ -101,6 +101,9 @@
|
||||
"APP_PROVIDER_GRPC_ADDR": "127.0.0.1:10164",
|
||||
"APP_REGISTRY_DEBUG_ADDR": "127.0.0.1:10243",
|
||||
"APP_REGISTRY_GRPC_ADDR": "127.0.0.1:10242",
|
||||
"AUTH_APP_DEBUG_ADDR": "127.0.0.1:10245",
|
||||
"AUTH_APP_GRPC_ADDR": "127.0.0.1:10246",
|
||||
"AUTH_APP_HTTP_ADDR": "127.0.0.1:10247",
|
||||
"AUTH_BASIC_DEBUG_ADDR": "127.0.0.1:10147",
|
||||
"AUTH_BASIC_GRPC_ADDR": "127.0.0.1:10146",
|
||||
"AUTH_MACHINE_DEBUG_ADDR": "127.0.0.1:10167",
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
# The test runner source for UI tests
|
||||
WEB_COMMITID=4a2f3a1d14009676a3a9dfef536ed4fd3e7f4c21
|
||||
WEB_BRANCH=main
|
||||
WEB_COMMITID=3cc779ddb45b52134fa986d21587f18316e2135a
|
||||
WEB_BRANCH=stable-2.1
|
||||
|
||||
267
.woodpecker.star
267
.woodpecker.star
@@ -1,17 +1,14 @@
|
||||
"""OpenCloud CI definition
|
||||
"""
|
||||
|
||||
# Production release tags
|
||||
# NOTE: need to be updated if new production releases are determined
|
||||
# - follow semver
|
||||
# - omit 'v' prefix
|
||||
PRODUCTION_RELEASE_TAGS = ["2.0", "3.0"]
|
||||
|
||||
# Repository
|
||||
|
||||
repo_slug = "opencloud-eu/opencloud"
|
||||
docker_repo_slug = "opencloudeu/opencloud"
|
||||
|
||||
# Active base branch for this code stream
|
||||
branch = "stable-2.0"
|
||||
|
||||
# images
|
||||
ALPINE_GIT = "alpine/git:latest"
|
||||
APACHE_TIKA = "apache/tika:2.8.0.0"
|
||||
@@ -28,7 +25,7 @@ OC_CI_GOLANG = "docker.io/golang:1.24"
|
||||
OC_CI_NODEJS = "owncloudci/nodejs:%s"
|
||||
OC_CI_PHP = "owncloudci/php:%s"
|
||||
OC_CI_WAIT_FOR = "owncloudci/wait-for:latest"
|
||||
OC_CS3_API_VALIDATOR = "owncloud/cs3api-validator:0.2.1"
|
||||
OC_CS3_API_VALIDATOR = "opencloudeu/cs3api-validator:latest"
|
||||
OC_LITMUS = "owncloudci/litmus:latest"
|
||||
OC_UBUNTU = "owncloud/ubuntu:20.04"
|
||||
ONLYOFFICE_DOCUMENT_SERVER = "onlyoffice/documentserver:7.5.1"
|
||||
@@ -44,10 +41,13 @@ PLUGINS_S3_CACHE = "plugins/s3-cache:1"
|
||||
PLUGINS_SLACK = "plugins/slack:1"
|
||||
REDIS = "redis:6-alpine"
|
||||
SONARSOURCE_SONAR_SCANNER_CLI = "sonarsource/sonar-scanner-cli:11.0"
|
||||
READY_RELEASE_GO = "woodpeckerci/plugin-ready-release-go:latest"
|
||||
|
||||
DEFAULT_PHP_VERSION = "8.2"
|
||||
DEFAULT_NODEJS_VERSION = "20"
|
||||
|
||||
CACHE_S3_SERVER = "https://s3.ci.opencloud.eu"
|
||||
|
||||
dirs = {
|
||||
"base": "/woodpecker/src/github.com/opencloud-eu/opencloud",
|
||||
"web": "/woodpecker/src/github.com/opencloud-eu/opencloud/webTestRunner",
|
||||
@@ -279,10 +279,6 @@ config = {
|
||||
],
|
||||
"skip": False,
|
||||
"withRemotePhp": [True],
|
||||
"extraServerEnvironment": {
|
||||
"OC_ADD_RUN_SERVICES": "auth-app",
|
||||
"PROXY_ENABLE_APP_AUTH": True,
|
||||
},
|
||||
},
|
||||
"cliCommands": {
|
||||
"suites": [
|
||||
@@ -296,6 +292,7 @@ config = {
|
||||
"ANTIVIRUS_CLAMAV_SOCKET": "tcp://clamav:3310",
|
||||
"OC_ASYNC_UPLOADS": True,
|
||||
"OC_ADD_RUN_SERVICES": "antivirus",
|
||||
"STORAGE_USERS_DRIVER": "decomposed",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -340,6 +337,20 @@ config = {
|
||||
},
|
||||
"dockerReleases": {
|
||||
"architectures": ["arm64", "amd64"],
|
||||
"production": {
|
||||
# NOTE: need to be updated if new production releases are determined
|
||||
"tags": ["2.0"],
|
||||
"repo": docker_repo_slug,
|
||||
"build_type": "production",
|
||||
},
|
||||
"rolling": {
|
||||
"repo": docker_repo_slug + "-rolling",
|
||||
"build_type": "rolling",
|
||||
},
|
||||
"daily": {
|
||||
"repo": docker_repo_slug + "-rolling",
|
||||
"build_type": "daily",
|
||||
},
|
||||
},
|
||||
"litmus": True,
|
||||
"codestyle": True,
|
||||
@@ -360,9 +371,7 @@ MINIO_MC_ENV = {
|
||||
"CACHE_BUCKET": {
|
||||
"from_secret": "cache_s3_bucket",
|
||||
},
|
||||
"MC_HOST": {
|
||||
"from_secret": "cache_s3_server",
|
||||
},
|
||||
"MC_HOST": CACHE_S3_SERVER,
|
||||
"AWS_ACCESS_KEY_ID": {
|
||||
"from_secret": "cache_s3_access_key",
|
||||
},
|
||||
@@ -421,8 +430,7 @@ def main(ctx):
|
||||
pipelines = []
|
||||
|
||||
build_release_helpers = \
|
||||
changelog() + \
|
||||
docs()
|
||||
readyReleaseGo()
|
||||
|
||||
build_release_helpers.append(
|
||||
pipelineDependsOn(
|
||||
@@ -661,9 +669,7 @@ def testOpencloud(ctx):
|
||||
"name": "scan-result-cache",
|
||||
"image": PLUGINS_S3,
|
||||
"settings": {
|
||||
"endpoint": {
|
||||
"from_secret": "cache_s3_server",
|
||||
},
|
||||
"endpoint": CACHE_S3_SERVER,
|
||||
"bucket": "cache",
|
||||
"source": "cache/**/*",
|
||||
"target": "%s/%s" % (repo_slug, ctx.build.commit + "-${CI_PIPELINE_NUMBER}"),
|
||||
@@ -684,7 +690,7 @@ def testOpencloud(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -715,7 +721,7 @@ def scanOpencloud(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -738,7 +744,7 @@ def buildOpencloudBinaryForTesting(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -873,7 +879,7 @@ def codestyle(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -943,7 +949,7 @@ def localApiTestPipeline(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1009,7 +1015,7 @@ def cs3ApiTests(ctx, storage, accounts_hash_difficulty = 4):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1126,7 +1132,7 @@ def wopiValidatorTests(ctx, storage, wopiServerType, accounts_hash_difficulty =
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1143,7 +1149,7 @@ def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = Fa
|
||||
storage = "decomposed"
|
||||
filterTags = "~@skipOnGraph&&~@skipOnOpencloud-%s-Storage" % storage
|
||||
test_dir = "%s/tests/acceptance" % dirs["base"]
|
||||
expected_failures_file = "%s/expected-failures-API-on-decomposed-storage.md" % (test_dir)
|
||||
expected_failures_file = "%s/expected-failures-API-on-%s-storage.md" % (test_dir, storage)
|
||||
|
||||
return {
|
||||
"name": "Core-API-Tests-%s%s-%s" % (part_number, "-withoutRemotePhp" if not with_remote_php else "", storage),
|
||||
@@ -1180,7 +1186,7 @@ def coreApiTests(ctx, part_number = 1, number_of_parts = 1, with_remote_php = Fa
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1230,7 +1236,7 @@ def e2eTestPipeline(ctx):
|
||||
e2e_trigger = [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1339,7 +1345,7 @@ def multiServiceE2ePipeline(ctx):
|
||||
e2e_trigger = [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1465,9 +1471,7 @@ def uploadTracingResult(ctx):
|
||||
"bucket": {
|
||||
"from_secret": "cache_public_s3_bucket",
|
||||
},
|
||||
"endpoint": {
|
||||
"from_secret": "cache_public_s3_server",
|
||||
},
|
||||
"endpoint": CACHE_S3_SERVER,
|
||||
"path_style": True,
|
||||
"source": "webTestRunner/reports/e2e/playwright/tracing/**/*",
|
||||
"strip_prefix": "webTestRunner/reports/e2e/playwright/tracing",
|
||||
@@ -1515,26 +1519,31 @@ def logTracingResults():
|
||||
def dockerReleases(ctx):
|
||||
pipelines = []
|
||||
docker_repos = []
|
||||
build_type = "daily"
|
||||
build_type = ""
|
||||
|
||||
# dockerhub repo
|
||||
# - "opencloudeu/opencloud-rolling"
|
||||
repo = docker_repo_slug + "-rolling"
|
||||
docker_repos.append(repo)
|
||||
|
||||
# production release repo
|
||||
if ctx.build.event == "tag":
|
||||
tag = ctx.build.ref.replace("refs/tags/v", "").lower()
|
||||
for prod_tag in PRODUCTION_RELEASE_TAGS:
|
||||
|
||||
is_production = False
|
||||
for prod_tag in config["dockerReleases"]["production"]["tags"]:
|
||||
if tag.startswith(prod_tag):
|
||||
docker_repos.append(docker_repo_slug)
|
||||
is_production = True
|
||||
break
|
||||
|
||||
if is_production:
|
||||
docker_repos.append(config["dockerReleases"]["production"]["repo"])
|
||||
build_type = config["dockerReleases"]["production"]["build_type"]
|
||||
|
||||
else:
|
||||
docker_repos.append(config["dockerReleases"]["rolling"]["repo"])
|
||||
build_type = config["dockerReleases"]["rolling"]["build_type"]
|
||||
|
||||
else:
|
||||
docker_repos.append(config["dockerReleases"]["daily"]["repo"])
|
||||
build_type = config["dockerReleases"]["daily"]["build_type"]
|
||||
|
||||
for repo in docker_repos:
|
||||
repo_pipelines = []
|
||||
if ctx.build.event == "tag":
|
||||
build_type = "rolling" if "rolling" in repo else "production"
|
||||
|
||||
repo_pipelines.append(dockerRelease(ctx, repo, build_type))
|
||||
|
||||
# manifest = releaseDockerManifest(ctx, repo, build_type)
|
||||
@@ -1550,10 +1559,10 @@ def dockerReleases(ctx):
|
||||
return pipelines
|
||||
|
||||
def dockerRelease(ctx, repo, build_type):
|
||||
build_args = [
|
||||
"REVISION=%s" % (ctx.build.commit),
|
||||
"VERSION=%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
|
||||
]
|
||||
build_args = {
|
||||
"REVISION": "%s" % (ctx.build.commit),
|
||||
"VERSION": "%s" % (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
|
||||
}
|
||||
|
||||
depends_on = getPipelineNames(getGoBinForTesting(ctx))
|
||||
|
||||
@@ -1630,10 +1639,6 @@ def dockerRelease(ctx, repo, build_type):
|
||||
],
|
||||
},
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
},
|
||||
{
|
||||
"event": "tag",
|
||||
},
|
||||
@@ -1644,7 +1649,7 @@ def dockerRelease(ctx, repo, build_type):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1675,7 +1680,15 @@ def binaryRelease(ctx, arch, depends_on = []):
|
||||
{
|
||||
"name": "build",
|
||||
"image": OC_CI_GOLANG,
|
||||
"environment": CI_HTTP_PROXY_ENV,
|
||||
"environment": {
|
||||
"VERSION": (ctx.build.ref.replace("refs/tags/", "") if ctx.build.event == "tag" else "daily"),
|
||||
"HTTP_PROXY": {
|
||||
"from_secret": "ci_http_proxy",
|
||||
},
|
||||
"HTTPS_PROXY": {
|
||||
"from_secret": "ci_http_proxy",
|
||||
},
|
||||
},
|
||||
"commands": [
|
||||
"make -C opencloud release-%s" % arch,
|
||||
],
|
||||
@@ -1687,23 +1700,6 @@ def binaryRelease(ctx, arch, depends_on = []):
|
||||
"commands": [
|
||||
"make -C opencloud release-finish",
|
||||
],
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
},
|
||||
{
|
||||
"event": "tag",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "changelog",
|
||||
"image": OC_CI_GOLANG,
|
||||
"environment": CI_HTTP_PROXY_ENV,
|
||||
"commands": [
|
||||
"make changelog CHANGELOG_VERSION=%s" % ctx.build.ref.replace("refs/tags/v", ""),
|
||||
],
|
||||
"when": [
|
||||
{
|
||||
"event": "tag",
|
||||
@@ -1721,8 +1717,6 @@ def binaryRelease(ctx, arch, depends_on = []):
|
||||
"opencloud/dist/release/*",
|
||||
],
|
||||
"title": ctx.build.ref.replace("refs/tags/v", ""),
|
||||
"note": "opencloud/dist/CHANGELOG.md",
|
||||
"overwrite": True,
|
||||
"prerelease": len(ctx.build.ref.split("-")) > 1,
|
||||
},
|
||||
"when": [
|
||||
@@ -1736,7 +1730,7 @@ def binaryRelease(ctx, arch, depends_on = []):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1791,19 +1785,6 @@ def licenseCheck(ctx):
|
||||
"cd third-party-licenses && tar -czf ../third-party-licenses.tar.gz *",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "changelog",
|
||||
"image": OC_CI_GOLANG,
|
||||
"environment": CI_HTTP_PROXY_ENV,
|
||||
"commands": [
|
||||
"make changelog CHANGELOG_VERSION=%s" % ctx.build.ref.replace("refs/tags/v", "").split("-")[0],
|
||||
],
|
||||
"when": [
|
||||
{
|
||||
"event": "tag",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "release",
|
||||
"image": PLUGINS_GITHUB_RELEASE,
|
||||
@@ -1815,8 +1796,6 @@ def licenseCheck(ctx):
|
||||
"third-party-licenses.tar.gz",
|
||||
],
|
||||
"title": ctx.build.ref.replace("refs/tags/v", ""),
|
||||
"note": "opencloud/dist/CHANGELOG.md",
|
||||
"overwrite": True,
|
||||
"prerelease": len(ctx.build.ref.split("-")) > 1,
|
||||
},
|
||||
"when": [
|
||||
@@ -1829,7 +1808,7 @@ def licenseCheck(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -1841,62 +1820,27 @@ def licenseCheck(ctx):
|
||||
"workspace": workspace,
|
||||
}
|
||||
|
||||
def changelog():
|
||||
def readyReleaseGo():
|
||||
return [{
|
||||
"name": "changelog",
|
||||
"name": "ready-release-go",
|
||||
"steps": [
|
||||
{
|
||||
"name": "generate",
|
||||
"image": OC_CI_GOLANG,
|
||||
"environment": CI_HTTP_PROXY_ENV,
|
||||
"commands": [
|
||||
"make -C opencloud changelog",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "diff",
|
||||
"image": OC_CI_ALPINE,
|
||||
"commands": [
|
||||
"git diff",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "output",
|
||||
"image": OC_CI_ALPINE,
|
||||
"commands": [
|
||||
"cat CHANGELOG.md",
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "publish",
|
||||
"image": PLUGINS_GIT_PUSH,
|
||||
"name": "release-helper",
|
||||
"image": READY_RELEASE_GO,
|
||||
"settings": {
|
||||
"branch": "main",
|
||||
"remote": "ssh://git@github.com/%s.git" % repo_slug,
|
||||
"commit": True,
|
||||
"ssh_key": {
|
||||
"from_secret": "ssh_key",
|
||||
"git_email": "devops@opencloud.eu",
|
||||
"forge_type": "github",
|
||||
"forge_token": {
|
||||
"from_secret": "github_token",
|
||||
},
|
||||
"commit_message": "Automated changelog update [skip ci]",
|
||||
"author_email": "devops@opencloud.eu",
|
||||
"author_name": "openclouders",
|
||||
"rebase": True,
|
||||
"release_branch": branch,
|
||||
},
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
],
|
||||
}]
|
||||
@@ -1938,40 +1882,9 @@ def releaseDockerReadme(repo, build_type):
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
},
|
||||
{
|
||||
"event": "tag",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
def docs():
|
||||
return [
|
||||
{
|
||||
"name": "dev-docs",
|
||||
"steps": [
|
||||
{
|
||||
"name": "devdocs",
|
||||
"image": "codeberg.org/xfix/plugin-codeberg-pages-deploy:1",
|
||||
"settings": {
|
||||
"folder": "docs",
|
||||
"branch": "docs",
|
||||
"git_config_email": "${CI_COMMIT_AUTHOR_EMAIL}",
|
||||
"git_config_name": "${CI_COMMIT_AUTHOR}",
|
||||
"ssh_key": {
|
||||
"from_secret": "ssh_key",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"when": [
|
||||
{
|
||||
"event": "push",
|
||||
"branch": "main",
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
def makeNodeGenerate(module):
|
||||
if module == "":
|
||||
make = "make"
|
||||
@@ -2031,7 +1944,7 @@ def notify(ctx):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": ["main", "release-*"],
|
||||
"branch": ["main", "stable-*", "release-*"],
|
||||
},
|
||||
{
|
||||
"event": "tag",
|
||||
@@ -2245,18 +2158,12 @@ def build():
|
||||
]
|
||||
|
||||
def skipIfUnchanged(ctx, type):
|
||||
## FIXME: the 'exclude' feature (https://woodpecker-ci.org/docs/usage/workflow-syntax#path) does not seem to provide
|
||||
# what we need. It seems to skip the build as soon as one of the changed files matches an exclude pattern, we only
|
||||
# want to skip of ALL changed files match. So skip this condition for now:
|
||||
return []
|
||||
|
||||
if "full-ci" in ctx.build.title.lower() or ctx.build.event == "tag" or ctx.build.event == "cron":
|
||||
return []
|
||||
|
||||
base = [
|
||||
".github/**",
|
||||
".vscode/**",
|
||||
"changelog/**",
|
||||
"docs/**",
|
||||
"deployments/**",
|
||||
"CHANGELOG.md",
|
||||
@@ -2412,9 +2319,7 @@ def genericCache(name, action, mounts, cache_path):
|
||||
"name": "%s_%s" % (action, name),
|
||||
"image": PLUGINS_S3_CACHE,
|
||||
"settings": {
|
||||
"endpoint": {
|
||||
"from_secret": "cache_s3_server",
|
||||
},
|
||||
"endpoint": CACHE_S3_SERVER,
|
||||
"rebuild": rebuild,
|
||||
"restore": restore,
|
||||
"mount": mounts,
|
||||
@@ -2445,9 +2350,7 @@ def genericCachePurge(flush_path):
|
||||
"secret_key": {
|
||||
"from_secret": "cache_s3_secret_key",
|
||||
},
|
||||
"endpoint": {
|
||||
"from_secret": "cache_s3_server",
|
||||
},
|
||||
"endpoint": CACHE_S3_SERVER,
|
||||
"flush": True,
|
||||
"flush_age": 1,
|
||||
"flush_path": flush_path,
|
||||
@@ -2457,7 +2360,7 @@ def genericCachePurge(flush_path):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
@@ -2643,7 +2546,7 @@ def litmus(ctx, storage):
|
||||
"when": [
|
||||
{
|
||||
"event": ["push", "manual"],
|
||||
"branch": "main",
|
||||
"branch": ["main", "stable-*"],
|
||||
},
|
||||
{
|
||||
"event": "pull_request",
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
|
||||
when:
|
||||
- event: ["push", "manual"]
|
||||
branch: main
|
||||
|
||||
steps:
|
||||
- name: devdocs
|
||||
image: codeberg.org/xfix/plugin-codeberg-pages-deploy:1
|
||||
settings:
|
||||
folder: docs
|
||||
branch: docs
|
||||
git_config_email: ${CI_COMMIT_AUTHOR_EMAIL}
|
||||
git_config_name: ${CI_COMMIT_AUTHOR}
|
||||
ssh_key:
|
||||
from_secret: ssh_key
|
||||
125
CHANGELOG.md
125
CHANGELOG.md
@@ -1,2 +1,125 @@
|
||||
# Table of Contents
|
||||
# Changelog
|
||||
|
||||
## [2.0.5](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.5) - 2025-10-29
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@ScharfViktor
|
||||
|
||||
### ✅ Tests
|
||||
|
||||
- [full-ci] Bump reva 2.29.5 [[#1738](https://github.com/opencloud-eu/opencloud/pull/1738)]
|
||||
- Fix tests in the stable branch [[#1731](https://github.com/opencloud-eu/opencloud/pull/1731)]
|
||||
|
||||
## [2.0.4](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.4) - 2025-07-11
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@micbar
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- [Backport] fix: build_args is now an object [[#1213](https://github.com/opencloud-eu/opencloud/pull/1213)]
|
||||
|
||||
## [2.0.3](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.3) - 2025-07-10
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@ScharfViktor
|
||||
|
||||
### 📦️ Dependencies
|
||||
|
||||
- [full-ci] Reva bump 2.29.4 [[#1202](https://github.com/opencloud-eu/opencloud/pull/1202)]
|
||||
|
||||
## [2.0.2](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.2) - 2025-05-02
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@ScharfViktor
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Abort when the space root has already been created [[#766](https://github.com/opencloud-eu/opencloud/pull/766)]
|
||||
|
||||
## [2.0.1](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.1) - 2025-04-28
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@JammingBen, @ScharfViktor, @fschade, @micbar
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- fix(decomposeds3): enable async-uploads by default (#686) [[#694](https://github.com/opencloud-eu/opencloud/pull/694)]
|
||||
- fix(antivirus | backport): introduce a default max scan size for the full example deployment [[#620](https://github.com/opencloud-eu/opencloud/pull/620)]
|
||||
- [full-ci] chore(web): bump web to v2.1.1 [[#638](https://github.com/opencloud-eu/opencloud/pull/638)]
|
||||
|
||||
### 📦️ Dependencies
|
||||
|
||||
- chore: prepare release, bump version [[#731](https://github.com/opencloud-eu/opencloud/pull/731)]
|
||||
- Port #567 [[#689](https://github.com/opencloud-eu/opencloud/pull/689)]
|
||||
- chore: bump reva to v2.29.2 [[#681](https://github.com/opencloud-eu/opencloud/pull/681)]
|
||||
- build(deps): bump github.com/nats-io/nats-server/v2 [[#683](https://github.com/opencloud-eu/opencloud/pull/683)]
|
||||
|
||||
## [2.0.0](https://github.com/opencloud-eu/opencloud/releases/tag/v2.0.0) - 2025-03-26
|
||||
|
||||
### ❤️ Thanks to all contributors! ❤️
|
||||
|
||||
@JammingBen, @ScharfViktor, @aduffeck, @amrita-shrestha, @butonic, @dragonchaser, @dragotin, @individual-it, @kulmann, @micbar, @prashant-gurung899, @rhafer
|
||||
|
||||
### 💥 Breaking changes
|
||||
|
||||
- [posix] change storage users default to posixfs [[#237](https://github.com/opencloud-eu/opencloud/pull/237)]
|
||||
|
||||
### 🐛 Bug Fixes
|
||||
|
||||
- Bump reva to 2.29.1 [[#501](https://github.com/opencloud-eu/opencloud/pull/501)]
|
||||
- remove workaround for translation formatting [[#491](https://github.com/opencloud-eu/opencloud/pull/491)]
|
||||
- [full-ci] fix(collaboration): hide SaveAs and ExportAs buttons in web office [[#471](https://github.com/opencloud-eu/opencloud/pull/471)]
|
||||
- fix: add missing debug docker [[#481](https://github.com/opencloud-eu/opencloud/pull/481)]
|
||||
- Downgrade nats.go to 1.39.1 [[#479](https://github.com/opencloud-eu/opencloud/pull/479)]
|
||||
- fix cli driver initialization for "posix" [[#459](https://github.com/opencloud-eu/opencloud/pull/459)]
|
||||
- Do not cache when there was an error gathering the data [[#462](https://github.com/opencloud-eu/opencloud/pull/462)]
|
||||
- fix(storage-users): 'uploads sessions' command crash [[#446](https://github.com/opencloud-eu/opencloud/pull/446)]
|
||||
- fix: org name in multiarch dev build [[#431](https://github.com/opencloud-eu/opencloud/pull/431)]
|
||||
- fix local setup [[#440](https://github.com/opencloud-eu/opencloud/pull/440)]
|
||||
|
||||
### 📈 Enhancement
|
||||
|
||||
- [full-ci] chore(web): update web to v2.1.0 [[#497](https://github.com/opencloud-eu/opencloud/pull/497)]
|
||||
- Bump reva [[#474](https://github.com/opencloud-eu/opencloud/pull/474)]
|
||||
- Bump reva to pull in the latest fixes [[#451](https://github.com/opencloud-eu/opencloud/pull/451)]
|
||||
- Switch to jsoncs3 backend for app tokens and enable service by default [[#433](https://github.com/opencloud-eu/opencloud/pull/433)]
|
||||
- Completely remove "edition" from capabilities [[#434](https://github.com/opencloud-eu/opencloud/pull/434)]
|
||||
- feat: add post logout redirect uris for mobile clients [[#411](https://github.com/opencloud-eu/opencloud/pull/411)]
|
||||
- chore: bump version to v1.1.0 [[#422](https://github.com/opencloud-eu/opencloud/pull/422)]
|
||||
|
||||
### ✅ Tests
|
||||
|
||||
- [full-ci] add one more TUS test to expected to fail file [[#489](https://github.com/opencloud-eu/opencloud/pull/489)]
|
||||
- [full-ci]Remove mtime 500 issue from expected failure [[#467](https://github.com/opencloud-eu/opencloud/pull/467)]
|
||||
- add auth app to ocm test setup [[#472](https://github.com/opencloud-eu/opencloud/pull/472)]
|
||||
- use opencloudeu/cs3api-validator in CI [[#469](https://github.com/opencloud-eu/opencloud/pull/469)]
|
||||
- fix(test): Run app-auth test with jsoncs3 backend [[#460](https://github.com/opencloud-eu/opencloud/pull/460)]
|
||||
- Always run CLI tests with the decomposed storage driver [[#435](https://github.com/opencloud-eu/opencloud/pull/435)]
|
||||
- Disable the 'exclude' patterns on the path conditional for now [[#439](https://github.com/opencloud-eu/opencloud/pull/439)]
|
||||
- run CS3 API tests in CI [[#415](https://github.com/opencloud-eu/opencloud/pull/415)]
|
||||
- fix: fix path exclusion glob patterns [[#427](https://github.com/opencloud-eu/opencloud/pull/427)]
|
||||
- Cleanup woodpecker [[#430](https://github.com/opencloud-eu/opencloud/pull/430)]
|
||||
- enable main API test suite to run in CI [[#419](https://github.com/opencloud-eu/opencloud/pull/419)]
|
||||
- Run wopi tests in CI [[#416](https://github.com/opencloud-eu/opencloud/pull/416)]
|
||||
- Run `cliCommands` tests pipeline in CI [[#413](https://github.com/opencloud-eu/opencloud/pull/413)]
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
- docs(idp): Document how to add custom OIDC clients [[#476](https://github.com/opencloud-eu/opencloud/pull/476)]
|
||||
- Clean invalid documentation links [[#466](https://github.com/opencloud-eu/opencloud/pull/466)]
|
||||
|
||||
### 📦️ Dependencies
|
||||
|
||||
- build(deps): bump github.com/grpc-ecosystem/grpc-gateway/v2 from 2.26.1 to 2.26.3 [[#480](https://github.com/opencloud-eu/opencloud/pull/480)]
|
||||
- chore: update alpine to 3.21 [[#483](https://github.com/opencloud-eu/opencloud/pull/483)]
|
||||
- build(deps): bump github.com/nats-io/nats.go from 1.39.1 to 1.40.0 [[#464](https://github.com/opencloud-eu/opencloud/pull/464)]
|
||||
- build(deps): bump github.com/spf13/afero from 1.12.0 to 1.14.0 [[#436](https://github.com/opencloud-eu/opencloud/pull/436)]
|
||||
- build(deps): bump github.com/KimMachineGun/automemlimit from 0.7.0 to 0.7.1 [[#437](https://github.com/opencloud-eu/opencloud/pull/437)]
|
||||
- build(deps): bump golang.org/x/image from 0.24.0 to 0.25.0 [[#426](https://github.com/opencloud-eu/opencloud/pull/426)]
|
||||
- build(deps): bump go.opentelemetry.io/contrib/zpages from 0.57.0 to 0.60.0 [[#425](https://github.com/opencloud-eu/opencloud/pull/425)]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||

|
||||
|
||||
[](https://ci.opencloud.eu/repos/3)
|
||||
[](https://ci.opencloud.eu/repos/3/branches/stable-2.0)
|
||||
[](https://app.element.io/#/room/#opencloud:matrix.org)
|
||||
[](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ function backup_file () {
|
||||
# URL pattern of the download file
|
||||
# https://github.com/opencloud-eu/opencloud/releases/download/v1.0.0/opencloud-1.0.0-linux-amd64
|
||||
|
||||
dlversion="${OC_VERSION:-1.0.0}"
|
||||
dlversion="${OC_VERSION:-1.1.0}"
|
||||
dlurl="https://github.com/opencloud-eu/opencloud/releases/download/v${dlversion}/"
|
||||
|
||||
sandbox="opencloud-sandbox-${dlversion}"
|
||||
|
||||
@@ -50,11 +50,11 @@ OC_DOMAIN=
|
||||
ADMIN_PASSWORD=
|
||||
# Demo users should not be created on a production instance,
|
||||
# because their passwords are public. Defaults to "false".
|
||||
# Also see: https://doc.opencloud.eu/opencloud/latest/deployment/general/general-info.html#demo-users-and-groups
|
||||
# If demo users is set to "true", the following user accounts are created automatically:
|
||||
# alan, mary, margaret, dennis and lynn - the password is 'demo' for all.
|
||||
DEMO_USERS=
|
||||
# Define the openCloud loglevel used.
|
||||
# For more details see:
|
||||
# https://doc.opencloud.eu/opencloud/latest/deployment/services/env-vars-special-scope.html
|
||||
#
|
||||
LOG_LEVEL=
|
||||
# Define the kind of logging.
|
||||
# The default log can be read by machines.
|
||||
@@ -64,8 +64,6 @@ LOG_LEVEL=
|
||||
# Define the openCloud storage location. Set the paths for config and data to a local path.
|
||||
# Note that especially the data directory can grow big.
|
||||
# Leaving it default stores data in docker internal volumes.
|
||||
# For more details see:
|
||||
# https://doc.opencloud.eu/opencloud/next/deployment/general/general-info.html#default-paths
|
||||
# OC_CONFIG_DIR=/your/local/opencloud/config
|
||||
# OC_DATA_DIR=/your/local/opencloud/data
|
||||
|
||||
@@ -74,7 +72,7 @@ LOG_LEVEL=
|
||||
# Per default, S3 storage is disabled and the decomposed storage driver is used.
|
||||
# To enable S3 storage, uncomment the following line and configure the S3 storage.
|
||||
# For more details see:
|
||||
# https://doc.opencloud.eu/opencloud/next/deployment/storage/decomposeds3.html
|
||||
# https://docs.opencloud.eu/docs/admin/configuration/storage-decomposeds3
|
||||
# Note: the leading colon is required to enable the service.
|
||||
#DECOMPOSEDS3=:decomposeds3.yml
|
||||
# Configure the S3 storage endpoint. Defaults to "http://minio:9000" for testing purposes.
|
||||
@@ -94,16 +92,14 @@ DECOMPOSEDS3_BUCKET=
|
||||
# Minio domain. Defaults to "minio.opencloud.test".
|
||||
MINIO_DOMAIN=
|
||||
|
||||
# POSIX Storage configuration - optional
|
||||
# OpenCloud supports posix storage as primary storage.
|
||||
# Per default, S3 storage is disabled and the decomposed storage driver is used.
|
||||
# To enable POSIX storage, uncomment the following line.
|
||||
# OpenCloud uses POSIX storage as the default primary storage.
|
||||
# By default, Decomposed storage is disabled, and the POSIX storage driver is used.
|
||||
# To enable Decomposed storage, uncomment the following line.
|
||||
# Note: the leading colon is required to enable the service.
|
||||
#POSIX=:posix.yml
|
||||
#DECOMPOSED=:decomposed.yml
|
||||
|
||||
# Define SMPT settings if you would like to send OpenCloud email notifications.
|
||||
# For more details see:
|
||||
# https://doc.opencloud.eu/opencloud/latest/deployment/services/s-list/notifications.html
|
||||
#
|
||||
# NOTE: when configuring Inbucket, these settings have no effect, see inbucket.yml for details.
|
||||
# SMTP host to connect to.
|
||||
SMTP_HOST=
|
||||
@@ -205,7 +201,6 @@ COLLABORA_SSL_VERIFICATION=false
|
||||
|
||||
|
||||
### Debugging - Monitoring ###
|
||||
# Please see documentation at: https://opencloud.dev/opencloud/deployment/monitoring-tracing/
|
||||
# Note: the leading colon is required to enable the service.
|
||||
#MONITORING=:monitoring_tracing/monitoring.yml
|
||||
|
||||
@@ -215,6 +210,10 @@ COLLABORA_SSL_VERIFICATION=false
|
||||
# envvar in the OpenCloud Settings above by adding 'antivirus' to the list.
|
||||
# Note: the leading colon is required to enable the service.
|
||||
#CLAMAV=:clamav.yml
|
||||
# The maximum scan size the virus scanner can handle, needs adjustment in the scanner config as well.
|
||||
# Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB.
|
||||
# Defaults to "100MB"
|
||||
#ANTIVIRUS_MAX_SCAN_SIZE=
|
||||
# Image version of the ClamAV container.
|
||||
# Defaults to "latest"
|
||||
CLAMAV_DOCKER_TAG=
|
||||
@@ -246,4 +245,4 @@ COMPOSE_PATH_SEPARATOR=:
|
||||
# This MUST be the last line as it assembles the supplemental compose files to be used.
|
||||
# ALL supplemental configs must be added here, whether commented or not.
|
||||
# Each var must either be empty or contain :path/file.yml
|
||||
COMPOSE_FILE=docker-compose.yml${OPENCLOUD:-}${TIKA:-}${DECOMPOSEDS3:-}${DECOMPOSEDS3_MINIO:-}${POSIX:-}${COLLABORA:-}${MONITORING:-}${IMPORTER:-}${CLAMAV:-}${ONLYOFFICE:-}${INBUCKET:-}${EXTENSIONS:-}${UNZIP:-}${DRAWIO:-}${JSONVIEWER:-}${PROGRESSBARS:-}${EXTERNALSITES:-}
|
||||
COMPOSE_FILE=docker-compose.yml${OPENCLOUD:-}${TIKA:-}${DECOMPOSEDS3:-}${DECOMPOSEDS3_MINIO:-}${DECOMPOSED:-}${COLLABORA:-}${MONITORING:-}${IMPORTER:-}${CLAMAV:-}${ONLYOFFICE:-}${INBUCKET:-}${EXTENSIONS:-}${UNZIP:-}${DRAWIO:-}${JSONVIEWER:-}${PROGRESSBARS:-}${EXTERNALSITES:-}
|
||||
|
||||
@@ -4,6 +4,7 @@ services:
|
||||
environment:
|
||||
ANTIVIRUS_SCANNER_TYPE: "clamav"
|
||||
ANTIVIRUS_CLAMAV_SOCKET: "/var/run/clamav/clamd.sock"
|
||||
ANTIVIRUS_MAX_SCAN_SIZE: ${ANTIVIRUS_MAX_SCAN_SIZE:-100MB}
|
||||
# the antivirus service needs manual startup, see .env and opencloud.yaml for START_ADDITIONAL_SERVICES
|
||||
# configure the antivirus service
|
||||
POSTPROCESSING_STEPS: "virusscan"
|
||||
|
||||
@@ -53,7 +53,7 @@ services:
|
||||
restart: always
|
||||
|
||||
collabora:
|
||||
image: collabora/code:24.04.12.2.1
|
||||
image: collabora/code:24.04.13.2.1
|
||||
# release notes: https://www.collaboraonline.com/release-notes/
|
||||
networks:
|
||||
opencloud-net:
|
||||
@@ -80,6 +80,7 @@ services:
|
||||
logging:
|
||||
driver: ${LOG_DRIVER:-local}
|
||||
restart: always
|
||||
command: ["bash", "-c", "coolconfig generate-proof-key ; /start-collabora-online.sh"]
|
||||
entrypoint: ['/bin/bash', '-c']
|
||||
command: ['coolconfig generate-proof-key && /start-collabora-online.sh']
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://localhost:9980/hosting/discovery" ]
|
||||
|
||||
6
deployments/examples/opencloud_full/decomposed.yml
Normal file
6
deployments/examples/opencloud_full/decomposed.yml
Normal file
@@ -0,0 +1,6 @@
|
||||
---
|
||||
services:
|
||||
opencloud:
|
||||
environment:
|
||||
STORAGE_USERS_DRIVER: decomposed
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
---
|
||||
services:
|
||||
opencloud:
|
||||
environment:
|
||||
# activate posix storage driver for users
|
||||
STORAGE_USERS_DRIVER: posix
|
||||
# keep system data on decomposed storage since this are only small files atm
|
||||
STORAGE_SYSTEM_DRIVER: decomposed
|
||||
# posix requires a shared cache store
|
||||
STORAGE_USERS_ID_CACHE_STORE: "nats-js-kv"
|
||||
42
go.mod
42
go.mod
@@ -5,7 +5,7 @@ go 1.24.1
|
||||
require (
|
||||
dario.cat/mergo v1.0.1
|
||||
github.com/CiscoM31/godata v1.0.10
|
||||
github.com/KimMachineGun/automemlimit v0.7.0
|
||||
github.com/KimMachineGun/automemlimit v0.7.1
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/MicahParks/keyfunc/v2 v2.1.0
|
||||
github.com/Nerzal/gocloak/v13 v13.9.0
|
||||
@@ -35,14 +35,14 @@ require (
|
||||
github.com/go-micro/plugins/v4/wrapper/trace/opentelemetry v1.2.0
|
||||
github.com/go-playground/validator/v10 v10.25.0
|
||||
github.com/gofrs/uuid v4.4.0+incompatible
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2
|
||||
github.com/golang/protobuf v1.5.4
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/go-tika v0.3.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gookit/config/v2 v2.2.5
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
|
||||
github.com/invopop/validation v0.8.0
|
||||
github.com/jellydator/ttlcache/v2 v2.11.1
|
||||
github.com/jellydator/ttlcache/v3 v3.3.0
|
||||
@@ -55,15 +55,15 @@ require (
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/mna/pigeon v1.3.0
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
|
||||
github.com/nats-io/nats-server/v2 v2.10.26
|
||||
github.com/nats-io/nats.go v1.39.1
|
||||
github.com/nats-io/nats-server/v2 v2.11.1
|
||||
github.com/nats-io/nats.go v1.41.0
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/ginkgo/v2 v2.23.0
|
||||
github.com/onsi/gomega v1.36.2
|
||||
github.com/onsi/ginkgo/v2 v2.23.3
|
||||
github.com/onsi/gomega v1.36.3
|
||||
github.com/open-policy-agent/opa v1.2.0
|
||||
github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606
|
||||
github.com/opencloud-eu/reva/v2 v2.29.5
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/owncloud/libre-graph-api-go v1.0.5-0.20240829135935-80dc00d6f5ea
|
||||
github.com/pkg/errors v0.9.1
|
||||
@@ -73,10 +73,10 @@ require (
|
||||
github.com/riandyrn/otelchi v0.12.1
|
||||
github.com/rogpeppe/go-internal v1.14.1
|
||||
github.com/rs/cors v1.11.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/shamaton/msgpack/v2 v2.2.3
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.12.0
|
||||
github.com/spf13/afero v1.14.0
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/test-go/testify v1.1.4
|
||||
@@ -104,7 +104,7 @@ require (
|
||||
golang.org/x/sync v0.12.0
|
||||
golang.org/x/term v0.30.0
|
||||
golang.org/x/text v0.23.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb
|
||||
google.golang.org/grpc v1.71.0
|
||||
google.golang.org/protobuf v1.36.5
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
@@ -116,7 +116,7 @@ require (
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
@@ -197,7 +197,7 @@ require (
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-redis/redis/v8 v8.11.5 // indirect
|
||||
github.com/go-resty/resty/v2 v2.7.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.9.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/go-test/deep v1.1.0 // indirect
|
||||
@@ -209,12 +209,13 @@ require (
|
||||
github.com/goccy/go-yaml v1.11.2 // indirect
|
||||
github.com/gofrs/flock v0.12.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.9.2 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/go-tpm v0.9.3 // indirect
|
||||
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
|
||||
github.com/google/renameio/v2 v2.0.0 // indirect
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
@@ -253,7 +254,7 @@ require (
|
||||
github.com/minio/crc64nvme v1.0.1 // indirect
|
||||
github.com/minio/highwayhash v1.0.3 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.87 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.88 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
@@ -287,6 +288,7 @@ require (
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
|
||||
github.com/sethvargo/go-diceware v0.5.0 // indirect
|
||||
github.com/sethvargo/go-password v0.3.1 // indirect
|
||||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
|
||||
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
|
||||
@@ -307,9 +309,9 @@ require (
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.19 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.19 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.19 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.20 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.20 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
|
||||
@@ -320,11 +322,11 @@ require (
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/time v0.10.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.31.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
|
||||
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
|
||||
87
go.sum
87
go.sum
@@ -61,15 +61,15 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
|
||||
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CiscoM31/godata v1.0.10 h1:DZdJ6M8QNh4HquvDDOqNLu6h77Wl86KGK7Qlbmb90sk=
|
||||
github.com/CiscoM31/godata v1.0.10/go.mod h1:ZMiT6JuD3Rm83HEtiTx4JEChsd25YCrxchKGag/sdTc=
|
||||
github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c h1:ocsNvQ2tNHme4v/lTs17HROamc7mFzZfzWcg4m+UXN0=
|
||||
github.com/DeepDiver1975/secure v0.0.0-20240611112133-abc838fb797c/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
|
||||
github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
|
||||
github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||
github.com/KimMachineGun/automemlimit v0.7.1 h1:QcG/0iCOLChjfUweIMC3YL5Xy9C3VBeNmCZHrZfJMBw=
|
||||
github.com/KimMachineGun/automemlimit v0.7.1/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
|
||||
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
|
||||
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
|
||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||
@@ -111,6 +111,8 @@ github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964 h1:I9YN9WMo3SUh7p/
|
||||
github.com/amoghe/go-crypt v0.0.0-20220222110647-20eada5f5964/go.mod h1:eFiR01PwTcpbzXtdMces7zxg6utvFM5puiWHpWB8D/k=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0=
|
||||
github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
@@ -416,8 +418,8 @@ github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo=
|
||||
github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw=
|
||||
github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI=
|
||||
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
@@ -453,10 +455,10 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
|
||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
@@ -526,6 +528,8 @@ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-tika v0.3.1 h1:l+jr10hDhZjcgxFRfcQChRLo1bPXQeLFluMyvDhXTTA=
|
||||
github.com/google/go-tika v0.3.1/go.mod h1:DJh5N8qxXIl85QkqmXknd+PeeRkUOTbvwyYf7ieDz6c=
|
||||
github.com/google/go-tpm v0.9.3 h1:+yx0/anQuGzi+ssRqeD6WpXjW2L/V0dItUayO0i9sRc=
|
||||
github.com/google/go-tpm v0.9.3/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@@ -577,8 +581,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
@@ -785,8 +789,8 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD
|
||||
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.87 h1:nkr9x0u53PespfxfUqxP3UYWiE2a41gaofgNnC4Y8WQ=
|
||||
github.com/minio/minio-go/v7 v7.0.87/go.mod h1:33+O8h0tO7pCeCWwBVa07RhVVfB/3vS4kEX7rwYKmIg=
|
||||
github.com/minio/minio-go/v7 v7.0.88 h1:v8MoIJjwYxOkehp+eiLIuvXk87P2raUtoU5klrAAshs=
|
||||
github.com/minio/minio-go/v7 v7.0.88/go.mod h1:33+O8h0tO7pCeCWwBVa07RhVVfB/3vS4kEX7rwYKmIg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
@@ -823,10 +827,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
|
||||
github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8=
|
||||
github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
|
||||
github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
|
||||
github.com/nats-io/nats-server/v2 v2.10.26 h1:2i3rAsn4x5/2eOt2NEmuI/iSb8zfHpIUI7yiaOWbo2c=
|
||||
github.com/nats-io/nats-server/v2 v2.10.26/go.mod h1:SGzoWGU8wUVnMr/HJhEMv4R8U4f7hF4zDygmRxpNsvg=
|
||||
github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk=
|
||||
github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
|
||||
github.com/nats-io/nats-server/v2 v2.11.1 h1:LwdauqMqMNhTxTN3+WFTX6wGDOKntHljgZ+7gL5HCnk=
|
||||
github.com/nats-io/nats-server/v2 v2.11.1/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI=
|
||||
github.com/nats-io/nats.go v1.41.0 h1:PzxEva7fflkd+n87OtQTXqCTyLfIIMFJBpyccHLE2Ko=
|
||||
github.com/nats-io/nats.go v1.41.0/go.mod h1:wV73x0FSI/orHPSYoyMeJB+KajMDoWyXmFaRrrYaaTo=
|
||||
github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
|
||||
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
@@ -852,17 +856,17 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ=
|
||||
github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
|
||||
github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
|
||||
github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
|
||||
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/open-policy-agent/opa v1.2.0 h1:88NDVCM0of1eO6Z4AFeL3utTEtMuwloFmWWU7dRV1z0=
|
||||
github.com/open-policy-agent/opa v1.2.0/go.mod h1:30euUmOvuBoebRCcJ7DMF42bRBOPznvt0ACUMYDUGVY=
|
||||
github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606 h1:ASUV6F7hHgar1RrnPfTQhtd+/KMeTCn7LhLzda0+HKY=
|
||||
github.com/opencloud-eu/reva/v2 v2.28.1-0.20250318145617-dd5b9b6fb606/go.mod h1:XWp81Uok1opSID0HeITjvxJqdorltHVx+iJv4IlWzPo=
|
||||
github.com/opencloud-eu/reva/v2 v2.29.5 h1:T4RjTSDk650PVn0hAL8HpF+61ChqQ/UwNoWMYYAMOGU=
|
||||
github.com/opencloud-eu/reva/v2 v2.29.5/go.mod h1:+nkCU7w6E6cyNSsKRYj1rb0cCI7QswEQ7KOPljctebM=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
@@ -978,11 +982,10 @@ github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0t
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
||||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
|
||||
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
|
||||
github.com/russellhaering/goxmldsig v1.4.0 h1:8UcDh/xGyQiyrW+Fq5t8f+l2DLB1+zlhYzkPUJ7Qhys=
|
||||
github.com/russellhaering/goxmldsig v1.4.0/go.mod h1:gM4MDENBQf7M+V824SGfyIUVFWydB7n0KkEubVJl+Tw=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -1000,6 +1003,8 @@ github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aep
|
||||
github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sethvargo/go-diceware v0.5.0 h1:exrQ7GpaBo00GqRVM1N8ChXSsi3oS7tjQiIehsD+yR0=
|
||||
github.com/sethvargo/go-diceware v0.5.0/go.mod h1:Lg1SyPS7yQO6BBgTN5r4f2MUDkqGfLWsOjHPY0kA8iw=
|
||||
github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU=
|
||||
github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs=
|
||||
github.com/shamaton/msgpack/v2 v2.2.3 h1:uDOHmxQySlvlUYfQwdjxyybAOzjlQsD1Vjy+4jmO9NM=
|
||||
@@ -1032,8 +1037,8 @@ github.com/spacewander/go-suffix-tree v0.0.0-20191010040751-0865e368c784/go.mod
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
|
||||
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
|
||||
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
|
||||
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||
@@ -1140,12 +1145,12 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
|
||||
go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
|
||||
go.etcd.io/etcd/api/v3 v3.5.19 h1:w3L6sQZGsWPuBxRQ4m6pPP3bVUtV8rjW033EGwlr0jw=
|
||||
go.etcd.io/etcd/api/v3 v3.5.19/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.19 h1:9VsyGhg0WQGjDWWlDI4VuaS9PZJGNbPkaHEIuLwtixk=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.19/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
|
||||
go.etcd.io/etcd/client/v3 v3.5.19 h1:+4byIz6ti3QC28W0zB0cEZWwhpVHXdrKovyycJh1KNo=
|
||||
go.etcd.io/etcd/client/v3 v3.5.19/go.mod h1:FNzyinmMIl0oVsty1zA3hFeUrxXI/JpEnz4sG+POzjU=
|
||||
go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
|
||||
go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ=
|
||||
go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
@@ -1475,8 +1480,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
|
||||
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1594,10 +1599,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 h1:DMTIbak9GhdaSxEjvVzAeNZvyc03I61duqNbnm3SU0M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
||||
@@ -30,10 +30,10 @@ dev-docker-multiarch:
|
||||
docker buildx rm opencloudbuilder || true
|
||||
docker buildx create --platform linux/arm64,linux/amd64 --name opencloudbuilder
|
||||
docker buildx use opencloudbuilder
|
||||
cd .. && docker buildx build --platform linux/arm64,linux/amd64 --output type=docker --file opencloud/docker/Dockerfile.multiarch --tag opencloud-eu/opencloud:dev-multiarch .
|
||||
cd .. && docker buildx build --platform linux/arm64,linux/amd64 --output type=docker --file opencloud/docker/Dockerfile.multiarch --tag opencloudeu/opencloud:dev-multiarch .
|
||||
docker buildx rm opencloudbuilder
|
||||
|
||||
.PHONY: debug-docker
|
||||
debug-docker:
|
||||
$(MAKE) --no-print-directory debug-linux-docker-$(GOARCH)
|
||||
docker build -f docker/Dockerfile.linux.debug.$(GOARCH) -t opencloud-eu/opencloud:debug .
|
||||
docker build -f docker/Dockerfile.linux.debug.$(GOARCH) -t opencloudeu/opencloud:debug .
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM amd64/alpine:3.20
|
||||
FROM amd64/alpine:3.21
|
||||
|
||||
ARG VERSION=""
|
||||
ARG REVISION=""
|
||||
@@ -22,6 +22,8 @@ RUN addgroup -g 1000 -S opencloud-group && \
|
||||
adduser -S --ingroup opencloud-group --uid 1000 opencloud-user --home /var/lib/opencloud
|
||||
|
||||
RUN mkdir -p /var/lib/opencloud && \
|
||||
# Pre-create the web directory to avoid permission issues
|
||||
mkdir -p /var/lib/opencloud/web/assets/apps && \
|
||||
chown -R opencloud-user:opencloud-group /var/lib/opencloud && \
|
||||
chmod -R 751 /var/lib/opencloud && \
|
||||
mkdir -p /etc/opencloud && \
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM arm64v8/alpine:3.20
|
||||
FROM arm64v8/alpine:3.21
|
||||
|
||||
ARG VERSION=""
|
||||
ARG REVISION=""
|
||||
@@ -22,6 +22,8 @@ RUN addgroup -g 1000 -S opencloud-group && \
|
||||
adduser -S --ingroup opencloud-group --uid 1000 opencloud-user --home /var/lib/opencloud
|
||||
|
||||
RUN mkdir -p /var/lib/opencloud && \
|
||||
# Pre-create the web directory to avoid permission issues
|
||||
mkdir -p /var/lib/opencloud/web/assets/apps && \
|
||||
chown -R opencloud-user:opencloud-group /var/lib/opencloud && \
|
||||
chmod -R 751 /var/lib/opencloud && \
|
||||
mkdir -p /etc/opencloud && \
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM amd64/alpine:latest
|
||||
FROM amd64/alpine:edge
|
||||
|
||||
ARG VERSION=""
|
||||
ARG REVISION=""
|
||||
@@ -22,6 +22,8 @@ RUN addgroup -g 1000 -S opencloud-group && \
|
||||
adduser -S --ingroup opencloud-group --uid 1000 opencloud-user --home /var/lib/opencloud
|
||||
|
||||
RUN mkdir -p /var/lib/opencloud && \
|
||||
# Pre-create the web directory to avoid permission issues
|
||||
mkdir -p /var/lib/opencloud/web/assets/apps && \
|
||||
chown -R opencloud-user:opencloud-group /var/lib/opencloud && \
|
||||
chmod -R 751 /var/lib/opencloud && \
|
||||
mkdir -p /etc/opencloud && \
|
||||
|
||||
43
opencloud/docker/Dockerfile.linux.debug.arm64
Normal file
43
opencloud/docker/Dockerfile.linux.debug.arm64
Normal file
@@ -0,0 +1,43 @@
|
||||
FROM arm64v8/alpine:edge
|
||||
|
||||
ARG VERSION=""
|
||||
ARG REVISION=""
|
||||
|
||||
RUN apk add --no-cache attr bash ca-certificates curl delve inotify-tools libc6-compat mailcap tree vips patch && \
|
||||
echo 'hosts: files dns' >| /etc/nsswitch.conf
|
||||
|
||||
LABEL maintainer="OpenCloud GmbH <devops@opencloud.eu>" \
|
||||
org.opencontainers.image.title="OpenCloud" \
|
||||
org.opencontainers.image.vendor="OpenCloud GmbH" \
|
||||
org.opencontainers.image.authors="OpenCloud GmbH" \
|
||||
org.opencontainers.image.description="OpenCloud is a modern file-sync and share platform" \
|
||||
org.opencontainers.image.licenses="Apache-2.0" \
|
||||
org.opencontainers.image.documentation="https://github.com/opencloud-eu/opencloud" \
|
||||
org.opencontainers.image.url="https://hub.docker.com/r/opencloud-eu/opencloud" \
|
||||
org.opencontainers.image.source="https://github.com/opencloud-eu/opencloud" \
|
||||
org.opencontainers.image.version="${VERSION}" \
|
||||
org.opencontainers.image.revision="${REVISION}"
|
||||
|
||||
RUN addgroup -g 1000 -S opencloud-group && \
|
||||
adduser -S --ingroup opencloud-group --uid 1000 opencloud-user --home /var/lib/opencloud
|
||||
|
||||
RUN mkdir -p /var/lib/opencloud && \
|
||||
# Pre-create the web directory to avoid permission issues
|
||||
mkdir -p /var/lib/opencloud/web/assets/apps && \
|
||||
chown -R opencloud-user:opencloud-group /var/lib/opencloud && \
|
||||
chmod -R 751 /var/lib/opencloud && \
|
||||
mkdir -p /etc/opencloud && \
|
||||
chown -R opencloud-user:opencloud-group /etc/opencloud && \
|
||||
chmod -R 751 /etc/opencloud
|
||||
|
||||
VOLUME [ "/var/lib/opencloud", "/etc/opencloud" ]
|
||||
WORKDIR /var/lib/opencloud
|
||||
|
||||
USER 1000
|
||||
|
||||
EXPOSE 9200/tcp
|
||||
|
||||
ENTRYPOINT ["/usr/bin/opencloud"]
|
||||
CMD ["server"]
|
||||
|
||||
COPY dist/binaries/opencloud-linux-arm64 /usr/bin/opencloud
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:alpine3.20 AS build
|
||||
FROM golang:alpine3.21 AS build
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
ARG VERSION
|
||||
@@ -37,6 +37,8 @@ RUN addgroup -g 1000 -S opencloud-group && \
|
||||
adduser -S --ingroup opencloud-group --uid 1000 opencloud-user --home /var/lib/opencloud
|
||||
|
||||
RUN mkdir -p /var/lib/opencloud && \
|
||||
# Pre-create the web directory to avoid permission issues
|
||||
mkdir -p /var/lib/opencloud/web/assets/apps && \
|
||||
chown -R opencloud-user:opencloud-group /var/lib/opencloud && \
|
||||
chmod -R 751 /var/lib/opencloud && \
|
||||
mkdir -p /etc/opencloud && \
|
||||
|
||||
@@ -28,7 +28,7 @@ var (
|
||||
|
||||
// regex to determine if a node is trashed or versioned.
|
||||
// 9113a718-8285-4b32-9042-f930f1a58ac2.REV.2024-05-22T07:32:53.89969726Z
|
||||
_versionRegex = regexp.MustCompile(`\.REV\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?Z$`)
|
||||
_versionRegex = regexp.MustCompile(`\.REV\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?Z(\.\d+)?$`)
|
||||
// 9113a718-8285-4b32-9042-f930f1a58ac2.T.2024-05-23T08:25:20.006571811Z <- this HAS a symlink
|
||||
_trashRegex = regexp.MustCompile(`\.T\.[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?Z$`)
|
||||
)
|
||||
|
||||
@@ -11,17 +11,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
authapp "github.com/opencloud-eu/opencloud/services/auth-app/pkg/command"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/mohae/deepcopy"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
notifications "github.com/opencloud-eu/opencloud/services/notifications/pkg/command"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/logger"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
|
||||
"github.com/thejerf/suture/v4"
|
||||
|
||||
occfg "github.com/opencloud-eu/opencloud/pkg/config"
|
||||
"github.com/opencloud-eu/opencloud/pkg/log"
|
||||
ogrpc "github.com/opencloud-eu/opencloud/pkg/service/grpc"
|
||||
@@ -31,6 +23,7 @@ import (
|
||||
appProvider "github.com/opencloud-eu/opencloud/services/app-provider/pkg/command"
|
||||
appRegistry "github.com/opencloud-eu/opencloud/services/app-registry/pkg/command"
|
||||
audit "github.com/opencloud-eu/opencloud/services/audit/pkg/command"
|
||||
authapp "github.com/opencloud-eu/opencloud/services/auth-app/pkg/command"
|
||||
authbasic "github.com/opencloud-eu/opencloud/services/auth-basic/pkg/command"
|
||||
authmachine "github.com/opencloud-eu/opencloud/services/auth-machine/pkg/command"
|
||||
authservice "github.com/opencloud-eu/opencloud/services/auth-service/pkg/command"
|
||||
@@ -44,6 +37,7 @@ import (
|
||||
idp "github.com/opencloud-eu/opencloud/services/idp/pkg/command"
|
||||
invitations "github.com/opencloud-eu/opencloud/services/invitations/pkg/command"
|
||||
nats "github.com/opencloud-eu/opencloud/services/nats/pkg/command"
|
||||
notifications "github.com/opencloud-eu/opencloud/services/notifications/pkg/command"
|
||||
ocdav "github.com/opencloud-eu/opencloud/services/ocdav/pkg/command"
|
||||
ocm "github.com/opencloud-eu/opencloud/services/ocm/pkg/command"
|
||||
ocs "github.com/opencloud-eu/opencloud/services/ocs/pkg/command"
|
||||
@@ -64,6 +58,10 @@ import (
|
||||
web "github.com/opencloud-eu/opencloud/services/web/pkg/command"
|
||||
webdav "github.com/opencloud-eu/opencloud/services/webdav/pkg/command"
|
||||
webfinger "github.com/opencloud-eu/opencloud/services/webfinger/pkg/command"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/events/stream"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/logger"
|
||||
"github.com/opencloud-eu/reva/v2/pkg/rgrpc/todo/pool"
|
||||
"github.com/thejerf/suture/v4"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -160,6 +158,11 @@ func NewService(ctx context.Context, options ...Option) (*Service, error) {
|
||||
cfg.AppRegistry.Commons = cfg.Commons
|
||||
return appRegistry.Execute(cfg.AppRegistry)
|
||||
})
|
||||
reg(3, opts.Config.AuthApp.Service.Name, func(ctx context.Context, cfg *occfg.Config) error {
|
||||
cfg.AuthApp.Context = ctx
|
||||
cfg.AuthApp.Commons = cfg.Commons
|
||||
return authapp.Execute(cfg.AuthApp)
|
||||
})
|
||||
reg(3, opts.Config.AuthBasic.Service.Name, func(ctx context.Context, cfg *occfg.Config) error {
|
||||
cfg.AuthBasic.Context = ctx
|
||||
cfg.AuthBasic.Commons = cfg.Commons
|
||||
@@ -324,11 +327,6 @@ func NewService(ctx context.Context, options ...Option) (*Service, error) {
|
||||
cfg.Audit.Commons = cfg.Commons
|
||||
return audit.Execute(cfg.Audit)
|
||||
})
|
||||
areg(opts.Config.AuthApp.Service.Name, func(ctx context.Context, cfg *occfg.Config) error {
|
||||
cfg.AuthApp.Context = ctx
|
||||
cfg.AuthApp.Commons = cfg.Commons
|
||||
return authapp.Execute(cfg.AuthApp)
|
||||
})
|
||||
areg(opts.Config.Policies.Service.Name, func(ctx context.Context, cfg *occfg.Config) error {
|
||||
cfg.Policies.Context = ctx
|
||||
cfg.Policies.Commons = cfg.Commons
|
||||
|
||||
@@ -16,7 +16,7 @@ var (
|
||||
// LatestTag is the latest released version plus the dev meta version.
|
||||
// Will be overwritten by the release pipeline
|
||||
// Needs a manual change for every tagged release
|
||||
LatestTag = "1.1.0+dev"
|
||||
LatestTag = "2.0.5+dev"
|
||||
|
||||
// Date indicates the build date.
|
||||
// This has been removed, it looks like you can only replace static strings with recent go versions
|
||||
|
||||
124
release-config.ts
Normal file
124
release-config.ts
Normal file
@@ -0,0 +1,124 @@
|
||||
export default {
|
||||
changeTypes: [
|
||||
{
|
||||
title: "💥 Breaking changes",
|
||||
labels: ["breaking", "Type:Breaking-Change"],
|
||||
bump: "major",
|
||||
weight: 3,
|
||||
},
|
||||
{
|
||||
title: "🔒 Security",
|
||||
labels: ["security", "Type:Security"],
|
||||
bump: "patch",
|
||||
weight: 2,
|
||||
},
|
||||
{
|
||||
title: "✨ Features",
|
||||
labels: ["feature", "Type:Feature"],
|
||||
bump: "minor",
|
||||
weight: 1,
|
||||
},
|
||||
{
|
||||
title: "📈 Enhancement",
|
||||
labels: ["enhancement", "refactor", "Type:Enhancement"],
|
||||
bump: "minor",
|
||||
},
|
||||
{
|
||||
title: "🐛 Bug Fixes",
|
||||
labels: ["bug", "Type:Bug"],
|
||||
bump: "patch",
|
||||
},
|
||||
{
|
||||
title: "📚 Documentation",
|
||||
labels: ["docs", "documentation", "Type:Documentation"],
|
||||
bump: "patch",
|
||||
},
|
||||
{
|
||||
title: "✅ Tests",
|
||||
labels: ["test", "tests", "Type:Test"],
|
||||
bump: "patch",
|
||||
},
|
||||
{
|
||||
title: "📦️ Dependencies",
|
||||
labels: ["dependency", "dependencies", "Type:Dependencies"],
|
||||
bump: "patch",
|
||||
weight: -1,
|
||||
},
|
||||
],
|
||||
useVersionPrefixV: true,
|
||||
getLatestTag: ({ exec }) => {
|
||||
// the plugin uses the latest tag to determine the next version
|
||||
// and the changes that are included in the upcoming release.
|
||||
const branch = getBranch(exec);
|
||||
let tags = getTags(exec);
|
||||
|
||||
if (branch.startsWith("stable-")) {
|
||||
const [_, majorAndMinor] = branch.split("-");
|
||||
// we only care about tags that are within the range of the current stable branch.
|
||||
// e.g. if the branch is stable-1.2, we only care about tags that are v1.2.x.
|
||||
const matchingTags = tags.filter((t) =>
|
||||
t.startsWith(`v${majorAndMinor}`)
|
||||
);
|
||||
|
||||
if (matchingTags.length) {
|
||||
tags = matchingTags;
|
||||
}
|
||||
}
|
||||
|
||||
return tags.pop() || "v0.0.0";
|
||||
},
|
||||
useLatestRelease: ({ exec, nextVersion }) => {
|
||||
// check if the release should be marked as latest release on GitHub.
|
||||
const tags = getTags(exec);
|
||||
const latestTag = tags.pop() || "v0.0.0";
|
||||
return compareVersions(latestTag, nextVersion) === -1;
|
||||
},
|
||||
};
|
||||
|
||||
const parseVersion = (tag: string) => {
|
||||
const version = tag.startsWith("v") ? tag.slice(1) : tag;
|
||||
const [main, pre] = version.split("-");
|
||||
const [major, minor, patch] = main.split(".").map(Number);
|
||||
return { major, minor, patch, pre };
|
||||
};
|
||||
|
||||
const getBranch = (exec: any): string => {
|
||||
return exec("git rev-parse --abbrev-ref HEAD", {
|
||||
silent: true,
|
||||
}).stdout.trim();
|
||||
};
|
||||
|
||||
const getTags = (exec: any) => {
|
||||
exec("git fetch --tags", { silent: true });
|
||||
const tagsOutput = exec("git tag", { silent: true }).stdout.trim();
|
||||
const tags: string[] = tagsOutput ? tagsOutput.split("\n") : [];
|
||||
return tags.filter((tag) => tag.startsWith("v")).sort(compareVersions);
|
||||
};
|
||||
|
||||
const compareVersions = (a: string, b: string) => {
|
||||
const va = parseVersion(a);
|
||||
const vb = parseVersion(b);
|
||||
|
||||
if (va.major !== vb.major) {
|
||||
return va.major - vb.major;
|
||||
}
|
||||
if (va.minor !== vb.minor) {
|
||||
return va.minor - vb.minor;
|
||||
}
|
||||
if (va.patch !== vb.patch) {
|
||||
return va.patch - vb.patch;
|
||||
}
|
||||
|
||||
if (va.pre && !vb.pre) {
|
||||
return -1;
|
||||
}
|
||||
if (!va.pre && vb.pre) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (va.pre && vb.pre) {
|
||||
return va.pre.localeCompare(vb.pre);
|
||||
}
|
||||
|
||||
return 0;
|
||||
};
|
||||
@@ -4,7 +4,10 @@ The `antivirus` service is responsible for scanning files for viruses.
|
||||
|
||||
## Memory Considerations
|
||||
|
||||
The antivirus service can consume considerably amounts of memory. This is relevant to provide or define sufficient memory for the deployment selected. To avoid out of memory (OOM) situations, the following equation gives a rough overview based on experiences made. The memory calculation comes without any guarantee, is intended as overview only and subject of change.
|
||||
The antivirus service can consume considerable amounts of memory.
|
||||
This is relevant to provide or define sufficient memory for the deployment selected.
|
||||
To avoid out of memory (OOM) situations, the following equation gives a rough overview based on experiences made.
|
||||
The memory calculation comes without any guarantee, is intended as overview only and subject of change.
|
||||
|
||||
`memory limit` = `max file size` x `workers` x `factor 8 - 14`
|
||||
|
||||
@@ -19,17 +22,25 @@ With:
|
||||
|
||||
### Antivirus Scanner Type
|
||||
|
||||
The antivirus service currently supports [ICAP](https://tools.ietf.org/html/rfc3507) and [ClamAV](http://www.clamav.net/index.html) as antivirus scanners. The `ANTIVIRUS_SCANNER_TYPE` environment variable is used to select the scanner. The detailed configuration for each scanner heavily depends on the scanner type selected. See the environment variables for more details.
|
||||
The antivirus service currently supports [ICAP](https://tools.ietf.org/html/rfc3507) and [ClamAV](http://www.clamav.net/index.html) as antivirus scanners.
|
||||
The `ANTIVIRUS_SCANNER_TYPE` environment variable is used to select the scanner.
|
||||
The detailed configuration for each scanner heavily depends on the scanner type selected.
|
||||
See the environment variables for more details.
|
||||
|
||||
- For `icap`, only scanners using the `X-Infection-Found` header are currently supported.
|
||||
- For `clamav` only local sockets can currently be configured.
|
||||
|
||||
### Maximum Scan Size
|
||||
|
||||
Several factors can make it necessary to limit the maximum filesize the antivirus service will use for scanning. Use the `ANTIVIRUS_MAX_SCAN_SIZE` environment variable to scan only a given amount of bytes. Obviously, it is recommended to scan the whole file, but several factors like scanner type and version, bandwidth, performance issues, etc. might make a limit necessary.
|
||||
Several factors can make it necessary to limit the maximum filesize the antivirus service uses for scanning.
|
||||
Use the `ANTIVIRUS_MAX_SCAN_SIZE` environment variable to specify the maximum file size to be scanned.
|
||||
|
||||
Even if it's recommended to scan each file, several factors like scanner type and version,
|
||||
bandwidth, performance issues, etc. might make a limit necessary.
|
||||
|
||||
**IMPORTANT**
|
||||
> Streaming of files to the virus scan service still [needs to be implemented](https://github.com/owncloud/ocis/issues/6803). To prevent OOM errors `ANTIVIRUS_MAX_SCAN_SIZE` needs to be set lower than available ram.
|
||||
> Streaming of files to the virus scan service still [needs to be implemented](https://github.com/owncloud/ocis/issues/6803).
|
||||
> To prevent OOM errors `ANTIVIRUS_MAX_SCAN_SIZE` needs to be set lower than available ram and or the maximum file size that can be scanned by the virus scanner.
|
||||
|
||||
### Antivirus Workers
|
||||
|
||||
@@ -41,7 +52,7 @@ The antivirus service allows three different ways of handling infected files. Th
|
||||
|
||||
- `delete`: (default): Infected files will be deleted immediately, further postprocessing is cancelled.
|
||||
- `abort`: (advanced option): Infected files will be kept, further postprocessing is cancelled. Files can be manually retrieved and inspected by an admin. To identify the file for further investigation, the antivirus service logs the abort/infected state including the file ID. The file is located in the `storage/users/uploads` folder of the OpenCloud data directory and persists until it is manually deleted by the admin via the [Manage Unfinished Uploads](https://github.com/opencloud-eu/opencloud/tree/main/services/storage-users#manage-unfinished-uploads) command.
|
||||
- `continue`: (obviously not recommended): Infected files will be marked via metadata as infected but postprocessing continues normally. Note: Infected Files are moved to their final destination and therefore not prevented from download which includes the risk of spreading viruses.
|
||||
- `continue`: (not recommended): Infected files will be marked via metadata as infected, but postprocessing continues normally. Note: Infected Files are moved to their final destination and therefore not prevented from download, which includes the risk of spreading viruses.
|
||||
|
||||
In all cases, a log entry is added declaring the infection and handling method and a notification via the `userlog` service sent.
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ type Config struct {
|
||||
Workers int `yaml:"workers" env:"ANTIVIRUS_WORKERS" desc:"The number of concurrent go routines that fetch events from the event queue." introductionVersion:"1.0.0"`
|
||||
|
||||
Scanner Scanner
|
||||
MaxScanSize string `yaml:"max-scan-size" env:"ANTIVIRUS_MAX_SCAN_SIZE" desc:"The maximum scan size the virus scanner can handle. Only this many bytes of a file will be scanned. 0 means unlimited and is the default. Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB." introductionVersion:"1.0.0"`
|
||||
MaxScanSize string `yaml:"max-scan-size" env:"ANTIVIRUS_MAX_SCAN_SIZE" desc:"The maximum scan size the virus scanner can handle. 0 means unlimited. Usable common abbreviations: [KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB, EB, EiB], example: 2GB." introductionVersion:"1.0.0"`
|
||||
|
||||
Context context.Context `json:"-" yaml:"-"`
|
||||
|
||||
|
||||
@@ -1,53 +1,44 @@
|
||||
# Auth-App
|
||||
|
||||
The auth-app service provides authentication for 3rd party apps.
|
||||
The auth-app service provides authentication for 3rd party apps unable to use
|
||||
OpenID Connect. The service is enabled by default and started automatically. It
|
||||
is possible to disable the service by setting:
|
||||
|
||||
## The `auth` Service Family
|
||||
|
||||
OpenCloud uses serveral authentication services for different use cases. All services that start with `auth-` are part of the authentication service family. Each member authenticates requests with different scopes. As of now, these services exist:
|
||||
- `auth-app` handles authentication of external 3rd party apps
|
||||
- `auth-basic` handles basic authentication
|
||||
- `auth-bearer` handles oidc authentication
|
||||
- `auth-machine` handles interservice authentication when a user is impersonated
|
||||
- `auth-service` handles interservice authentication when using service accounts
|
||||
|
||||
## Service Startup
|
||||
|
||||
Because this service is not started automatically, a manual start needs to be initiated which can be done in several ways. To configure the service usage, an environment variable for the proxy service needs to be set to allow app authentication.
|
||||
```bash
|
||||
OC_ADD_RUN_SERVICES=auth-app # deployment specific. Add the service to the manual startup list, use with binary deployments. Alternatively you can start the service explicitly via the command line.
|
||||
PROXY_ENABLE_APP_AUTH=true # mandatory, allow app authentication. In case of a distributed environment, this envvar needs to be set in the proxy service.
|
||||
OC_EXCLUDE_RUN_SERVICES=auth-app # deployment specific. Removes service from the list of automatically started services, use with single-binary deployments
|
||||
PROXY_ENABLE_APP_AUTH=false # mandatory, disables app authentication. In case of a distributed environment, this envvar needs to be set in the proxy service.
|
||||
```
|
||||
|
||||
## App Tokens
|
||||
|
||||
App Tokens are used to authenticate 3rd party access via https like when using curl (apps) to access an API endpoint. These apps need to authenticate themselves as no logged in user authenticates the request. To be able to use an app token, one must first create a token. There are different options of creating a token.
|
||||
App Tokens are password specifically generated to be used by 3rd party applications
|
||||
for authentication when accessing the OpenCloud API endpoints. To
|
||||
be able to use an app token, one must first create a token. There are different
|
||||
options of creating a token.
|
||||
|
||||
### Via CLI (dev only)
|
||||
## Important Security Note
|
||||
|
||||
Replace the `user-name` with an existing user. For the `token-expiration`, you can use any time abbreviation from the following list: `h, m, s`. Examples: `72h` or `1h` or `1m` or `1s.` Default is `72h`.
|
||||
When using an external IDP for authentication, App Token are NOT invalidated
|
||||
when the user is disabled or locked in that external IDP. That means the user
|
||||
will still be able to use its existing App Tokens for authentication for as
|
||||
long as the App Tokes are valid.
|
||||
|
||||
```bash
|
||||
opencloud auth-app create --user-name={user-name} --expiration={token-expiration}
|
||||
```
|
||||
|
||||
Once generated, these tokens can be used to authenticate requests to OpenCloud. They are passed as part of the request as `Basic Auth` header.
|
||||
## Managing App Tokens
|
||||
|
||||
### Via API
|
||||
|
||||
The `auth-app` service provides an API to create (POST), list (GET) and delete (DELETE) tokens at the `/auth-app/tokens` endpoint.
|
||||
Please note: This API is preliminary. In the future we will provide endpoints
|
||||
in the `graph` service for allowing the management of App Tokens.
|
||||
|
||||
When using curl for the respective command, you need to authenticate with a header. To do so, get from the browsers developer console the currently active bearer token. Consider that this token has a short lifetime. In any example, replace `<your host[:port]>` with the URL:port of your OpenCloud instance, and `{token}` `{value}` accordingly. Note that the active bearer token authenticates the user the token was issued for.
|
||||
The `auth-app` service provides an API to create (POST), list (GET) and delete (DELETE) tokens at the `/auth-app/tokens` endpoint.
|
||||
|
||||
* **Create a token**\
|
||||
The POST request requires:
|
||||
* A `expiry` key/value pair in the form of `expiry=<number><h|m|s>`\
|
||||
Example: `expiry=72h`
|
||||
* An active bearer token
|
||||
```bash
|
||||
curl --request POST 'https://<your host:9200>/auth-app/tokens?expiry={value}' \
|
||||
--header 'accept: application/json' \
|
||||
--header 'authorization: Bearer {token}'
|
||||
--header 'accept: application/json'
|
||||
```
|
||||
Example output:
|
||||
```
|
||||
@@ -59,14 +50,19 @@ When using curl for the respective command, you need to authenticate with a head
|
||||
}
|
||||
```
|
||||
|
||||
Note, that this is the only time the app token will be returned in cleartext. To use the token
|
||||
please copy it from the response.
|
||||
|
||||
* **List tokens**\
|
||||
The GET request only requires an active bearer token for authentication:\
|
||||
Note that `--request GET` is technically not required because it is curl default.
|
||||
```bash
|
||||
curl --request GET 'https://<your host:9200>/auth-app/tokens' \
|
||||
--header 'accept: application/json' \
|
||||
--header 'authorization: Bearer {token}'
|
||||
--header 'accept: application/json'
|
||||
```
|
||||
|
||||
Note that the `token` value in the response to the "List Tokens` request is not the actual
|
||||
app token, but a hashed value of the token. So this value cannot be used for authenticating
|
||||
with the token.
|
||||
|
||||
Example output:
|
||||
```
|
||||
[
|
||||
@@ -87,20 +83,23 @@ When using curl for the respective command, you need to authenticate with a head
|
||||
|
||||
* **Delete a token**\
|
||||
The DELETE request requires:
|
||||
* A `token` key/value pair in the form of `token=<token_issued>`\
|
||||
Example: `token=Z3s2K7816M4vuSpd5`
|
||||
* An active bearer token
|
||||
* A `token` key/value pair in the form of `token=<token_issued>`. The value needs to be the hashed value as returned by the `List Tokens` respone.\
|
||||
Example: `token=$2$Z3s2K7816M4vuSpd5`
|
||||
```bash
|
||||
curl --request DELETE 'https://<your host:9200>/auth-app/tokens?token={value}' \
|
||||
--header 'accept: application/json' \
|
||||
--header 'authorization: Bearer {token}'
|
||||
--header 'accept: application/json'
|
||||
```
|
||||
|
||||
### Via Impersonation API
|
||||
|
||||
When setting the environment variable `AUTH_APP_ENABLE_IMPERSONATION` to `true`, admins will be able to use the `/auth-app/tokens` endpoint to create tokens for other users but using their own bearer token for authentication. This can be important for migration scenarios, but should not be considered for regular tasks on a production system for security reasons.
|
||||
When setting the environment variable `AUTH_APP_ENABLE_IMPERSONATION` to
|
||||
`true`, admins will be able to use the `/auth-app/tokens` endpoint to create
|
||||
tokens for other users. This can be important for migration scenarios, but
|
||||
should not be considered for regular tasks on a production system for security
|
||||
reasons.
|
||||
|
||||
To impersonate, the respective requests from the CLI commands above extend with the following parameters, where you can use one or the other:
|
||||
To impersonate, the respective requests from the CLI commands above extend with
|
||||
the following parameters, where you can use one or the other:
|
||||
|
||||
* The `userID` in the form of: `userID={value}`\
|
||||
Example:\
|
||||
@@ -114,6 +113,27 @@ Example:\
|
||||
A final create request would then look like:
|
||||
```bash
|
||||
curl --request POST 'https://<your host:9200>/auth-app/tokens?expiry={value}&userName={value}' \
|
||||
--header 'accept: application/json' \
|
||||
--header 'authorization: Bearer {token}'
|
||||
--header 'accept: application/json'
|
||||
```
|
||||
|
||||
### Via CLI (developer only)
|
||||
|
||||
As the CLI is using the internal CS3Apis this needs access to the reva gateway
|
||||
service. This is mainly of developer (and admin) usage.
|
||||
Replace the `user-name` with an existing user. For the `token-expiration`, you
|
||||
can use any time abbreviation from the following list: `h, m, s`. Examples:
|
||||
`72h` or `1h` or `1m` or `1s.` Default is `72h`.
|
||||
|
||||
```bash
|
||||
opencloud auth-app create --user-name={user-name} --expiration={token-expiration}
|
||||
```
|
||||
|
||||
## Authenticating using App Tokens
|
||||
|
||||
To autenticate using an App Token simply use the username for which token was generated
|
||||
and the token value as returned by the "Create Token" request.
|
||||
|
||||
```bash
|
||||
curl -u <username>:<tokenvalue> 'https://<your host>/graph/v1.0/me' \
|
||||
--header 'accept: application/json'
|
||||
```
|
||||
|
||||
@@ -28,9 +28,40 @@ type Config struct {
|
||||
|
||||
AllowImpersonation bool `yaml:"allow_impersonation" env:"AUTH_APP_ENABLE_IMPERSONATION" desc:"Allows admins to create app tokens for other users. Used for migration. Do NOT use in productive deployments." introductionVersion:"1.0.0"`
|
||||
|
||||
StorageDriver string `yaml:"storage_driver" env:"AUTH_APP_STORAGE_DRIVER" desc:"Driver to be used to persist the app tokes . Supported values are 'jsoncs3', 'json'." introductionVersion:"%%NEXT%%"`
|
||||
StorageDrivers StorageDrivers `yaml:"storage_drivers"`
|
||||
|
||||
Context context.Context `yaml:"-"`
|
||||
}
|
||||
|
||||
type StorageDrivers struct {
|
||||
JSONCS3 JSONCS3Driver `yaml:"jsoncs3"`
|
||||
}
|
||||
|
||||
type JSONCS3Driver struct {
|
||||
ProviderAddr string `yaml:"provider_addr" env:"AUTH_APP_JSONCS3_PROVIDER_ADDR" desc:"GRPC address of the STORAGE-SYSTEM service." introductionVersion:"%%NEXT%%"`
|
||||
SystemUserID string `yaml:"system_user_id" env:"OC_SYSTEM_USER_ID;AUTH_APP_JSONCS3_SYSTEM_USER_ID" desc:"ID of the OpenCloud STORAGE-SYSTEM system user. Admins need to set the ID for the STORAGE-SYSTEM system user in this config option which is then used to reference the user. Any reasonable long string is possible, preferably this would be an UUIDv4 format." introductionVersion:"%%NEXT%%"`
|
||||
SystemUserIDP string `yaml:"system_user_idp" env:"OC_SYSTEM_USER_IDP;AUTH_APP_JSONCS3_SYSTEM_USER_IDP" desc:"IDP of the OpenCloud STORAGE-SYSTEM system user." introductionVersion:"%%NEXT%%"`
|
||||
SystemUserAPIKey string `yaml:"system_user_api_key" env:"OC_SYSTEM_USER_API_KEY;AUTH_APP_JSONCS3_SYSTEM_USER_API_KEY" desc:"API key for the STORAGE-SYSTEM system user." introductionVersion:"%%NEXT%%"`
|
||||
PasswordGenerator string `yaml:"password_generator" env:"AUTH_APP_JSONCS3_PASSWORD_GENERATOR" desc:"The password generator that should be used for generating app tokens. Supported values are: 'diceware' and 'random'." introductionVersion:"%%NEXT%%"`
|
||||
PasswordGeneratorOptions PasswordGeneratorOptions `yaml:"password_generator_options"`
|
||||
}
|
||||
|
||||
type PasswordGeneratorOptions struct {
|
||||
DicewareOptions DicewareOptions `yaml:"diceware"`
|
||||
RandPWOpts RandPWOpts `yaml:"randon"`
|
||||
}
|
||||
|
||||
// DicewareOptions defines the config options for the "diceware" password generator
|
||||
type DicewareOptions struct {
|
||||
NumberOfWords int `yaml:"number_of_words" env:"AUTH_APP_JSONCS3_DICEWARE_NUMBER_OF_WORDS" desc:"The number of words the generated passphrase will have." introductionVersion:"%%NEXT%%"`
|
||||
}
|
||||
|
||||
// RandPWOpts defines the config options for the "random" password generator
|
||||
type RandPWOpts struct {
|
||||
PasswordLength int `yaml:"password_length" env:"AUTH_APP_JSONCS3_RANDOM_PASSWORD_LENGTH" desc:"The number of charactors the generated passwords will have." introductionVersion:"%%NEXT%%"`
|
||||
}
|
||||
|
||||
// Log defines the loging configuration
|
||||
type Log struct {
|
||||
Level string `yaml:"level" env:"OC_LOG_LEVEL;AUTH_APP_LOG_LEVEL" desc:"The log level. Valid values are: 'panic', 'fatal', 'error', 'warn', 'info', 'debug', 'trace'." introductionVersion:"1.0.0"`
|
||||
|
||||
@@ -44,6 +44,19 @@ func DefaultConfig() *config.Config {
|
||||
Service: config.Service{
|
||||
Name: "auth-app",
|
||||
},
|
||||
StorageDriver: "jsoncs3",
|
||||
StorageDrivers: config.StorageDrivers{
|
||||
JSONCS3: config.JSONCS3Driver{
|
||||
ProviderAddr: "eu.opencloud.api.storage-system",
|
||||
SystemUserIDP: "internal",
|
||||
PasswordGenerator: "diceware",
|
||||
PasswordGeneratorOptions: config.PasswordGeneratorOptions{
|
||||
DicewareOptions: config.DicewareOptions{
|
||||
NumberOfWords: 6,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Reva: shared.DefaultRevaConfig(),
|
||||
}
|
||||
}
|
||||
@@ -85,6 +98,14 @@ func EnsureDefaults(cfg *config.Config) {
|
||||
cfg.MachineAuthAPIKey = cfg.Commons.MachineAuthAPIKey
|
||||
}
|
||||
|
||||
if cfg.StorageDrivers.JSONCS3.SystemUserAPIKey == "" && cfg.Commons != nil && cfg.Commons.SystemUserAPIKey != "" {
|
||||
cfg.StorageDrivers.JSONCS3.SystemUserAPIKey = cfg.Commons.SystemUserAPIKey
|
||||
}
|
||||
|
||||
if cfg.StorageDrivers.JSONCS3.SystemUserID == "" && cfg.Commons != nil && cfg.Commons.SystemUserID != "" {
|
||||
cfg.StorageDrivers.JSONCS3.SystemUserID = cfg.Commons.SystemUserID
|
||||
}
|
||||
|
||||
if cfg.TokenManager == nil && cfg.Commons != nil && cfg.Commons.TokenManager != nil {
|
||||
cfg.TokenManager = &config.TokenManager{
|
||||
JWTSecret: cfg.Commons.TokenManager.JWTSecret,
|
||||
|
||||
@@ -11,6 +11,14 @@ import (
|
||||
func AuthAppConfigFromStruct(cfg *config.Config) map[string]interface{} {
|
||||
appAuthJSON := filepath.Join(defaults.BaseDataPath(), "appauth.json")
|
||||
|
||||
jsonCS3pwGenOpt := map[string]any{}
|
||||
switch cfg.StorageDrivers.JSONCS3.PasswordGenerator {
|
||||
case "random":
|
||||
jsonCS3pwGenOpt["token_strength"] = cfg.StorageDrivers.JSONCS3.PasswordGeneratorOptions.RandPWOpts.PasswordLength
|
||||
case "diceware":
|
||||
jsonCS3pwGenOpt["number_of_words"] = cfg.StorageDrivers.JSONCS3.PasswordGeneratorOptions.DicewareOptions.NumberOfWords
|
||||
}
|
||||
|
||||
rcfg := map[string]interface{}{
|
||||
"shared": map[string]interface{}{
|
||||
"jwt_secret": cfg.TokenManager.JWTSecret,
|
||||
@@ -36,11 +44,19 @@ func AuthAppConfigFromStruct(cfg *config.Config) map[string]interface{} {
|
||||
},
|
||||
},
|
||||
"applicationauth": map[string]interface{}{
|
||||
"driver": "json",
|
||||
"driver": cfg.StorageDriver,
|
||||
"drivers": map[string]interface{}{
|
||||
"json": map[string]interface{}{
|
||||
"file": appAuthJSON,
|
||||
},
|
||||
"jsoncs3": map[string]interface{}{
|
||||
"provider_addr": cfg.StorageDrivers.JSONCS3.ProviderAddr,
|
||||
"service_user_id": cfg.StorageDrivers.JSONCS3.SystemUserID,
|
||||
"service_user_idp": cfg.StorageDrivers.JSONCS3.SystemUserIDP,
|
||||
"machine_auth_apikey": cfg.StorageDrivers.JSONCS3.SystemUserAPIKey,
|
||||
"password_generator": cfg.StorageDrivers.JSONCS3.PasswordGenerator,
|
||||
"generator_config": jsonCS3pwGenOpt,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -99,7 +99,10 @@ func (a *AuthAppService) HandleCreate(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
label := "Generated via API"
|
||||
label := q.Get("label")
|
||||
if label == "" {
|
||||
label = "Generated via API"
|
||||
}
|
||||
|
||||
// Impersonated request
|
||||
userID, userName := q.Get("userID"), q.Get("userName")
|
||||
@@ -131,7 +134,7 @@ func (a *AuthAppService) HandleCreate(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
label = "Generated via Impersonation API"
|
||||
label = label + " (Impersonation)"
|
||||
}
|
||||
|
||||
scopes, err := scope.AddOwnerScope(map[string]*authpb.Scope{})
|
||||
|
||||
@@ -1272,6 +1272,10 @@ func (f *FileConnector) CheckFileInfo(ctx context.Context) (*ConnectorResponse,
|
||||
|
||||
fileinfo.KeyPostMessageOrigin: f.cfg.Commons.OpenCloudURL,
|
||||
fileinfo.KeyLicenseCheckForEditIsEnabled: f.cfg.App.LicenseCheckEnable,
|
||||
|
||||
// set to true for Collabora until we have a web embed mode for "Save As" and "Export As"
|
||||
// see the FIXME in ./fileinfo/collabora.go and https://github.com/opencloud-eu/web/issues/422
|
||||
fileinfo.KeyUserCanNotWriteRelative: false,
|
||||
}
|
||||
|
||||
switch wopiContext.ViewMode {
|
||||
|
||||
@@ -1780,7 +1780,7 @@ var _ = Describe("FileConnector", func() {
|
||||
OwnerID: "61616262636340637573746f6d496470", // hex of aabbcc@customIdp
|
||||
Size: int64(998877),
|
||||
BaseFileName: "test.txt",
|
||||
UserCanNotWriteRelative: false,
|
||||
UserCanNotWriteRelative: true,
|
||||
DisableExport: true,
|
||||
DisableCopy: true,
|
||||
DisablePrint: true,
|
||||
@@ -1962,7 +1962,7 @@ var _ = Describe("FileConnector", func() {
|
||||
OwnerID: "61616262636340637573746f6d496470", // hex of aabbcc@customIdp
|
||||
Size: int64(998877),
|
||||
BaseFileName: "test.txt",
|
||||
UserCanNotWriteRelative: false,
|
||||
UserCanNotWriteRelative: true,
|
||||
DisableExport: true,
|
||||
DisableCopy: true,
|
||||
DisablePrint: true,
|
||||
|
||||
@@ -99,7 +99,7 @@ func (cinfo *Collabora) SetProperties(props map[string]interface{}) {
|
||||
case KeyUserCanWrite:
|
||||
cinfo.UserCanWrite = value.(bool)
|
||||
case KeyUserCanNotWriteRelative:
|
||||
cinfo.UserCanNotWriteRelative = value.(bool)
|
||||
cinfo.UserCanNotWriteRelative = true // FIXME: set to `value.(bool)` again for https://github.com/opencloud-eu/web/issues/422
|
||||
case KeyUserID:
|
||||
cinfo.UserID = value.(string)
|
||||
case KeyUserFriendlyName:
|
||||
|
||||
@@ -677,21 +677,34 @@ func (g Graph) getSpecialDriveItems(ctx context.Context, baseURL *url.URL, space
|
||||
}
|
||||
|
||||
var spaceItems []libregraph.DriveItem
|
||||
var err error
|
||||
doCache := true
|
||||
|
||||
spaceItems = g.fetchSpecialDriveItem(ctx, spaceItems, SpaceImageSpecialFolderName, imageNode, space, baseURL)
|
||||
spaceItems = g.fetchSpecialDriveItem(ctx, spaceItems, ReadmeSpecialFolderName, readmeNode, space, baseURL)
|
||||
spaceItems, err = g.fetchSpecialDriveItem(ctx, spaceItems, SpaceImageSpecialFolderName, imageNode, space, baseURL)
|
||||
if err != nil {
|
||||
doCache = false
|
||||
g.logger.Debug().Err(err).Str("ID", imageNode).Msg("Could not get space image")
|
||||
}
|
||||
spaceItems, err = g.fetchSpecialDriveItem(ctx, spaceItems, ReadmeSpecialFolderName, readmeNode, space, baseURL)
|
||||
if err != nil {
|
||||
doCache = false
|
||||
g.logger.Debug().Err(err).Str("ID", imageNode).Msg("Could not get space readme")
|
||||
}
|
||||
|
||||
// cache properties
|
||||
spacePropertiesEntry := specialDriveItemEntry{
|
||||
specialDriveItems: spaceItems,
|
||||
rootMtime: space.GetMtime(),
|
||||
}
|
||||
g.specialDriveItemsCache.Set(cachekey, spacePropertiesEntry, time.Duration(g.config.Spaces.ExtendedSpacePropertiesCacheTTL))
|
||||
|
||||
if doCache {
|
||||
g.specialDriveItemsCache.Set(cachekey, spacePropertiesEntry, time.Duration(g.config.Spaces.ExtendedSpacePropertiesCacheTTL))
|
||||
}
|
||||
|
||||
return spaceItems
|
||||
}
|
||||
|
||||
func (g Graph) fetchSpecialDriveItem(ctx context.Context, spaceItems []libregraph.DriveItem, itemName string, itemNode string, space *storageprovider.StorageSpace, baseURL *url.URL) []libregraph.DriveItem {
|
||||
func (g Graph) fetchSpecialDriveItem(ctx context.Context, spaceItems []libregraph.DriveItem, itemName string, itemNode string, space *storageprovider.StorageSpace, baseURL *url.URL) ([]libregraph.DriveItem, error) {
|
||||
var ref *storageprovider.Reference
|
||||
if itemNode != "" {
|
||||
rid, _ := storagespace.ParseID(itemNode)
|
||||
@@ -700,12 +713,15 @@ func (g Graph) fetchSpecialDriveItem(ctx context.Context, spaceItems []libregrap
|
||||
ref = &storageprovider.Reference{
|
||||
ResourceId: &rid,
|
||||
}
|
||||
spaceItem := g.getSpecialDriveItem(ctx, ref, itemName, baseURL, space)
|
||||
spaceItem, err := g.getSpecialDriveItem(ctx, ref, itemName, baseURL, space)
|
||||
if err != nil {
|
||||
return spaceItems, err
|
||||
}
|
||||
if spaceItem != nil {
|
||||
spaceItems = append(spaceItems, *spaceItem)
|
||||
}
|
||||
}
|
||||
return spaceItems
|
||||
return spaceItems, nil
|
||||
}
|
||||
|
||||
// generates a space root stat cache key used to detect changes in a space
|
||||
@@ -730,10 +746,10 @@ type specialDriveItemEntry struct {
|
||||
rootMtime *types.Timestamp
|
||||
}
|
||||
|
||||
func (g Graph) getSpecialDriveItem(ctx context.Context, ref *storageprovider.Reference, itemName string, baseURL *url.URL, space *storageprovider.StorageSpace) *libregraph.DriveItem {
|
||||
func (g Graph) getSpecialDriveItem(ctx context.Context, ref *storageprovider.Reference, itemName string, baseURL *url.URL, space *storageprovider.StorageSpace) (*libregraph.DriveItem, error) {
|
||||
var spaceItem *libregraph.DriveItem
|
||||
if ref.GetResourceId().GetSpaceId() == "" && ref.GetResourceId().GetOpaqueId() == "" {
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FIXME we should send a fieldmask 'path' and return it as the Path property to save an additional call to the storage.
|
||||
@@ -741,16 +757,14 @@ func (g Graph) getSpecialDriveItem(ctx context.Context, ref *storageprovider.Ref
|
||||
// and Path should always be relative to the space root OR the resource the current user can access ...
|
||||
spaceItem, err := g.getDriveItem(ctx, ref)
|
||||
if err != nil {
|
||||
g.logger.Debug().Err(err).Str("ID", ref.GetResourceId().GetOpaqueId()).Str("name", itemName).Msg("Could not get item info")
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
itemPath := ref.GetPath()
|
||||
if itemPath == "" {
|
||||
// lookup by id
|
||||
itemPath, err = g.getPathForResource(ctx, *ref.GetResourceId())
|
||||
if err != nil {
|
||||
g.logger.Debug().Err(err).Str("ID", ref.GetResourceId().GetOpaqueId()).Str("name", itemName).Msg("Could not get item path")
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
spaceItem.SpecialFolder = &libregraph.SpecialFolder{Name: libregraph.PtrString(itemName)}
|
||||
@@ -758,5 +772,5 @@ func (g Graph) getSpecialDriveItem(ctx context.Context, ref *storageprovider.Ref
|
||||
webdavURL.Path = path.Join(webdavURL.Path, space.GetId().GetOpaqueId(), itemPath)
|
||||
spaceItem.WebDavUrl = libregraph.PtrString(webdavURL.String())
|
||||
|
||||
return spaceItem
|
||||
return spaceItem, nil
|
||||
}
|
||||
|
||||
@@ -7,3 +7,76 @@ It is mainly targeted at smaller installations. For larger setups it is recommen
|
||||
By default, it is configured to use the OpenCloud IDM service as its LDAP backend for looking up and authenticating users. Other backends like an external LDAP server can be configured via a set of [environment variables](https://docs.opencloud.eu/services/idp/configuration/#environment-variables).
|
||||
|
||||
Note that translations provided by the IDP service are not maintained via OpenCloud but part of the embedded [LibreGraph Connect Identifier](https://github.com/libregraph/lico/tree/master/identifier) package.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Custom Clients
|
||||
|
||||
By default, the `idp` service generates an OIDC client configuration suitable for
|
||||
using OpenCloud with the standard client applications (Web, Desktop, iOS and
|
||||
Android). If you need to configure additional clients, it is possible to inject a
|
||||
custom configuration via `yaml`. This can be done by adding a section `clients`
|
||||
to the `idp` section of the main configuration file (`opencloud.yaml`). This section
|
||||
needs to contain configuration for all clients (including the standard clients).
|
||||
|
||||
For example if you want to add a (public) client for use with the oidc-agent you would
|
||||
need to add this snippet to the `idp` section in `opencloud.yaml`.
|
||||
|
||||
```yaml
|
||||
clients:
|
||||
- id: web
|
||||
name: OpenCloud Web App
|
||||
trusted: true
|
||||
secret: ""
|
||||
redirect_uris:
|
||||
- https://opencloud.k8s:9200/
|
||||
- https://opencloud.k8s:9200/oidc-callback.html
|
||||
- https://opencloud.k8s:9200/oidc-silent-redirect.html
|
||||
post_logout_redirect_uris: []
|
||||
origins:
|
||||
- https://opencloud.k8s:9200
|
||||
application_type: ""
|
||||
- id: OpenCloudDesktop
|
||||
name: OpenCloud Desktop Client
|
||||
trusted: false
|
||||
secret: ""
|
||||
redirect_uris:
|
||||
- http://127.0.0.1
|
||||
- http://localhost
|
||||
post_logout_redirect_uris: []
|
||||
origins: []
|
||||
application_type: native
|
||||
- id: OpenCloudAndroid
|
||||
name: OpenCloud Android App
|
||||
trusted: false
|
||||
secret: ""
|
||||
redirect_uris:
|
||||
- oc://android.opencloud.eu
|
||||
post_logout_redirect_uris:
|
||||
- oc://android.opencloud.eu
|
||||
origins: []
|
||||
application_type: native
|
||||
- id: OpenCloudIOS
|
||||
name: OpenCloud iOS App
|
||||
trusted: false
|
||||
secret: ""
|
||||
redirect_uris:
|
||||
- oc://ios.opencloud.eu
|
||||
post_logout_redirect_uris:
|
||||
- oc://ios.opencloud.eu
|
||||
origins: []
|
||||
application_type: native
|
||||
- id: oidc-agent
|
||||
name: OIDC Agent
|
||||
trusted: false
|
||||
secret: ""
|
||||
redirect_uris:
|
||||
- http://127.0.0.1
|
||||
- http://localhost
|
||||
post_logout_redirect_uris: []
|
||||
origins: []
|
||||
application_type: native
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -472,7 +472,10 @@ module.exports = function(webpackEnv) {
|
||||
// its runtime that would otherwise be processed through "file" loader.
|
||||
// Also exclude `html` and `json` extensions so they get processed
|
||||
// by webpacks internal loaders.
|
||||
exclude: [/\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
|
||||
//
|
||||
// html-webpack-plugin has a known bug,
|
||||
// fixed by /^$/ https://github.com/jantimon/html-webpack-plugin/issues/1589
|
||||
exclude: [/^$/, /\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
|
||||
options: {
|
||||
name: 'static/media/[name].[hash:8].[ext]',
|
||||
},
|
||||
|
||||
@@ -21,19 +21,19 @@ var (
|
||||
func NewTextTemplate(mt MessageTemplate, locale, defaultLocale string, translationPath string, vars map[string]string) (MessageTemplate, error) {
|
||||
var err error
|
||||
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, translationPath, _translationFS, "l10n/locale").Locale(locale)
|
||||
mt.Subject, err = composeMessage(t.Get("%s", mt.Subject), vars)
|
||||
mt.Subject, err = composeMessage(t.Get(mt.Subject, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.Greeting, err = composeMessage(t.Get("%s", mt.Greeting), vars)
|
||||
mt.Greeting, err = composeMessage(t.Get(mt.Greeting, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.MessageBody, err = composeMessage(t.Get("%s", mt.MessageBody), vars)
|
||||
mt.MessageBody, err = composeMessage(t.Get(mt.MessageBody, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.CallToAction, err = composeMessage(t.Get("%s", mt.CallToAction), vars)
|
||||
mt.CallToAction, err = composeMessage(t.Get(mt.CallToAction, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
@@ -44,19 +44,19 @@ func NewTextTemplate(mt MessageTemplate, locale, defaultLocale string, translati
|
||||
func NewHTMLTemplate(mt MessageTemplate, locale, defaultLocale string, translationPath string, vars map[string]string) (MessageTemplate, error) {
|
||||
var err error
|
||||
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, translationPath, _translationFS, "l10n/locale").Locale(locale)
|
||||
mt.Subject, err = composeMessage(t.Get("%s", mt.Subject), vars)
|
||||
mt.Subject, err = composeMessage(t.Get(mt.Subject, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.Greeting, err = composeMessage(newlineToBr(t.Get("%s", mt.Greeting)), vars)
|
||||
mt.Greeting, err = composeMessage(newlineToBr(t.Get(mt.Greeting, []interface{}{}...)), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.MessageBody, err = composeMessage(newlineToBr(t.Get("%s", mt.MessageBody)), vars)
|
||||
mt.MessageBody, err = composeMessage(newlineToBr(t.Get(mt.MessageBody, []interface{}{}...)), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
mt.CallToAction, err = composeMessage(callToActionToHTML(t.Get("%s", mt.CallToAction)), vars)
|
||||
mt.CallToAction, err = composeMessage(callToActionToHTML(t.Get(mt.CallToAction, []interface{}{}...)), vars)
|
||||
if err != nil {
|
||||
return mt, err
|
||||
}
|
||||
@@ -71,18 +71,18 @@ func NewGroupedTextTemplate(gmt GroupedMessageTemplate, vars map[string]string,
|
||||
|
||||
var err error
|
||||
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, translationPath, _translationFS, "l10n/locale").Locale(locale)
|
||||
gmt.Subject, err = composeMessage(t.Get("%s", gmt.Subject), vars)
|
||||
gmt.Subject, err = composeMessage(t.Get(gmt.Subject, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
gmt.Greeting, err = composeMessage(t.Get("%s", gmt.Greeting), vars)
|
||||
gmt.Greeting, err = composeMessage(t.Get(gmt.Greeting, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
|
||||
bodyParts := make([]string, 0, len(mtsVars))
|
||||
for i, mt := range mts {
|
||||
bodyPart, err := composeMessage(t.Get("%s", mt.MessageBody), mtsVars[i])
|
||||
bodyPart, err := composeMessage(t.Get(mt.MessageBody, []interface{}{}...), mtsVars[i])
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
@@ -100,18 +100,18 @@ func NewGroupedHTMLTemplate(gmt GroupedMessageTemplate, vars map[string]string,
|
||||
|
||||
var err error
|
||||
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, translationPath, _translationFS, "l10n/locale").Locale(locale)
|
||||
gmt.Subject, err = composeMessage(t.Get("%s", gmt.Subject), vars)
|
||||
gmt.Subject, err = composeMessage(t.Get(gmt.Subject, []interface{}{}...), vars)
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
gmt.Greeting, err = composeMessage(newlineToBr(t.Get("%s", gmt.Greeting)), vars)
|
||||
gmt.Greeting, err = composeMessage(newlineToBr(t.Get(gmt.Greeting, []interface{}{}...)), vars)
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
|
||||
bodyParts := make([]string, 0, len(mtsVars))
|
||||
for i, mt := range mts {
|
||||
bodyPart, err := composeMessage(t.Get("%s", mt.MessageBody), mtsVars[i])
|
||||
bodyPart, err := composeMessage(t.Get(mt.MessageBody, []interface{}{}...), mtsVars[i])
|
||||
if err != nil {
|
||||
return gmt, err
|
||||
}
|
||||
|
||||
@@ -352,7 +352,7 @@ func loadMiddlewares(logger log.Logger, cfg *config.Config,
|
||||
middleware.CredentialsByUserAgent(cfg.AuthMiddleware.CredentialsByUserAgent),
|
||||
middleware.Logger(logger),
|
||||
middleware.OIDCIss(cfg.OIDC.Issuer),
|
||||
middleware.EnableBasicAuth(cfg.EnableBasicAuth),
|
||||
middleware.EnableBasicAuth(cfg.EnableBasicAuth || cfg.AuthMiddleware.AllowAppAuth),
|
||||
middleware.TraceProvider(traceProvider),
|
||||
),
|
||||
middleware.AccountResolver(
|
||||
|
||||
@@ -100,6 +100,9 @@ func DefaultConfig() *config.Config {
|
||||
Cluster: "opencloud-cluster",
|
||||
EnableTLS: false,
|
||||
},
|
||||
AuthMiddleware: config.AuthMiddleware{
|
||||
AllowAppAuth: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -697,7 +697,7 @@ func translateBundle(bundle *settingsmsg.Bundle, t *gotext.Locale) *settingsmsg.
|
||||
// translate interval names ('Instant', 'Daily', 'Weekly', 'Never')
|
||||
value := set.GetSingleChoiceValue()
|
||||
for i, v := range value.GetOptions() {
|
||||
value.Options[i].DisplayValue = t.Get("%s", v.GetDisplayValue())
|
||||
value.Options[i].DisplayValue = t.Get(v.GetDisplayValue(), []interface{}{}...)
|
||||
}
|
||||
set.Value = &settingsmsg.Setting_SingleChoiceValue{SingleChoiceValue: value}
|
||||
fallthrough
|
||||
@@ -710,9 +710,9 @@ func translateBundle(bundle *settingsmsg.Bundle, t *gotext.Locale) *settingsmsg.
|
||||
defaults.SettingUUIDProfileEventSpaceDisabled,
|
||||
defaults.SettingUUIDProfileEventSpaceDeleted:
|
||||
// translate event names ('Share Received', 'Share Removed', ...)
|
||||
set.DisplayName = t.Get("%s", set.GetDisplayName())
|
||||
set.DisplayName = t.Get(set.GetDisplayName(), []interface{}{}...)
|
||||
// translate event descriptions ('Notify me when I receive a share', ...)
|
||||
set.Description = t.Get("%s", set.GetDescription())
|
||||
set.Description = t.Get(set.GetDescription(), []interface{}{}...)
|
||||
bundle.Settings[i] = set
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ When hard-stopping OpenCloud, for example with the `kill <pid>` command (SIGKILL
|
||||
|
||||
* With the value of the environment variable `STORAGE_USERS_GRACEFUL_SHUTDOWN_TIMEOUT`, the `storage-users` service will delay its shutdown giving it time to finalize writing necessary data. This delay can be necessary if there is a lot of data to be saved and/or if storage access/throughput is slow. In such a case you would receive an error log entry informing you that not all data could be saved in time. To prevent such occurrences, you must increase the default value.
|
||||
|
||||
* If a shutdown error has been logged, the command-line maintenance tool [Inspect and Manipulate Node Metadata](https://doc.opencloud.eu/maintenance/commands/commands.html#inspect-and-manipulate-node-metadata) can help to fix the issue. Please contact support for details.
|
||||
* If a shutdown error has been logged, the command-line maintenance tool to inspect and manipulate node metadata can help to fix the issue. Please contact support for details.
|
||||
|
||||
## CLI Commands
|
||||
|
||||
@@ -142,8 +142,7 @@ opencloud storage-users uploads sessions --processing=false --has-virus=false --
|
||||
|
||||
### Manage Trash-Bin Items
|
||||
|
||||
This command set provides commands to get an overview of trash-bin items, restore items and purge old items of `personal` spaces and `project` spaces (spaces that have been created manually). `trash-bin` commands require a `spaceID` as parameter. See [List all spaces
|
||||
](https://doc.opencloud.eu/apis/http/graph/spaces/#list-all-spaces-get-drives) or [Listing Space IDs](https://doc.opencloud.eu/maintenance/space-ids/space-ids.html) for details of how to get them.
|
||||
This command set provides commands to get an overview of trash-bin items, restore items and purge old items of `personal` spaces and `project` spaces (spaces that have been created manually). `trash-bin` commands require a `spaceID` as parameter.
|
||||
|
||||
```bash
|
||||
opencloud storage-users trash-bin <command>
|
||||
@@ -170,10 +169,10 @@ The behaviour of the `purge-expired` command can be configured by using the foll
|
||||
Used to obtain space trash-bin information and takes the system admin user as the default which is the `OC_ADMIN_USER_ID` but can be set individually. It should be noted, that the `OC_ADMIN_USER_ID` is only assigned automatically when using the single binary deployment and must be manually assigned in all other deployments. The command only considers spaces to which the assigned user has access and delete permission.
|
||||
|
||||
* `STORAGE_USERS_PURGE_TRASH_BIN_PERSONAL_DELETE_BEFORE`\
|
||||
Has a default value of `720h` which equals `30 days`. This means, the command will delete all files older than `30 days`. The value is human-readable, for valid values see the duration type described in the [Environment Variable Types](https://doc.opencloud.eu/deployment/services/envvar-types-description.html). A value of `0` is equivalent to disable and prevents the deletion of `personal space` trash-bin files.
|
||||
Has a default value of `720h` which equals `30 days`. This means, the command will delete all files older than `30 days`. The value is human-readable. A value of `0` is equivalent to disable and prevents the deletion of `personal space` trash-bin files.
|
||||
|
||||
* `STORAGE_USERS_PURGE_TRASH_BIN_PROJECT_DELETE_BEFORE`\
|
||||
Has a default value of `720h` which equals `30 days`. This means, the command will delete all files older than `30 days`. The value is human-readable, for valid values see the duration type described in the [Environment Variable Types](https://doc.opencloud.eu/latest/deployment/services/envvar-types-description.html). A value of `0` is equivalent to disable and prevents the deletion of `project space` trash-bin files.
|
||||
Has a default value of `720h` which equals `30 days`. This means, the command will delete all files older than `30 days`. The value is human-readable. A value of `0` is equivalent to disable and prevents the deletion of `project space` trash-bin files.
|
||||
|
||||
#### List and Restore Trash-Bins Items
|
||||
|
||||
|
||||
@@ -98,13 +98,26 @@ func ListUploadSessions(cfg *config.Config) *cli.Command {
|
||||
return configlog.ReturnFatal(parser.ParseConfig(cfg))
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
var err error
|
||||
f, ok := registry.NewFuncs[cfg.Driver]
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "Unknown filesystem driver '%s'\n", cfg.Driver)
|
||||
os.Exit(1)
|
||||
}
|
||||
drivers := revaconfig.StorageProviderDrivers(cfg)
|
||||
fs, err := f(drivers[cfg.Driver].(map[string]interface{}), nil, nil)
|
||||
var fsStream events.Stream
|
||||
if cfg.Driver == "posix" {
|
||||
// We need to init the posix driver with 'scanfs' disabled
|
||||
drivers["posix"] = revaconfig.Posix(cfg, false)
|
||||
// Also posix refuses to start without an events stream
|
||||
fsStream, err = event.NewStream(cfg)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to create event stream for posix driver: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
fs, err := f(drivers[cfg.Driver].(map[string]interface{}), fsStream, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to initialize filesystem driver '%s'\n", cfg.Driver)
|
||||
return err
|
||||
@@ -117,7 +130,7 @@ func ListUploadSessions(cfg *config.Config) *cli.Command {
|
||||
}
|
||||
|
||||
var stream events.Stream
|
||||
if c.Bool("restart") {
|
||||
if c.Bool("restart") || c.Bool("resume") {
|
||||
stream, err = event.NewStream(cfg)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to create event stream: %v\n", err)
|
||||
|
||||
@@ -123,6 +123,7 @@ func DefaultConfig() *config.Config {
|
||||
MaxConcurrency: 5,
|
||||
LockCycleDurationFactor: 30,
|
||||
DisableMultipart: true,
|
||||
AsyncUploads: true,
|
||||
},
|
||||
Decomposed: config.DecomposedDriver{
|
||||
Propagator: "sync",
|
||||
|
||||
@@ -376,7 +376,7 @@ func composeMessage(nt NotificationTemplate, locale, defaultLocale, path string,
|
||||
|
||||
func loadTemplates(nt NotificationTemplate, locale, defaultLocale, path string) (string, string) {
|
||||
t := l10n.NewTranslatorFromCommonConfig(defaultLocale, _domain, path, _translationFS, "l10n/locale").Locale(locale)
|
||||
return t.Get("%s", nt.Subject), t.Get("%s", nt.Message)
|
||||
return t.Get(nt.Subject, []interface{}{}...), t.Get(nt.Message, []interface{}{}...)
|
||||
}
|
||||
|
||||
func executeTemplate(raw string, vars map[string]interface{}) (string, error) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
SHELL := bash
|
||||
NAME := web
|
||||
WEB_ASSETS_VERSION = v2.0.0
|
||||
WEB_ASSETS_VERSION = v2.1.1
|
||||
WEB_ASSETS_BRANCH = main
|
||||
|
||||
ifneq (, $(shell command -v go 2> /dev/null)) # suppress `command not found warnings` for non go targets in CI
|
||||
|
||||
@@ -15,7 +15,7 @@ services:
|
||||
STORAGE_SYSTEM_DRIVER_OC_ROOT: /srv/app/tmp/opencloud/storage/metadata
|
||||
SHARING_USER_JSON_FILE: /srv/app/tmp/opencloud/shares.json
|
||||
PROXY_ENABLE_BASIC_AUTH: "true"
|
||||
WEB_UI_CONFIG_FILE: /woodpecker/src/github.com/opencloud-eu/opencloud/tests/config/drone/opencloud-config.json
|
||||
WEB_UI_CONFIG_FILE: /woodpecker/src/github.com/opencloud-eu/opencloud/tests/config/woodpecker/opencloud-config.json
|
||||
ACCOUNTS_HASH_DIFFICULTY: 4
|
||||
OC_INSECURE: "true"
|
||||
IDM_CREATE_DEMO_USERS: "true"
|
||||
|
||||
@@ -153,22 +153,6 @@ _ocdav: api compatibility, return correct status code_
|
||||
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:277](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L277)
|
||||
|
||||
#### [Uploading file with mtime gives 500 error](https://github.com/opencloud-eu/opencloud/issues/391)
|
||||
|
||||
- [coreApiWebdavUpload/uploadFile.feature:400](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L400)
|
||||
- [coreApiWebdavUpload/uploadFile.feature:401](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L401)
|
||||
- [coreApiWebdavUpload/uploadFile.feature:402](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L402)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:65](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L65)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:66](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L66)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:67](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L67)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L79)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:80](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L80)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:81](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L81)
|
||||
- [coreApiVersions/fileVersions.feature:296](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L296)
|
||||
- [coreApiVersions/fileVersions.feature:297](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L297)
|
||||
- [coreApiVersions/fileVersions.feature:298](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L298)
|
||||
- [coreApiVersions/fileVersions.feature:301](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiVersions/fileVersions.feature#L301)
|
||||
|
||||
### Won't fix
|
||||
|
||||
Not everything needs to be implemented for opencloud.
|
||||
|
||||
163
tests/acceptance/expected-failures-API-on-posix-storage.md
Normal file
163
tests/acceptance/expected-failures-API-on-posix-storage.md
Normal file
@@ -0,0 +1,163 @@
|
||||
## Scenarios from core API tests that are expected to fail with decomposed storage while running with the Graph API
|
||||
|
||||
### File
|
||||
|
||||
Basic file management like up and download, move, copy, properties, trash, versions and chunking.
|
||||
|
||||
#### [Custom dav properties with namespaces are rendered incorrectly](https://github.com/owncloud/ocis/issues/2140)
|
||||
|
||||
_ocdav: double-check the webdav property parsing when custom namespaces are used_
|
||||
|
||||
- [coreApiWebdavProperties/setFileProperties.feature:128](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L128)
|
||||
- [coreApiWebdavProperties/setFileProperties.feature:129](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L129)
|
||||
- [coreApiWebdavProperties/setFileProperties.feature:130](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L130)
|
||||
|
||||
### Sync
|
||||
|
||||
Synchronization features like etag propagation, setting mtime and locking files
|
||||
|
||||
#### [Uploading an old method chunked file with checksum should fail using new DAV path](https://github.com/owncloud/ocis/issues/2323)
|
||||
|
||||
- [coreApiMain/checksums.feature:233](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L233)
|
||||
- [coreApiMain/checksums.feature:234](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L234)
|
||||
- [coreApiMain/checksums.feature:235](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiMain/checksums.feature#L235)
|
||||
|
||||
### Share
|
||||
|
||||
#### [d:quota-available-bytes in dprop of PROPFIND give wrong response value](https://github.com/owncloud/ocis/issues/8197)
|
||||
|
||||
- [coreApiWebdavProperties/getQuota.feature:57](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L57)
|
||||
- [coreApiWebdavProperties/getQuota.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L58)
|
||||
- [coreApiWebdavProperties/getQuota.feature:59](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L59)
|
||||
- [coreApiWebdavProperties/getQuota.feature:73](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L73)
|
||||
- [coreApiWebdavProperties/getQuota.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L74)
|
||||
- [coreApiWebdavProperties/getQuota.feature:75](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/getQuota.feature#L75)
|
||||
|
||||
#### [deleting a file inside a received shared folder is moved to the trash-bin of the sharer not the receiver](https://github.com/owncloud/ocis/issues/1124)
|
||||
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:54](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L54)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:55](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L55)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:56](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L56)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:83](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L83)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:84](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L84)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:85](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L85)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:142](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L142)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L143)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:144](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L144)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:202](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L202)
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:203](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L203)
|
||||
|
||||
### Other
|
||||
|
||||
API, search, favorites, config, capabilities, not existing endpoints, CORS and others
|
||||
|
||||
#### [sending MKCOL requests to another or non-existing user's webDav endpoints as normal user should return 404](https://github.com/owncloud/ocis/issues/5049)
|
||||
|
||||
_ocdav: api compatibility, return correct status code_
|
||||
|
||||
- [coreApiAuth/webDavMKCOLAuth.feature:42](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L42)
|
||||
- [coreApiAuth/webDavMKCOLAuth.feature:53](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L53)
|
||||
|
||||
#### [trying to lock file of another user gives http 500](https://github.com/owncloud/ocis/issues/2176)
|
||||
|
||||
- [coreApiAuth/webDavLOCKAuth.feature:46](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L46)
|
||||
- [coreApiAuth/webDavLOCKAuth.feature:58](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L58)
|
||||
|
||||
#### [Support for favorites](https://github.com/owncloud/ocis/issues/1228)
|
||||
|
||||
- [coreApiFavorites/favorites.feature:101](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L101)
|
||||
- [coreApiFavorites/favorites.feature:102](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L102)
|
||||
- [coreApiFavorites/favorites.feature:103](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L103)
|
||||
- [coreApiFavorites/favorites.feature:124](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L124)
|
||||
- [coreApiFavorites/favorites.feature:125](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L125)
|
||||
- [coreApiFavorites/favorites.feature:126](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L126)
|
||||
- [coreApiFavorites/favorites.feature:189](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L189)
|
||||
- [coreApiFavorites/favorites.feature:190](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L190)
|
||||
- [coreApiFavorites/favorites.feature:191](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L191)
|
||||
- [coreApiFavorites/favorites.feature:145](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L145)
|
||||
- [coreApiFavorites/favorites.feature:146](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L146)
|
||||
- [coreApiFavorites/favorites.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L147)
|
||||
- [coreApiFavorites/favorites.feature:174](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L174)
|
||||
- [coreApiFavorites/favorites.feature:175](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L175)
|
||||
- [coreApiFavorites/favorites.feature:176](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favorites.feature#L176)
|
||||
- [coreApiFavorites/favoritesSharingToShares.feature:91](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L91)
|
||||
- [coreApiFavorites/favoritesSharingToShares.feature:92](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L92)
|
||||
- [coreApiFavorites/favoritesSharingToShares.feature:93](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiFavorites/favoritesSharingToShares.feature#L93)
|
||||
|
||||
#### [WWW-Authenticate header for unauthenticated requests is not clear](https://github.com/owncloud/ocis/issues/2285)
|
||||
|
||||
- [coreApiWebdavOperations/refuseAccess.feature:21](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L21)
|
||||
- [coreApiWebdavOperations/refuseAccess.feature:22](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L22)
|
||||
|
||||
#### [PATCH request for TUS upload with wrong checksum gives incorrect response](https://github.com/owncloud/ocis/issues/1755)
|
||||
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:74](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L74)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:75](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L75)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:76](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L76)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:77](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L77)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L79)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:78](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L78)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:147](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L147)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:148](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L148)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:149](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L149)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:192](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L192)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:193](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L193)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:194](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L194)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:195](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L195)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:196](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L196)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:197](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L197)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:240](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L240)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:241](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L241)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:242](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L242)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:243](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L243)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:244](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L244)
|
||||
- [coreApiWebdavUploadTUS/checksums.feature:245](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L245)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:255](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L255)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:256](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L256)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:279](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L279)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:280](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L280)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:375](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L375)
|
||||
- [coreApiWebdavUploadTUS/uploadToShare.feature:376](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadToShare.feature#L376)
|
||||
|
||||
#### [Renaming resource to banned name is allowed in spaces webdav](https://github.com/owncloud/ocis/issues/3099)
|
||||
|
||||
- [coreApiWebdavMove2/moveFile.feature:143](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L143)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:36](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L36)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:50](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L50)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:64](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L64)
|
||||
|
||||
#### [Trying to delete other user's trashbin item returns 409 for spaces path instead of 404](https://github.com/owncloud/ocis/issues/9791)
|
||||
|
||||
- [coreApiTrashbin/trashbinDelete.feature:92](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L92)
|
||||
|
||||
#### [MOVE a file into same folder with same name returns 404 instead of 403](https://github.com/owncloud/ocis/issues/1976)
|
||||
|
||||
- [coreApiWebdavMove2/moveFile.feature:100](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L100)
|
||||
- [coreApiWebdavMove2/moveFile.feature:101](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L101)
|
||||
- [coreApiWebdavMove2/moveFile.feature:102](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L102)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:217](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L217)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:218](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L218)
|
||||
- [coreApiWebdavMove1/moveFolder.feature:219](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L219)
|
||||
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:334](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L334)
|
||||
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:337](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L337)
|
||||
- [coreApiWebdavMove2/moveShareOnOpencloud.feature:340](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavMove2/moveShareOnOpencloud.feature#L340)
|
||||
|
||||
#### [COPY file/folder to same name is possible (but 500 code error for folder with spaces path)](https://github.com/owncloud/ocis/issues/8711)
|
||||
|
||||
- [coreApiSharePublicLink2/copyFromPublicLink.feature:198](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiSharePublicLink2/copyFromPublicLink.feature#L198)
|
||||
- [coreApiWebdavProperties/copyFile.feature:1094](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1094)
|
||||
- [coreApiWebdavProperties/copyFile.feature:1095](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1095)
|
||||
- [coreApiWebdavProperties/copyFile.feature:1096](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L1096)
|
||||
|
||||
#### [Trying to restore personal file to file of share received folder returns 403 but the share file is deleted (new dav path)](https://github.com/owncloud/ocis/issues/10356)
|
||||
|
||||
- [coreApiTrashbin/trashbinSharingToShares.feature:277](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiTrashbin/trashbinSharingToShares.feature#L277)
|
||||
|
||||
### Won't fix
|
||||
|
||||
Not everything needs to be implemented for opencloud.
|
||||
|
||||
- _Blacklisted ignored files are no longer required because opencloud can handle `.htaccess` files without security implications introduced by serving user provided files with apache._
|
||||
|
||||
Note: always have an empty line at the end of this file.
|
||||
The bash script that processes this file requires that the last line has a newline on the end.
|
||||
@@ -193,25 +193,7 @@
|
||||
- [apiServiceAvailability/serviceAvailabilityCheck.feature:125](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiServiceAvailability/serviceAvailabilityCheck.feature#L125)
|
||||
|
||||
#### [Skip tests for different languages](https://github.com/opencloud-eu/opencloud/issues/183)
|
||||
- [apiAntivirus/antivirus.feature:309](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L309)
|
||||
- [apiAntivirus/antivirus.feature:310](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L310)
|
||||
- [apiAntivirus/antivirus.feature:311](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L311)
|
||||
- [apiAntivirus/antivirus.feature:312](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L312)
|
||||
- [apiAntivirus/antivirus.feature:313](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L313)
|
||||
- [apiAntivirus/antivirus.feature:314](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiAntivirus/antivirus.feature#L314)
|
||||
- [apiNotification/deprovisioningNotification.feature:126](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/deprovisioningNotification.feature#L126)
|
||||
- [apiNotification/deprovisioningNotification.feature:127](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/deprovisioningNotification.feature#L127)
|
||||
- [apiNotification/notification.feature:282](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L282)
|
||||
- [apiNotification/notification.feature:283](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L283)
|
||||
- [apiNotification/notification.feature:284](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L284)
|
||||
- [apiNotification/notification.feature:285](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L285)
|
||||
- [apiNotification/notification.feature:288](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/notification.feature#L288)
|
||||
- [apiNotification/spaceNotification.feature:434](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/spaceNotification.feature#L434)
|
||||
- [apiNotification/spaceNotification.feature:435](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/spaceNotification.feature#L435)
|
||||
- [apiNotification/emailNotification.feature:84](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L84)
|
||||
- [apiNotification/emailNotification.feature:117](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L117)
|
||||
- [apiNotification/emailNotification.feature:150](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L150)
|
||||
- [apiNotification/emailNotification.feature:205](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiNotification/emailNotification.feature#L205)
|
||||
- [apiActivities/activities.feature:2598](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/apiActivities/activities.feature#L2598)
|
||||
|
||||
|
||||
|
||||
@@ -337,6 +337,7 @@
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:39](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L39)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:51](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L51)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:65](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L65)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtime.feature:79](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtime.feature#L79)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtimeShares.feature:29](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtimeShares.feature#L29)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtimeShares.feature:48](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtimeShares.feature#L48)
|
||||
- [coreApiWebdavUploadTUS/uploadFileMtimeShares.feature:69](https://github.com/opencloud-eu/opencloud/blob/main/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFileMtimeShares.feature#L69)
|
||||
|
||||
@@ -22,7 +22,7 @@ Feature: create auth-app token
|
||||
],
|
||||
"properties": {
|
||||
"token": {
|
||||
"pattern": "^[a-zA-Z0-9]{16}$"
|
||||
"pattern": "^([a-zA-Z]+ ){5}[a-zA-Z]+$"
|
||||
},
|
||||
"label": {
|
||||
"const": "Generated via API"
|
||||
@@ -58,7 +58,7 @@ Feature: create auth-app token
|
||||
],
|
||||
"properties": {
|
||||
"token": {
|
||||
"pattern": "^\\$2a\\$11\\$[A-Za-z0-9./]{53}$"
|
||||
"pattern": "^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-4[0-9A-Fa-f]{3}-[89ABab][0-9A-Fa-f]{3}-[0-9A-Fa-f]{12}$"
|
||||
},
|
||||
"label": {
|
||||
"const": "Generated via API"
|
||||
@@ -75,7 +75,7 @@ Feature: create auth-app token
|
||||
],
|
||||
"properties": {
|
||||
"token": {
|
||||
"pattern": "^\\$2a\\$11\\$[A-Za-z0-9./]{53}$"
|
||||
"pattern": "^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-4[0-9A-Fa-f]{3}-[89ABab][0-9A-Fa-f]{3}-[0-9A-Fa-f]{12}$"
|
||||
},
|
||||
"label": {
|
||||
"const": "Generated via CLI"
|
||||
@@ -92,10 +92,10 @@ Feature: create auth-app token
|
||||
],
|
||||
"properties": {
|
||||
"token": {
|
||||
"pattern": "^\\$2a\\$11\\$[A-Za-z0-9./]{53}$"
|
||||
"pattern": "^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-4[0-9A-Fa-f]{3}-[89ABab][0-9A-Fa-f]{3}-[0-9A-Fa-f]{12}$"
|
||||
},
|
||||
"label": {
|
||||
"const": "Generated via Impersonation API"
|
||||
"const": "Generated via API (Impersonation)"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -121,10 +121,10 @@ Feature: create auth-app token
|
||||
],
|
||||
"properties": {
|
||||
"token": {
|
||||
"pattern": "^[a-zA-Z0-9]{16}$"
|
||||
"pattern": "^([a-zA-Z]+ ){5}[a-zA-Z]+$"
|
||||
},
|
||||
"label": {
|
||||
"const": "Generated via Impersonation API"
|
||||
"const": "Generated via API (Impersonation)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ Feature: check file info with different wopi apps
|
||||
"const": true
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
@@ -581,7 +581,7 @@ Feature: check file info with different wopi apps
|
||||
"const": <user-can-write>
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
@@ -691,7 +691,7 @@ Feature: check file info with different wopi apps
|
||||
"const": true
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
@@ -1077,7 +1077,7 @@ Feature: check file info with different wopi apps
|
||||
"const": true
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
@@ -1424,7 +1424,7 @@ Feature: check file info with different wopi apps
|
||||
"const": true
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
@@ -1810,7 +1810,7 @@ Feature: check file info with different wopi apps
|
||||
"const": <user-can-write>
|
||||
},
|
||||
"UserCanNotWriteRelative": {
|
||||
"const": false
|
||||
"const": true
|
||||
},
|
||||
"EnableOwnerTermination": {
|
||||
"const": true
|
||||
|
||||
@@ -120,3 +120,26 @@ Feature: Propfind test
|
||||
| Manager | RDNVWZP |
|
||||
| Space Editor | DNVW |
|
||||
| Space Viewer | |
|
||||
|
||||
@issue-1523
|
||||
Scenario: propfind response contains a restored folder with correct name
|
||||
Given user "Alice" has created a folder "folderMain" in space "Personal"
|
||||
And user "Alice" has deleted folder "folderMain"
|
||||
And user "Alice" has created a folder "folderMain" in space "Personal"
|
||||
When user "Alice" restores the folder with original path "/folderMain" to "/folderMain (1)" using the trashbin API
|
||||
And user "Alice" sends PROPFIND request to space "Personal" using the WebDAV API
|
||||
Then the HTTP status code should be "207"
|
||||
And as user "Alice" the PROPFIND response should contain a resource "folderMain" with these key and value pairs:
|
||||
| key | value |
|
||||
| oc:fileid | %file_id_pattern% |
|
||||
| oc:file-parent | %file_id_pattern% |
|
||||
| oc:name | folderMain |
|
||||
| oc:permissions | RDNVCKZP |
|
||||
| oc:size | 0 |
|
||||
And as user "Alice" the PROPFIND response should contain a resource "folderMain (1)" with these key and value pairs:
|
||||
| key | value |
|
||||
| oc:fileid | %file_id_pattern% |
|
||||
| oc:file-parent | %file_id_pattern% |
|
||||
| oc:name | folderMain (1) |
|
||||
| oc:permissions | RDNVCKZP |
|
||||
| oc:size | 0 |
|
||||
|
||||
@@ -23,6 +23,7 @@ Feature: Send a sharing invitations
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "<resource>" synced
|
||||
And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
@@ -113,7 +114,9 @@ Feature: Send a sharing invitations
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "<resource>" synced
|
||||
And user "Brian" should have a share "<resource>" shared by user "Alice" from space "Personal"
|
||||
And user "Carol" has a share "<resource>" synced
|
||||
And user "Carol" should have a share "<resource>" shared by user "Alice" from space "Personal"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
@@ -1998,6 +2001,7 @@ Feature: Send a sharing invitations
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "<resource>" synced
|
||||
And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
@@ -2086,7 +2090,9 @@ Feature: Send a sharing invitations
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "<resource>" synced
|
||||
And user "Brian" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
|
||||
And user "Carol" has a share "<resource>" synced
|
||||
And user "Carol" should have a share "<resource>" shared by user "Alice" from space "NewSpace"
|
||||
And the JSON data of the response should match
|
||||
"""
|
||||
@@ -3166,6 +3172,7 @@ Feature: Send a sharing invitations
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "textfile.txt" synced
|
||||
And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
|
||||
When user "Alice" sends the following resource share invitation using the Graph API:
|
||||
| resource | textfile.txt |
|
||||
@@ -3174,6 +3181,7 @@ Feature: Send a sharing invitations
|
||||
| shareType | group |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Carol" has a share "textfile.txt" synced
|
||||
And user "Carol" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
|
||||
|
||||
|
||||
@@ -3193,4 +3201,5 @@ Feature: Send a sharing invitations
|
||||
| shareType | group |
|
||||
| permissionsRole | Viewer |
|
||||
Then the HTTP status code should be "200"
|
||||
And user "Brian" has a share "textfile.txt" synced
|
||||
And user "Brian" should have a share "textfile.txt" shared by user "Alice" from space "NewSpace"
|
||||
|
||||
@@ -19,7 +19,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last resource share with the following properties using the Graph API:
|
||||
| space | Personal |
|
||||
| resource | testfile.txt |
|
||||
@@ -91,7 +91,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last resource share with the following properties using the Graph API:
|
||||
| space | Personal |
|
||||
| resource | <resource> |
|
||||
@@ -394,7 +394,7 @@ Feature: Update permission of a share
|
||||
| sharee | grp1 |
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
| space | NewSpace |
|
||||
@@ -474,7 +474,7 @@ Feature: Update permission of a share
|
||||
| sharee | grp1 |
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | |
|
||||
| space | NewSpace |
|
||||
@@ -554,7 +554,7 @@ Feature: Update permission of a share
|
||||
| sharee | grp1 |
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | 2200-07-15T14:00:00Z |
|
||||
| space | NewSpace |
|
||||
@@ -636,7 +636,7 @@ Feature: Update permission of a share
|
||||
| sharee | grp1 |
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | |
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
@@ -717,7 +717,7 @@ Feature: Update permission of a share
|
||||
| sharee | grp1 |
|
||||
| shareType | group |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | 2200-07-15T14:00:00Z |
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
@@ -799,7 +799,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
| space | NewSpace |
|
||||
@@ -875,7 +875,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | |
|
||||
| space | NewSpace |
|
||||
@@ -951,7 +951,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | 2200-07-15T14:00:00Z |
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
@@ -1033,7 +1033,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | |
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
@@ -1110,7 +1110,7 @@ Feature: Update permission of a share
|
||||
| sharee | Brian |
|
||||
| shareType | user |
|
||||
| permissionsRole | <permissions-role> |
|
||||
| expirationDateTime | 2025-07-15T14:00:00Z |
|
||||
| expirationDateTime | 2027-07-15T14:00:00Z |
|
||||
When user "Alice" updates the last drive share with the following using root endpoint of the Graph API:
|
||||
| expirationDateTime | 2200-07-15T14:00:00Z |
|
||||
| permissionsRole | <new-permissions-role> |
|
||||
|
||||
@@ -27,7 +27,7 @@ Feature: backup consistency
|
||||
Then the command should be successful
|
||||
And the command output should contain "💚 No inconsistency found. The backup in '%storage_path%' seems to be valid."
|
||||
|
||||
@issue-9498 @issue-391 @skipOnOpencloud-decomposed-Storage
|
||||
@issue-9498 @issue-391
|
||||
Scenario: check backup consistency after uploading file multiple times via TUS
|
||||
Given user "Alice" uploads a file "filesForUpload/textfile.txt" to "/today.txt" with mtime "today" via TUS inside of the space "Personal" using the WebDAV API
|
||||
And user "Alice" uploads a file "filesForUpload/textfile.txt" to "/today.txt" with mtime "today" via TUS inside of the space "Personal" using the WebDAV API
|
||||
@@ -39,9 +39,9 @@ Feature: backup consistency
|
||||
And the administrator has started the server
|
||||
When user "Alice" gets the number of versions of file "today.txt"
|
||||
Then the HTTP status code should be "207"
|
||||
And the number of versions should be "1"
|
||||
And the number of versions should be "2"
|
||||
|
||||
@issue-9498 @issue-428 @skipOnOpencloud-decomposed-Storage
|
||||
@issue-9498 @issue-428
|
||||
Scenario: check backup consistency after uploading a file multiple times
|
||||
Given user "Alice" has uploaded file with content "hello world" to "/textfile0.txt"
|
||||
And user "Alice" has uploaded file with content "hello world" to "/textfile0.txt"
|
||||
|
||||
@@ -11,7 +11,7 @@ Feature: remove file versions via CLI command
|
||||
And user "Alice" has uploaded file with content "This is version 3" to "textfile.txt"
|
||||
When the administrator removes all the file versions using the CLI
|
||||
Then the command should be successful
|
||||
And the command output should contain "✅ Deleted 2 revisions (6 files / 2 blobs)"
|
||||
And the command output should contain "✅ Deleted 2 revisions (4 files / 2 blobs)"
|
||||
When user "Alice" gets the number of versions of file "textfile.txt"
|
||||
Then the HTTP status code should be "207"
|
||||
And the number of versions should be "0"
|
||||
@@ -26,7 +26,7 @@ Feature: remove file versions via CLI command
|
||||
And user "Alice" has uploaded file with content "This is version 3" to "anotherFile.txt"
|
||||
When the administrator removes the versions of file "randomFile.txt" of user "Alice" from space "Personal" using the CLI
|
||||
Then the command should be successful
|
||||
And the command output should contain "✅ Deleted 2 revisions (6 files / 2 blobs)"
|
||||
And the command output should contain "✅ Deleted 2 revisions (4 files / 2 blobs)"
|
||||
When user "Alice" gets the number of versions of file "randomFile.txt"
|
||||
Then the HTTP status code should be "207"
|
||||
And the number of versions should be "0"
|
||||
@@ -52,7 +52,7 @@ Feature: remove file versions via CLI command
|
||||
And we save it into "EPSUM_FILEID"
|
||||
When the administrator removes the file versions of space "projectSpace" using the CLI
|
||||
Then the command should be successful
|
||||
And the command output should contain "✅ Deleted 4 revisions (12 files / 4 blobs)"
|
||||
And the command output should contain "✅ Deleted 4 revisions (8 files / 4 blobs)"
|
||||
When user "Alice" gets the number of versions of file "file.txt"
|
||||
Then the HTTP status code should be "207"
|
||||
And the number of versions should be "2"
|
||||
|
||||
@@ -21,6 +21,7 @@ Feature: sharing
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
And using SharingNG
|
||||
And user "Brian" has a share "textfile0.txt" synced
|
||||
When user "Brian" gets all the shares shared with him using the sharing API
|
||||
Then the OCS status code should be "<ocs-status-code>"
|
||||
And the HTTP status code should be "200"
|
||||
@@ -47,6 +48,8 @@ Feature: sharing
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
And using SharingNG
|
||||
And user "Brian" has a share "textfile0.txt" synced
|
||||
And user "Brian" has a share "textfile1.txt" synced
|
||||
When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile1.txt" using the provisioning API
|
||||
Then the OCS status code should be "<ocs-status-code>"
|
||||
And the HTTP status code should be "200"
|
||||
@@ -73,6 +76,8 @@ Feature: sharing
|
||||
| shareType | user |
|
||||
| permissionsRole | Viewer |
|
||||
And using SharingNG
|
||||
And user "Brian" has a share "textfile0.txt" synced
|
||||
And user "Brian" has a share "textfile1.txt" synced
|
||||
When user "Brian" gets all the shares shared with him that are received as file "/Shares/textfile0.txt" using the provisioning API
|
||||
Then the OCS status code should be "<ocs-status-code>"
|
||||
And the HTTP status code should be "200"
|
||||
@@ -94,6 +99,7 @@ Feature: sharing
|
||||
| shareType | group |
|
||||
| permissionsRole | Viewer |
|
||||
And using SharingNG
|
||||
And user "Brian" has a share "textfile0.txt" synced
|
||||
When user "Brian" gets all the shares shared with him using the sharing API
|
||||
Then the OCS status code should be "<ocs-status-code>"
|
||||
And the HTTP status code should be "200"
|
||||
|
||||
@@ -567,3 +567,52 @@ Feature: restore deleted files/folders
|
||||
| dav-path-version |
|
||||
| spaces |
|
||||
| new |
|
||||
|
||||
@issue-1523
|
||||
Scenario Outline: restore deleted folder when folder with same name exists
|
||||
Given using <dav-path-version> DAV path
|
||||
And user "Alice" has created folder "new"
|
||||
And user "Alice" has uploaded file with content "content" to "new/test.txt"
|
||||
And user "Alice" has deleted folder "new"
|
||||
And user "Alice" has created folder "new"
|
||||
And user "Alice" has uploaded file with content "new content" to "new/new-file.txt"
|
||||
When user "Alice" restores the folder with original path "/new" to "/new (1)" using the trashbin API
|
||||
Then the HTTP status code should be "201"
|
||||
And as "Alice" the following folders should exist
|
||||
| path |
|
||||
| /new |
|
||||
| /new (1) |
|
||||
And as "Alice" the following files should exist
|
||||
| path |
|
||||
| /new/new-file.txt |
|
||||
| /new (1)/test.txt |
|
||||
Examples:
|
||||
| dav-path-version |
|
||||
| spaces |
|
||||
| new |
|
||||
|
||||
@issue-1523
|
||||
Scenario Outline: restore deleted folder with files when folder with same name exists
|
||||
Given using <dav-path-version> DAV path
|
||||
And user "Alice" has created folder "folder-a"
|
||||
And user "Alice" has uploaded file with content "content b" to "folder-a/b.txt"
|
||||
And user "Alice" has uploaded file with content "content c" to "folder-a/c.txt"
|
||||
And user "Alice" has deleted file "folder-a/b.txt"
|
||||
And user "Alice" has deleted folder "folder-a"
|
||||
And user "Alice" has created folder "folder-a"
|
||||
When user "Alice" restores the file with original path "folder-a/b.txt" using the trashbin API
|
||||
Then the HTTP status code should be "201"
|
||||
When user "Alice" restores the folder with original path "/folder-a" to "/folder-a (1)" using the trashbin API
|
||||
Then the HTTP status code should be "201"
|
||||
And as "Alice" the following folders should exist
|
||||
| path |
|
||||
| /folder-a |
|
||||
| /folder-a (1) |
|
||||
And as "Alice" the following files should exist
|
||||
| path |
|
||||
| /folder-a/b.txt |
|
||||
| /folder-a (1)/c.txt |
|
||||
Examples:
|
||||
| dav-path-version |
|
||||
| spaces |
|
||||
| new |
|
||||
|
||||
@@ -282,14 +282,14 @@ Feature: dav-versions
|
||||
| new |
|
||||
| spaces |
|
||||
|
||||
@issue-10496
|
||||
@issue-391
|
||||
Scenario Outline: upload the same file more than twice with the same mtime and only one version is available
|
||||
Given using <dav-path-version> DAV path
|
||||
And user "Alice" has uploaded file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT"
|
||||
And user "Alice" has uploaded file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT"
|
||||
When user "Alice" uploads file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT" using the WebDAV API
|
||||
Then the HTTP status code should be "204"
|
||||
And the version folder of file "/file.txt" for user "Alice" should contain "1" element
|
||||
And the version folder of file "/file.txt" for user "Alice" should contain "2" element
|
||||
And as "Alice" the mtime of the file "file.txt" should be "Thu, 08 Aug 2019 04:18:13 GMT"
|
||||
Examples:
|
||||
| dav-path-version |
|
||||
@@ -303,7 +303,7 @@ Feature: dav-versions
|
||||
And user "Alice" has uploaded file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT"
|
||||
When user "Alice" restores version index "1" of file "/file.txt" using the WebDAV API
|
||||
Then the HTTP status code should be "204"
|
||||
And the version folder of file "/file.txt" for user "Alice" should contain "0" element
|
||||
And the version folder of file "/file.txt" for user "Alice" should contain "1" element
|
||||
|
||||
@skipOnReva
|
||||
Scenario: sharer of a file can see the old version information when the sharee changes the content of the file
|
||||
|
||||
@@ -59,7 +59,7 @@ Feature: upload file
|
||||
And user "Alice" has uploaded file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT" using the TUS protocol
|
||||
When user "Alice" uploads file "filesForUpload/textfile.txt" to "file.txt" with mtime "Thu, 08 Aug 2019 04:18:13 GMT" using the TUS protocol on the WebDAV API
|
||||
Then as "Alice" the mtime of the file "file.txt" should be "Thu, 08 Aug 2019 04:18:13 GMT"
|
||||
And the version folder of file "file.txt" for user "Alice" should contain "1" element
|
||||
And the version folder of file "file.txt" for user "Alice" should contain "2" element
|
||||
Examples:
|
||||
| dav-path-version |
|
||||
| old |
|
||||
|
||||
@@ -261,10 +261,11 @@ function run_behat_tests() {
|
||||
FAILED_SCENARIO_PATHS_COLORED=`awk '/Failed scenarios:/',0 ${TEST_LOG_FILE} | grep -a feature`
|
||||
# There will be some ANSI escape codes for color in the FEATURE_COLORED var.
|
||||
# Strip them out so we can pass just the ordinary feature details to Behat.
|
||||
# Also strip everything after ".feature:XX", including text such as "(on line xx)" added by Behat indicating the failing step's line number.
|
||||
# Thanks to https://en.wikipedia.org/wiki/Tee_(command) and
|
||||
# https://stackoverflow.com/questions/23416278/how-to-strip-ansi-escape-sequences-from-a-variable
|
||||
# for ideas.
|
||||
FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g")
|
||||
FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g" | sed 's/\(\.feature:[0-9]\+\).*/\1/')
|
||||
|
||||
# If something else went wrong, and there were no failed scenarios,
|
||||
# then the awk, grep, sed command sequence above ends up with an empty string.
|
||||
|
||||
@@ -17,6 +17,9 @@ export APP_PROVIDER_DEBUG_ADDR=127.0.0.1:10165
|
||||
export APP_PROVIDER_GRPC_ADDR=127.0.0.1:10164
|
||||
export APP_REGISTRY_DEBUG_ADDR=127.0.0.1:10243
|
||||
export APP_REGISTRY_GRPC_ADDR=127.0.0.1:10242
|
||||
export AUTH_APP_DEBUG_ADDR=127.0.0.1:10245
|
||||
export AUTH_APP_GRPC_ADDR=127.0.0.1:10246
|
||||
export AUTH_APP_HTTP_ADDR=127.0.0.1:10247
|
||||
export AUTH_BASIC_DEBUG_ADDR=127.0.0.1:10147
|
||||
export AUTH_BASIC_GRPC_ADDR=127.0.0.1:10146
|
||||
export AUTH_MACHINE_DEBUG_ADDR=127.0.0.1:10167
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
}
|
||||
},
|
||||
"require": {
|
||||
"behat/behat": "^3.13",
|
||||
"behat/behat": "^3.24",
|
||||
"behat/gherkin": "^4.9",
|
||||
"behat/mink": "1.7.1",
|
||||
"friends-of-behat/mink-extension": "^2.7",
|
||||
|
||||
2
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
2
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
|
||||
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
Documentation: https://godocs.io/github.com/BurntSushi/toml
|
||||
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
|
||||
|
||||
See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
|
||||
33
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
33
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// markDecodedRecursive is a helper to mark any key under the given tmap as
|
||||
// decoded, recursing as needed
|
||||
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
|
||||
for key := range tmap {
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
if tmap, ok := tmap[key].(map[string]any); ok {
|
||||
md.context = append(md.context, key)
|
||||
markDecodedRecursive(md, tmap)
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
@@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
|
||||
if err != nil {
|
||||
return md.parseErr(err)
|
||||
}
|
||||
// Assume the Unmarshaler decoded everything, so mark all keys under
|
||||
// this table as decoded.
|
||||
if tmap, ok := data.(map[string]any); ok {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
if aot, ok := data.([]map[string]any); ok {
|
||||
for _, tmap := range aot {
|
||||
markDecodedRecursive(md, tmap)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
@@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {
|
||||
|
||||
func (md *MetaData) parseErr(err error) error {
|
||||
k := md.context.String()
|
||||
d := string(md.data)
|
||||
return ParseError{
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos,
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
input: string(md.data),
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos.withCol(d),
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
input: d,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
46
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
46
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
var mapKeysDirect, mapKeysSub []reflect.Value
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
mapKeysSub = append(mapKeysSub, mapKey)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
mapKeysDirect = append(mapKeysDirect, mapKey)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
|
||||
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
|
||||
for i, mapKey := range mapKeys {
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
val := eindirect(rv.MapIndex(mapKey))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{mapKey}, val, true)
|
||||
enc.writeKeyValue(Key{mapKey.String()}, val, true)
|
||||
if trailC || i != len(mapKeys)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(mapKey), val)
|
||||
enc.encode(key.add(mapKey.String()), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
}
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
@@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
// Need to make a copy because ... ehm, I don't know why... I guess
|
||||
// allocating a new array can cause it to fail(?)
|
||||
//
|
||||
// Done for: https://github.com/BurntSushi/toml/issues/430
|
||||
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
writeFields := func(fields [][]int) {
|
||||
writeFields := func(fields [][]int, totalFields int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
@@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{keyName}, fieldVal, true)
|
||||
if fieldIndex[0] != len(fields)-1 {
|
||||
if fieldIndex[0] != totalFields-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
@@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
|
||||
l := len(fieldsDirect) + len(fieldsSub)
|
||||
writeFields(fieldsDirect, l)
|
||||
writeFields(fieldsSub, l)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
|
||||
69
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
69
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
@@ -67,21 +67,36 @@ type ParseError struct {
|
||||
// Position of an error.
|
||||
type Position struct {
|
||||
Line int // Line number, starting at 1.
|
||||
Col int // Error column, starting at 1.
|
||||
Start int // Start of error, as byte offset starting at 0.
|
||||
Len int // Lenght in bytes.
|
||||
Len int // Length of the error in bytes.
|
||||
}
|
||||
|
||||
func (p Position) withCol(tomlFile string) Position {
|
||||
var (
|
||||
pos int
|
||||
lines = strings.Split(tomlFile, "\n")
|
||||
)
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= p.Start {
|
||||
p.Col = p.Start - pos + 1
|
||||
if p.Col < 1 { // Should never happen, but just in case.
|
||||
p.Col = 1
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
pe.Position.Line, pe.LastKey, pe.Message)
|
||||
}
|
||||
|
||||
// ErrorWithPosition returns the error with detailed location context.
|
||||
@@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||
@@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
@@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
|
||||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
}
|
||||
|
||||
func expandTab(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
|
||||
33
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
33
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
||||
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
|
||||
pos.Line--
|
||||
}
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
|
||||
@@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemKeyEnd)
|
||||
return lexSkip(lx, lexValue)
|
||||
default:
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected '.' or '=', but got %q instead", r)
|
||||
}
|
||||
}
|
||||
@@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
|
||||
if r == eof {
|
||||
return lx.errorf("unexpected EOF; expected value")
|
||||
}
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
@@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
||||
case 'x':
|
||||
r = lx.peek()
|
||||
if !isHex(r) {
|
||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
|
||||
}
|
||||
return lexHexInteger
|
||||
}
|
||||
@@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-'
|
||||
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') || r == '_' || r == '-'
|
||||
}
|
||||
|
||||
3
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
3
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
@@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
|
||||
|
||||
// Like append(), but only increase the cap by 1.
|
||||
func (k Key) add(piece string) Key {
|
||||
if cap(k) > len(k) {
|
||||
return append(k, piece)
|
||||
}
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
|
||||
17
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
17
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
|
||||
// it anyway.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||
data = data[2:]
|
||||
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
|
||||
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||
data = data[3:]
|
||||
}
|
||||
@@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
|
||||
if i := strings.IndexRune(data[:ex], 0); i > -1 {
|
||||
return nil, ParseError{
|
||||
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
|
||||
Position: Position{Line: 1, Start: i, Len: 1},
|
||||
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
|
||||
Line: 1,
|
||||
input: data,
|
||||
}
|
||||
@@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
|
||||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
|
||||
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
func (p *parser) panicf(format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
Position: p.pos.withCol(p.lx.input),
|
||||
Line: p.pos.Line,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
@@ -123,10 +123,11 @@ func (p *parser) next() item {
|
||||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
Position: it.pos,
|
||||
Message: it.err.Error(),
|
||||
err: it.err,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Line,
|
||||
LastKey: p.current(),
|
||||
err: it.err,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// isHexis a superset of all the permissable characters surrounding an
|
||||
// isHex is a superset of all the permissible characters surrounding an
|
||||
// underscore.
|
||||
accept = isHex(r)
|
||||
}
|
||||
|
||||
4
vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go
generated
vendored
4
vendor/github.com/KimMachineGun/automemlimit/memlimit/cgroups.go
generated
vendored
@@ -269,11 +269,9 @@ func parseMountInfoLine(line string) (mountInfo, error) {
|
||||
return mountInfo{}, fmt.Errorf("invalid separator")
|
||||
}
|
||||
|
||||
fields1 := strings.Split(fieldss[0], " ")
|
||||
fields1 := strings.SplitN(fieldss[0], " ", 7)
|
||||
if len(fields1) < 6 {
|
||||
return mountInfo{}, fmt.Errorf("not enough fields before separator: %v", fields1)
|
||||
} else if len(fields1) > 7 {
|
||||
return mountInfo{}, fmt.Errorf("too many fields before separator: %v", fields1)
|
||||
} else if len(fields1) == 6 {
|
||||
fields1 = append(fields1, "")
|
||||
}
|
||||
|
||||
1
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
1
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
@@ -24,6 +24,7 @@ Artur Melanchyk <artur.melanchyk@gmail.com>
|
||||
Asta Xie <xiemengjun at gmail.com>
|
||||
B Lamarche <blam413 at gmail.com>
|
||||
Bes Dollma <bdollma@thousandeyes.com>
|
||||
Bogdan Constantinescu <bog.con.bc at gmail.com>
|
||||
Brian Hendriks <brian at dolthub.com>
|
||||
Bulat Gaifullin <gaifullinbf at gmail.com>
|
||||
Caine Jette <jette at alum.mit.edu>
|
||||
|
||||
11
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
11
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
@@ -1,5 +1,16 @@
|
||||
# Changelog
|
||||
|
||||
## v1.9.1 (2025-03-21)
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add Charset() option. (#1679)
|
||||
|
||||
### Bugfixes
|
||||
|
||||
* go.mod: fix go version format (#1682)
|
||||
* Fix FormatDSN missing ConnectionAttributes (#1619)
|
||||
|
||||
## v1.9.0 (2025-02-18)
|
||||
|
||||
### Major Changes
|
||||
|
||||
21
vendor/github.com/go-sql-driver/mysql/dsn.go
generated
vendored
21
vendor/github.com/go-sql-driver/mysql/dsn.go
generated
vendored
@@ -44,7 +44,6 @@ type Config struct {
|
||||
DBName string // Database name
|
||||
Params map[string]string // Connection parameters
|
||||
ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
|
||||
charsets []string // Connection charset. When set, this will be set in SET NAMES <charset> query
|
||||
Collation string // Connection collation. When set, this will be set in SET NAMES <charset> COLLATE <collation> query
|
||||
Loc *time.Location // Location for time.Time values
|
||||
MaxAllowedPacket int // Max packet size allowed
|
||||
@@ -81,6 +80,7 @@ type Config struct {
|
||||
beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
|
||||
pubKey *rsa.PublicKey // Server public key
|
||||
timeTruncate time.Duration // Truncate time.Time values to the specified duration
|
||||
charsets []string // Connection charset. When set, this will be set in SET NAMES <charset> query
|
||||
}
|
||||
|
||||
// Functional Options Pattern
|
||||
@@ -135,6 +135,21 @@ func EnableCompression(yes bool) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// Charset sets the connection charset and collation.
|
||||
//
|
||||
// charset is the connection charset.
|
||||
// collation is the connection collation. It can be null or empty string.
|
||||
//
|
||||
// When collation is not specified, `SET NAMES <charset>` command is sent when the connection is established.
|
||||
// When collation is specified, `SET NAMES <charset> COLLATE <collation>` command is sent when the connection is established.
|
||||
func Charset(charset, collation string) Option {
|
||||
return func(cfg *Config) error {
|
||||
cfg.charsets = []string{charset}
|
||||
cfg.Collation = collation
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *Config) Clone() *Config {
|
||||
cp := *cfg
|
||||
if cp.TLS != nil {
|
||||
@@ -307,6 +322,10 @@ func (cfg *Config) FormatDSN() string {
|
||||
writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
|
||||
}
|
||||
|
||||
if cfg.ConnectionAttributes != "" {
|
||||
writeDSNParam(&buf, &hasParam, "connectionAttributes", url.QueryEscape(cfg.ConnectionAttributes))
|
||||
}
|
||||
|
||||
if cfg.compress {
|
||||
writeDSNParam(&buf, &hasParam, "compress", "true")
|
||||
}
|
||||
|
||||
36
vendor/github.com/golang-jwt/jwt/v4/parser.go
generated
vendored
36
vendor/github.com/golang-jwt/jwt/v4/parser.go
generated
vendored
@@ -7,6 +7,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const tokenDelimiter = "."
|
||||
|
||||
type Parser struct {
|
||||
// If populated, only these methods will be considered valid.
|
||||
//
|
||||
@@ -122,9 +124,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
|
||||
// It's only ever useful in cases where you know the signature is valid (because it has
|
||||
// been checked previously in the stack) and you want to extract values from it.
|
||||
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
|
||||
parts = strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
var ok bool
|
||||
parts, ok = splitToken(tokenString)
|
||||
if !ok {
|
||||
return nil, nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
token = &Token{Raw: tokenString}
|
||||
@@ -174,3 +177,30 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
|
||||
|
||||
return token, parts, nil
|
||||
}
|
||||
|
||||
// splitToken splits a token string into three parts: header, claims, and signature. It will only
|
||||
// return true if the token contains exactly two delimiters and three parts. In all other cases, it
|
||||
// will return nil parts and false.
|
||||
func splitToken(token string) ([]string, bool) {
|
||||
parts := make([]string, 3)
|
||||
header, remain, ok := strings.Cut(token, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[0] = header
|
||||
claims, remain, ok := strings.Cut(remain, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[1] = claims
|
||||
// One more cut to ensure the signature is the last part of the token and there are no more
|
||||
// delimiters. This avoids an issue where malicious input could contain additional delimiters
|
||||
// causing unecessary overhead parsing tokens.
|
||||
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
|
||||
if unexpected {
|
||||
return nil, false
|
||||
}
|
||||
parts[2] = signature
|
||||
|
||||
return parts, true
|
||||
}
|
||||
|
||||
16
vendor/github.com/golang-jwt/jwt/v5/README.md
generated
vendored
16
vendor/github.com/golang-jwt/jwt/v5/README.md
generated
vendored
@@ -10,11 +10,11 @@ implementation of [JSON Web
|
||||
Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
|
||||
|
||||
Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
|
||||
this project adds Go module support, but maintains backwards compatibility with
|
||||
this project adds Go module support, but maintains backward compatibility with
|
||||
older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
|
||||
[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
|
||||
v5.0.0 introduces major improvements to the validation of tokens, but is not
|
||||
entirely backwards compatible.
|
||||
entirely backward compatible.
|
||||
|
||||
> After the original author of the library suggested migrating the maintenance
|
||||
> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
|
||||
@@ -24,7 +24,7 @@ entirely backwards compatible.
|
||||
|
||||
|
||||
**SECURITY NOTICE:** Some older versions of Go have a security issue in the
|
||||
crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
|
||||
crypto/elliptic. The recommendation is to upgrade to at least 1.15 See issue
|
||||
[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
|
||||
detail.
|
||||
|
||||
@@ -32,7 +32,7 @@ detail.
|
||||
what you
|
||||
expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
|
||||
This library attempts to make it easy to do the right thing by requiring key
|
||||
types match the expected alg, but you should take the extra step to verify it in
|
||||
types to match the expected alg, but you should take the extra step to verify it in
|
||||
your usage. See the examples provided.
|
||||
|
||||
### Supported Go versions
|
||||
@@ -41,7 +41,7 @@ Our support of Go versions is aligned with Go's [version release
|
||||
policy](https://golang.org/doc/devel/release#policy). So we will support a major
|
||||
version of Go until there are two newer major releases. We no longer support
|
||||
building jwt-go with unsupported Go versions, as these contain security
|
||||
vulnerabilities which will not be fixed.
|
||||
vulnerabilities that will not be fixed.
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
@@ -117,7 +117,7 @@ notable differences:
|
||||
|
||||
This library is considered production ready. Feedback and feature requests are
|
||||
appreciated. The API should be considered stable. There should be very few
|
||||
backwards-incompatible changes outside of major version updates (and only with
|
||||
backward-incompatible changes outside of major version updates (and only with
|
||||
good reason).
|
||||
|
||||
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
|
||||
@@ -125,8 +125,8 @@ requests will land on `main`. Periodically, versions will be tagged from
|
||||
`main`. You can find all the releases on [the project releases
|
||||
page](https://github.com/golang-jwt/jwt/releases).
|
||||
|
||||
**BREAKING CHANGES:*** A full list of breaking changes is available in
|
||||
`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
|
||||
**BREAKING CHANGES:** A full list of breaking changes is available in
|
||||
`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
|
||||
your code.
|
||||
|
||||
## Extensions
|
||||
|
||||
4
vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
generated
vendored
4
vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
generated
vendored
@@ -2,11 +2,11 @@
|
||||
|
||||
## Supported Versions
|
||||
|
||||
As of February 2022 (and until this document is updated), the latest version `v4` is supported.
|
||||
As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
|
||||
If you think you found a vulnerability, and even if you are not sure, please report it a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try be explicit, describe steps to reproduce the security issue with code example(s).
|
||||
|
||||
You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
|
||||
|
||||
|
||||
36
vendor/github.com/golang-jwt/jwt/v5/parser.go
generated
vendored
36
vendor/github.com/golang-jwt/jwt/v5/parser.go
generated
vendored
@@ -8,6 +8,8 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const tokenDelimiter = "."
|
||||
|
||||
type Parser struct {
|
||||
// If populated, only these methods will be considered valid.
|
||||
validMethods []string
|
||||
@@ -136,9 +138,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
|
||||
// It's only ever useful in cases where you know the signature is valid (since it has already
|
||||
// been or will be checked elsewhere in the stack) and you want to extract values from it.
|
||||
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
|
||||
parts = strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
|
||||
var ok bool
|
||||
parts, ok = splitToken(tokenString)
|
||||
if !ok {
|
||||
return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
|
||||
}
|
||||
|
||||
token = &Token{Raw: tokenString}
|
||||
@@ -196,6 +199,33 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
|
||||
return token, parts, nil
|
||||
}
|
||||
|
||||
// splitToken splits a token string into three parts: header, claims, and signature. It will only
|
||||
// return true if the token contains exactly two delimiters and three parts. In all other cases, it
|
||||
// will return nil parts and false.
|
||||
func splitToken(token string) ([]string, bool) {
|
||||
parts := make([]string, 3)
|
||||
header, remain, ok := strings.Cut(token, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[0] = header
|
||||
claims, remain, ok := strings.Cut(remain, tokenDelimiter)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
parts[1] = claims
|
||||
// One more cut to ensure the signature is the last part of the token and there are no more
|
||||
// delimiters. This avoids an issue where malicious input could contain additional delimiters
|
||||
// causing unecessary overhead parsing tokens.
|
||||
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
|
||||
if unexpected {
|
||||
return nil, false
|
||||
}
|
||||
parts[2] = signature
|
||||
|
||||
return parts, true
|
||||
}
|
||||
|
||||
// DecodeSegment decodes a JWT specific base64url encoding. This function will
|
||||
// take into account whether the [Parser] is configured with additional options,
|
||||
// such as [WithStrictDecoding] or [WithPaddingAllowed].
|
||||
|
||||
2
vendor/github.com/golang-jwt/jwt/v5/token.go
generated
vendored
2
vendor/github.com/golang-jwt/jwt/v5/token.go
generated
vendored
@@ -75,7 +75,7 @@ func (t *Token) SignedString(key interface{}) (string, error) {
|
||||
}
|
||||
|
||||
// SigningString generates the signing string. This is the most expensive part
|
||||
// of the whole deal. Unless you need this for something special, just go
|
||||
// of the whole deal. Unless you need this for something special, just go
|
||||
// straight for the SignedString.
|
||||
func (t *Token) SigningString() (string, error) {
|
||||
h, err := json.Marshal(t.Header)
|
||||
|
||||
202
vendor/github.com/google/go-tpm/LICENSE
generated
vendored
Normal file
202
vendor/github.com/google/go-tpm/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
35
vendor/github.com/google/go-tpm/legacy/tpm2/README.md
generated
vendored
Normal file
35
vendor/github.com/google/go-tpm/legacy/tpm2/README.md
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
# TPM 2.0 client library
|
||||
|
||||
## Tests
|
||||
|
||||
This library contains unit tests in `github.com/google/go-tpm/tpm2`, which just
|
||||
tests that various encoding and error checking functions work correctly. It also
|
||||
contains more comprehensive integration tests in
|
||||
`github.com/google/go-tpm/tpm2/test`, which run actual commands on a TPM.
|
||||
|
||||
By default, these integration tests are run against the
|
||||
[`go-tpm-tools`](https://github.com/google/go-tpm-tools)
|
||||
simulator, which is baesed on the
|
||||
[Microsoft Reference TPM2 code](https://github.com/microsoft/ms-tpm-20-ref). To
|
||||
run both the unit and integration tests, run (in this directory)
|
||||
```bash
|
||||
go test . ./test
|
||||
```
|
||||
|
||||
These integration tests can also be run against a real TPM device. This is
|
||||
slightly more complex as the tests often need to be built as a normal user and
|
||||
then executed as root. For example,
|
||||
```bash
|
||||
# Build the test binary without running it
|
||||
go test -c github.com/google/go-tpm/tpm2/test
|
||||
# Execute the test binary as root
|
||||
sudo ./test.test --tpm-path=/dev/tpmrm0
|
||||
```
|
||||
On Linux, The `--tpm-path` causes the integration tests to be run against a
|
||||
real TPM located at that path (usually `/dev/tpmrm0` or `/dev/tpm0`). On Windows, the story is similar, execept that
|
||||
the `--use-tbs` flag is used instead.
|
||||
|
||||
Tip: if your TPM host is remote and you don't want to install Go on it, this
|
||||
same two-step process can be used. The test binary can be copied to a remote
|
||||
host and run without extra installation (as the test binary has very few
|
||||
*runtime* dependancies).
|
||||
575
vendor/github.com/google/go-tpm/legacy/tpm2/constants.go
generated
vendored
Normal file
575
vendor/github.com/google/go-tpm/legacy/tpm2/constants.go
generated
vendored
Normal file
@@ -0,0 +1,575 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpm2
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/elliptic"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
// Register the relevant hash implementations to prevent a runtime failure.
|
||||
_ "crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
|
||||
"github.com/google/go-tpm/tpmutil"
|
||||
)
|
||||
|
||||
var hashInfo = []struct {
|
||||
alg Algorithm
|
||||
hash crypto.Hash
|
||||
}{
|
||||
{AlgSHA1, crypto.SHA1},
|
||||
{AlgSHA256, crypto.SHA256},
|
||||
{AlgSHA384, crypto.SHA384},
|
||||
{AlgSHA512, crypto.SHA512},
|
||||
{AlgSHA3_256, crypto.SHA3_256},
|
||||
{AlgSHA3_384, crypto.SHA3_384},
|
||||
{AlgSHA3_512, crypto.SHA3_512},
|
||||
}
|
||||
|
||||
// MAX_DIGEST_BUFFER is the maximum size of []byte request or response fields.
|
||||
// Typically used for chunking of big blobs of data (such as for hashing or
|
||||
// encryption).
|
||||
const maxDigestBuffer = 1024
|
||||
|
||||
// Algorithm represents a TPM_ALG_ID value.
|
||||
type Algorithm uint16
|
||||
|
||||
// HashToAlgorithm looks up the TPM2 algorithm corresponding to the provided crypto.Hash
|
||||
func HashToAlgorithm(hash crypto.Hash) (Algorithm, error) {
|
||||
for _, info := range hashInfo {
|
||||
if info.hash == hash {
|
||||
return info.alg, nil
|
||||
}
|
||||
}
|
||||
return AlgUnknown, fmt.Errorf("go hash algorithm #%d has no TPM2 algorithm", hash)
|
||||
}
|
||||
|
||||
// IsNull returns true if a is AlgNull or zero (unset).
|
||||
func (a Algorithm) IsNull() bool {
|
||||
return a == AlgNull || a == AlgUnknown
|
||||
}
|
||||
|
||||
// UsesCount returns true if a signature algorithm uses count value.
|
||||
func (a Algorithm) UsesCount() bool {
|
||||
return a == AlgECDAA
|
||||
}
|
||||
|
||||
// UsesHash returns true if the algorithm requires the use of a hash.
|
||||
func (a Algorithm) UsesHash() bool {
|
||||
return a == AlgOAEP
|
||||
}
|
||||
|
||||
// Hash returns a crypto.Hash based on the given TPM_ALG_ID.
|
||||
// An error is returned if the given algorithm is not a hash algorithm or is not available.
|
||||
func (a Algorithm) Hash() (crypto.Hash, error) {
|
||||
for _, info := range hashInfo {
|
||||
if info.alg == a {
|
||||
if !info.hash.Available() {
|
||||
return crypto.Hash(0), fmt.Errorf("go hash algorithm #%d not available", info.hash)
|
||||
}
|
||||
return info.hash, nil
|
||||
}
|
||||
}
|
||||
return crypto.Hash(0), fmt.Errorf("hash algorithm not supported: 0x%x", a)
|
||||
}
|
||||
|
||||
func (a Algorithm) String() string {
|
||||
var s strings.Builder
|
||||
var err error
|
||||
switch a {
|
||||
case AlgUnknown:
|
||||
_, err = s.WriteString("AlgUnknown")
|
||||
case AlgRSA:
|
||||
_, err = s.WriteString("RSA")
|
||||
case AlgSHA1:
|
||||
_, err = s.WriteString("SHA1")
|
||||
case AlgHMAC:
|
||||
_, err = s.WriteString("HMAC")
|
||||
case AlgAES:
|
||||
_, err = s.WriteString("AES")
|
||||
case AlgKeyedHash:
|
||||
_, err = s.WriteString("KeyedHash")
|
||||
case AlgXOR:
|
||||
_, err = s.WriteString("XOR")
|
||||
case AlgSHA256:
|
||||
_, err = s.WriteString("SHA256")
|
||||
case AlgSHA384:
|
||||
_, err = s.WriteString("SHA384")
|
||||
case AlgSHA512:
|
||||
_, err = s.WriteString("SHA512")
|
||||
case AlgNull:
|
||||
_, err = s.WriteString("AlgNull")
|
||||
case AlgRSASSA:
|
||||
_, err = s.WriteString("RSASSA")
|
||||
case AlgRSAES:
|
||||
_, err = s.WriteString("RSAES")
|
||||
case AlgRSAPSS:
|
||||
_, err = s.WriteString("RSAPSS")
|
||||
case AlgOAEP:
|
||||
_, err = s.WriteString("OAEP")
|
||||
case AlgECDSA:
|
||||
_, err = s.WriteString("ECDSA")
|
||||
case AlgECDH:
|
||||
_, err = s.WriteString("ECDH")
|
||||
case AlgECDAA:
|
||||
_, err = s.WriteString("ECDAA")
|
||||
case AlgKDF2:
|
||||
_, err = s.WriteString("KDF2")
|
||||
case AlgECC:
|
||||
_, err = s.WriteString("ECC")
|
||||
case AlgSymCipher:
|
||||
_, err = s.WriteString("SymCipher")
|
||||
case AlgSHA3_256:
|
||||
_, err = s.WriteString("SHA3_256")
|
||||
case AlgSHA3_384:
|
||||
_, err = s.WriteString("SHA3_384")
|
||||
case AlgSHA3_512:
|
||||
_, err = s.WriteString("SHA3_512")
|
||||
case AlgCTR:
|
||||
_, err = s.WriteString("CTR")
|
||||
case AlgOFB:
|
||||
_, err = s.WriteString("OFB")
|
||||
case AlgCBC:
|
||||
_, err = s.WriteString("CBC")
|
||||
case AlgCFB:
|
||||
_, err = s.WriteString("CFB")
|
||||
case AlgECB:
|
||||
_, err = s.WriteString("ECB")
|
||||
default:
|
||||
return fmt.Sprintf("Alg?<%d>", int(a))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Writing to string builder failed: %v", err)
|
||||
}
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Supported Algorithms.
|
||||
const (
|
||||
AlgUnknown Algorithm = 0x0000
|
||||
AlgRSA Algorithm = 0x0001
|
||||
AlgSHA1 Algorithm = 0x0004
|
||||
AlgHMAC Algorithm = 0x0005
|
||||
AlgAES Algorithm = 0x0006
|
||||
AlgKeyedHash Algorithm = 0x0008
|
||||
AlgXOR Algorithm = 0x000A
|
||||
AlgSHA256 Algorithm = 0x000B
|
||||
AlgSHA384 Algorithm = 0x000C
|
||||
AlgSHA512 Algorithm = 0x000D
|
||||
AlgNull Algorithm = 0x0010
|
||||
AlgRSASSA Algorithm = 0x0014
|
||||
AlgRSAES Algorithm = 0x0015
|
||||
AlgRSAPSS Algorithm = 0x0016
|
||||
AlgOAEP Algorithm = 0x0017
|
||||
AlgECDSA Algorithm = 0x0018
|
||||
AlgECDH Algorithm = 0x0019
|
||||
AlgECDAA Algorithm = 0x001A
|
||||
AlgKDF2 Algorithm = 0x0021
|
||||
AlgECC Algorithm = 0x0023
|
||||
AlgSymCipher Algorithm = 0x0025
|
||||
AlgSHA3_256 Algorithm = 0x0027
|
||||
AlgSHA3_384 Algorithm = 0x0028
|
||||
AlgSHA3_512 Algorithm = 0x0029
|
||||
AlgCTR Algorithm = 0x0040
|
||||
AlgOFB Algorithm = 0x0041
|
||||
AlgCBC Algorithm = 0x0042
|
||||
AlgCFB Algorithm = 0x0043
|
||||
AlgECB Algorithm = 0x0044
|
||||
)
|
||||
|
||||
// HandleType defines a type of handle.
|
||||
type HandleType uint8
|
||||
|
||||
// Supported handle types
|
||||
const (
|
||||
HandleTypePCR HandleType = 0x00
|
||||
HandleTypeNVIndex HandleType = 0x01
|
||||
HandleTypeHMACSession HandleType = 0x02
|
||||
HandleTypeLoadedSession HandleType = 0x02
|
||||
HandleTypePolicySession HandleType = 0x03
|
||||
HandleTypeSavedSession HandleType = 0x03
|
||||
HandleTypePermanent HandleType = 0x40
|
||||
HandleTypeTransient HandleType = 0x80
|
||||
HandleTypePersistent HandleType = 0x81
|
||||
)
|
||||
|
||||
// SessionType defines the type of session created in StartAuthSession.
|
||||
type SessionType uint8
|
||||
|
||||
// Supported session types.
|
||||
const (
|
||||
SessionHMAC SessionType = 0x00
|
||||
SessionPolicy SessionType = 0x01
|
||||
SessionTrial SessionType = 0x03
|
||||
)
|
||||
|
||||
// SessionAttributes represents an attribute of a session.
|
||||
type SessionAttributes byte
|
||||
|
||||
// Session Attributes (Structures 8.4 TPMA_SESSION)
|
||||
const (
|
||||
AttrContinueSession SessionAttributes = 1 << iota
|
||||
AttrAuditExclusive
|
||||
AttrAuditReset
|
||||
_ // bit 3 reserved
|
||||
_ // bit 4 reserved
|
||||
AttrDecrypt
|
||||
AttrEcrypt
|
||||
AttrAudit
|
||||
)
|
||||
|
||||
// EmptyAuth represents the empty authorization value.
|
||||
var EmptyAuth []byte
|
||||
|
||||
// KeyProp is a bitmask used in Attributes field of key templates. Individual
|
||||
// flags should be OR-ed to form a full mask.
|
||||
type KeyProp uint32
|
||||
|
||||
// Key properties.
|
||||
const (
|
||||
FlagFixedTPM KeyProp = 0x00000002
|
||||
FlagStClear KeyProp = 0x00000004
|
||||
FlagFixedParent KeyProp = 0x00000010
|
||||
FlagSensitiveDataOrigin KeyProp = 0x00000020
|
||||
FlagUserWithAuth KeyProp = 0x00000040
|
||||
FlagAdminWithPolicy KeyProp = 0x00000080
|
||||
FlagNoDA KeyProp = 0x00000400
|
||||
FlagRestricted KeyProp = 0x00010000
|
||||
FlagDecrypt KeyProp = 0x00020000
|
||||
FlagSign KeyProp = 0x00040000
|
||||
|
||||
FlagSealDefault = FlagFixedTPM | FlagFixedParent
|
||||
FlagSignerDefault = FlagSign | FlagRestricted | FlagFixedTPM |
|
||||
FlagFixedParent | FlagSensitiveDataOrigin | FlagUserWithAuth
|
||||
FlagStorageDefault = FlagDecrypt | FlagRestricted | FlagFixedTPM |
|
||||
FlagFixedParent | FlagSensitiveDataOrigin | FlagUserWithAuth
|
||||
)
|
||||
|
||||
// TPMProp represents a Property Tag (TPM_PT) used with calls to GetCapability(CapabilityTPMProperties).
|
||||
type TPMProp uint32
|
||||
|
||||
// TPM Capability Properties, see TPM 2.0 Spec, Rev 1.38, Table 23.
|
||||
// Fixed TPM Properties (PT_FIXED)
|
||||
const (
|
||||
FamilyIndicator TPMProp = 0x100 + iota
|
||||
SpecLevel
|
||||
SpecRevision
|
||||
SpecDayOfYear
|
||||
SpecYear
|
||||
Manufacturer
|
||||
VendorString1
|
||||
VendorString2
|
||||
VendorString3
|
||||
VendorString4
|
||||
VendorTPMType
|
||||
FirmwareVersion1
|
||||
FirmwareVersion2
|
||||
InputMaxBufferSize
|
||||
TransientObjectsMin
|
||||
PersistentObjectsMin
|
||||
LoadedObjectsMin
|
||||
ActiveSessionsMax
|
||||
PCRCount
|
||||
PCRSelectMin
|
||||
ContextGapMax
|
||||
_ // (PT_FIXED + 21) is skipped
|
||||
NVCountersMax
|
||||
NVIndexMax
|
||||
MemoryMethod
|
||||
ClockUpdate
|
||||
ContextHash
|
||||
ContextSym
|
||||
ContextSymSize
|
||||
OrderlyCount
|
||||
CommandMaxSize
|
||||
ResponseMaxSize
|
||||
DigestMaxSize
|
||||
ObjectContextMaxSize
|
||||
SessionContextMaxSize
|
||||
PSFamilyIndicator
|
||||
PSSpecLevel
|
||||
PSSpecRevision
|
||||
PSSpecDayOfYear
|
||||
PSSpecYear
|
||||
SplitSigningMax
|
||||
TotalCommands
|
||||
LibraryCommands
|
||||
VendorCommands
|
||||
NVMaxBufferSize
|
||||
TPMModes
|
||||
CapabilityMaxBufferSize
|
||||
)
|
||||
|
||||
// Variable TPM Properties (PT_VAR)
|
||||
const (
|
||||
TPMAPermanent TPMProp = 0x200 + iota
|
||||
TPMAStartupClear
|
||||
HRNVIndex
|
||||
HRLoaded
|
||||
HRLoadedAvail
|
||||
HRActive
|
||||
HRActiveAvail
|
||||
HRTransientAvail
|
||||
CurrentPersistent
|
||||
AvailPersistent
|
||||
NVCounters
|
||||
NVCountersAvail
|
||||
AlgorithmSet
|
||||
LoadedCurves
|
||||
LockoutCounter
|
||||
MaxAuthFail
|
||||
LockoutInterval
|
||||
LockoutRecovery
|
||||
NVWriteRecovery
|
||||
AuditCounter0
|
||||
AuditCounter1
|
||||
)
|
||||
|
||||
// Allowed ranges of different kinds of Handles (TPM_HANDLE)
|
||||
// These constants have type TPMProp for backwards compatibility.
|
||||
const (
|
||||
PCRFirst TPMProp = 0x00000000
|
||||
HMACSessionFirst TPMProp = 0x02000000
|
||||
LoadedSessionFirst TPMProp = 0x02000000
|
||||
PolicySessionFirst TPMProp = 0x03000000
|
||||
ActiveSessionFirst TPMProp = 0x03000000
|
||||
TransientFirst TPMProp = 0x80000000
|
||||
PersistentFirst TPMProp = 0x81000000
|
||||
PersistentLast TPMProp = 0x81FFFFFF
|
||||
PlatformPersistent TPMProp = 0x81800000
|
||||
NVIndexFirst TPMProp = 0x01000000
|
||||
NVIndexLast TPMProp = 0x01FFFFFF
|
||||
PermanentFirst TPMProp = 0x40000000
|
||||
PermanentLast TPMProp = 0x4000010F
|
||||
)
|
||||
|
||||
// Reserved Handles.
|
||||
const (
|
||||
HandleOwner tpmutil.Handle = 0x40000001 + iota
|
||||
HandleRevoke
|
||||
HandleTransport
|
||||
HandleOperator
|
||||
HandleAdmin
|
||||
HandleEK
|
||||
HandleNull
|
||||
HandleUnassigned
|
||||
HandlePasswordSession
|
||||
HandleLockout
|
||||
HandleEndorsement
|
||||
HandlePlatform
|
||||
)
|
||||
|
||||
// Capability identifies some TPM property or state type.
|
||||
type Capability uint32
|
||||
|
||||
// TPM Capabilities.
|
||||
const (
|
||||
CapabilityAlgs Capability = iota
|
||||
CapabilityHandles
|
||||
CapabilityCommands
|
||||
CapabilityPPCommands
|
||||
CapabilityAuditCommands
|
||||
CapabilityPCRs
|
||||
CapabilityTPMProperties
|
||||
CapabilityPCRProperties
|
||||
CapabilityECCCurves
|
||||
CapabilityAuthPolicies
|
||||
)
|
||||
|
||||
// TPM Structure Tags. Tags are used to disambiguate structures, similar to Alg
|
||||
// values: tag value defines what kind of data lives in a nested field.
|
||||
const (
|
||||
TagNull tpmutil.Tag = 0x8000
|
||||
TagNoSessions tpmutil.Tag = 0x8001
|
||||
TagSessions tpmutil.Tag = 0x8002
|
||||
TagAttestCertify tpmutil.Tag = 0x8017
|
||||
TagAttestQuote tpmutil.Tag = 0x8018
|
||||
TagAttestCreation tpmutil.Tag = 0x801a
|
||||
TagAuthSecret tpmutil.Tag = 0x8023
|
||||
TagHashCheck tpmutil.Tag = 0x8024
|
||||
TagAuthSigned tpmutil.Tag = 0x8025
|
||||
)
|
||||
|
||||
// StartupType instructs the TPM on how to handle its state during Shutdown or
|
||||
// Startup.
|
||||
type StartupType uint16
|
||||
|
||||
// Startup types
|
||||
const (
|
||||
StartupClear StartupType = iota
|
||||
StartupState
|
||||
)
|
||||
|
||||
// EllipticCurve identifies specific EC curves.
|
||||
type EllipticCurve uint16
|
||||
|
||||
// ECC curves supported by TPM 2.0 spec.
|
||||
const (
|
||||
CurveNISTP192 = EllipticCurve(iota + 1)
|
||||
CurveNISTP224
|
||||
CurveNISTP256
|
||||
CurveNISTP384
|
||||
CurveNISTP521
|
||||
|
||||
CurveBNP256 = EllipticCurve(iota + 10)
|
||||
CurveBNP638
|
||||
|
||||
CurveSM2P256 = EllipticCurve(0x0020)
|
||||
)
|
||||
|
||||
var toGoCurve = map[EllipticCurve]elliptic.Curve{
|
||||
CurveNISTP224: elliptic.P224(),
|
||||
CurveNISTP256: elliptic.P256(),
|
||||
CurveNISTP384: elliptic.P384(),
|
||||
CurveNISTP521: elliptic.P521(),
|
||||
}
|
||||
|
||||
// Supported TPM operations.
|
||||
const (
|
||||
CmdNVUndefineSpaceSpecial tpmutil.Command = 0x0000011F
|
||||
CmdEvictControl tpmutil.Command = 0x00000120
|
||||
CmdUndefineSpace tpmutil.Command = 0x00000122
|
||||
CmdClear tpmutil.Command = 0x00000126
|
||||
CmdHierarchyChangeAuth tpmutil.Command = 0x00000129
|
||||
CmdDefineSpace tpmutil.Command = 0x0000012A
|
||||
CmdCreatePrimary tpmutil.Command = 0x00000131
|
||||
CmdIncrementNVCounter tpmutil.Command = 0x00000134
|
||||
CmdWriteNV tpmutil.Command = 0x00000137
|
||||
CmdWriteLockNV tpmutil.Command = 0x00000138
|
||||
CmdDictionaryAttackLockReset tpmutil.Command = 0x00000139
|
||||
CmdDictionaryAttackParameters tpmutil.Command = 0x0000013A
|
||||
CmdPCREvent tpmutil.Command = 0x0000013C
|
||||
CmdPCRReset tpmutil.Command = 0x0000013D
|
||||
CmdSequenceComplete tpmutil.Command = 0x0000013E
|
||||
CmdStartup tpmutil.Command = 0x00000144
|
||||
CmdShutdown tpmutil.Command = 0x00000145
|
||||
CmdActivateCredential tpmutil.Command = 0x00000147
|
||||
CmdCertify tpmutil.Command = 0x00000148
|
||||
CmdCertifyCreation tpmutil.Command = 0x0000014A
|
||||
CmdReadNV tpmutil.Command = 0x0000014E
|
||||
CmdReadLockNV tpmutil.Command = 0x0000014F
|
||||
CmdPolicySecret tpmutil.Command = 0x00000151
|
||||
CmdCreate tpmutil.Command = 0x00000153
|
||||
CmdECDHZGen tpmutil.Command = 0x00000154
|
||||
CmdImport tpmutil.Command = 0x00000156
|
||||
CmdLoad tpmutil.Command = 0x00000157
|
||||
CmdQuote tpmutil.Command = 0x00000158
|
||||
CmdRSADecrypt tpmutil.Command = 0x00000159
|
||||
CmdSequenceUpdate tpmutil.Command = 0x0000015C
|
||||
CmdSign tpmutil.Command = 0x0000015D
|
||||
CmdUnseal tpmutil.Command = 0x0000015E
|
||||
CmdPolicySigned tpmutil.Command = 0x00000160
|
||||
CmdContextLoad tpmutil.Command = 0x00000161
|
||||
CmdContextSave tpmutil.Command = 0x00000162
|
||||
CmdECDHKeyGen tpmutil.Command = 0x00000163
|
||||
CmdEncryptDecrypt tpmutil.Command = 0x00000164
|
||||
CmdFlushContext tpmutil.Command = 0x00000165
|
||||
CmdLoadExternal tpmutil.Command = 0x00000167
|
||||
CmdMakeCredential tpmutil.Command = 0x00000168
|
||||
CmdReadPublicNV tpmutil.Command = 0x00000169
|
||||
CmdPolicyCommandCode tpmutil.Command = 0x0000016C
|
||||
CmdPolicyOr tpmutil.Command = 0x00000171
|
||||
CmdReadPublic tpmutil.Command = 0x00000173
|
||||
CmdRSAEncrypt tpmutil.Command = 0x00000174
|
||||
CmdStartAuthSession tpmutil.Command = 0x00000176
|
||||
CmdGetCapability tpmutil.Command = 0x0000017A
|
||||
CmdGetRandom tpmutil.Command = 0x0000017B
|
||||
CmdHash tpmutil.Command = 0x0000017D
|
||||
CmdPCRRead tpmutil.Command = 0x0000017E
|
||||
CmdPolicyPCR tpmutil.Command = 0x0000017F
|
||||
CmdReadClock tpmutil.Command = 0x00000181
|
||||
CmdPCRExtend tpmutil.Command = 0x00000182
|
||||
CmdEventSequenceComplete tpmutil.Command = 0x00000185
|
||||
CmdHashSequenceStart tpmutil.Command = 0x00000186
|
||||
CmdPolicyGetDigest tpmutil.Command = 0x00000189
|
||||
CmdPolicyPassword tpmutil.Command = 0x0000018C
|
||||
CmdEncryptDecrypt2 tpmutil.Command = 0x00000193
|
||||
)
|
||||
|
||||
// Regular TPM 2.0 devices use 24-bit mask (3 bytes) for PCR selection.
|
||||
const sizeOfPCRSelect = 3
|
||||
|
||||
const defaultRSAExponent = 1<<16 + 1
|
||||
|
||||
// NVAttr is a bitmask used in Attributes field of NV indexes. Individual
|
||||
// flags should be OR-ed to form a full mask.
|
||||
type NVAttr uint32
|
||||
|
||||
// NV Attributes
|
||||
const (
|
||||
AttrPPWrite NVAttr = 0x00000001
|
||||
AttrOwnerWrite NVAttr = 0x00000002
|
||||
AttrAuthWrite NVAttr = 0x00000004
|
||||
AttrPolicyWrite NVAttr = 0x00000008
|
||||
AttrPolicyDelete NVAttr = 0x00000400
|
||||
AttrWriteLocked NVAttr = 0x00000800
|
||||
AttrWriteAll NVAttr = 0x00001000
|
||||
AttrWriteDefine NVAttr = 0x00002000
|
||||
AttrWriteSTClear NVAttr = 0x00004000
|
||||
AttrGlobalLock NVAttr = 0x00008000
|
||||
AttrPPRead NVAttr = 0x00010000
|
||||
AttrOwnerRead NVAttr = 0x00020000
|
||||
AttrAuthRead NVAttr = 0x00040000
|
||||
AttrPolicyRead NVAttr = 0x00080000
|
||||
AttrNoDA NVAttr = 0x02000000
|
||||
AttrOrderly NVAttr = 0x04000000
|
||||
AttrClearSTClear NVAttr = 0x08000000
|
||||
AttrReadLocked NVAttr = 0x10000000
|
||||
AttrWritten NVAttr = 0x20000000
|
||||
AttrPlatformCreate NVAttr = 0x40000000
|
||||
AttrReadSTClear NVAttr = 0x80000000
|
||||
)
|
||||
|
||||
var permMap = map[NVAttr]string{
|
||||
AttrPPWrite: "PPWrite",
|
||||
AttrOwnerWrite: "OwnerWrite",
|
||||
AttrAuthWrite: "AuthWrite",
|
||||
AttrPolicyWrite: "PolicyWrite",
|
||||
AttrPolicyDelete: "PolicyDelete",
|
||||
AttrWriteLocked: "WriteLocked",
|
||||
AttrWriteAll: "WriteAll",
|
||||
AttrWriteDefine: "WriteDefine",
|
||||
AttrWriteSTClear: "WriteSTClear",
|
||||
AttrGlobalLock: "GlobalLock",
|
||||
AttrPPRead: "PPRead",
|
||||
AttrOwnerRead: "OwnerRead",
|
||||
AttrAuthRead: "AuthRead",
|
||||
AttrPolicyRead: "PolicyRead",
|
||||
AttrNoDA: "No Do",
|
||||
AttrOrderly: "Oderly",
|
||||
AttrClearSTClear: "ClearSTClear",
|
||||
AttrReadLocked: "ReadLocked",
|
||||
AttrWritten: "Writte",
|
||||
AttrPlatformCreate: "PlatformCreate",
|
||||
AttrReadSTClear: "ReadSTClear",
|
||||
}
|
||||
|
||||
// String returns a textual representation of the set of NVAttr
|
||||
func (p NVAttr) String() string {
|
||||
var retString strings.Builder
|
||||
for iterator, item := range permMap {
|
||||
if (p & iterator) != 0 {
|
||||
retString.WriteString(item + " + ")
|
||||
}
|
||||
}
|
||||
if retString.String() == "" {
|
||||
return "Permission/s not found"
|
||||
}
|
||||
return strings.TrimSuffix(retString.String(), " + ")
|
||||
|
||||
}
|
||||
362
vendor/github.com/google/go-tpm/legacy/tpm2/error.go
generated
vendored
Normal file
362
vendor/github.com/google/go-tpm/legacy/tpm2/error.go
generated
vendored
Normal file
@@ -0,0 +1,362 @@
|
||||
package tpm2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-tpm/tpmutil"
|
||||
)
|
||||
|
||||
type (
|
||||
// RCFmt0 holds Format 0 error codes
|
||||
RCFmt0 uint8
|
||||
|
||||
// RCFmt1 holds Format 1 error codes
|
||||
RCFmt1 uint8
|
||||
|
||||
// RCWarn holds error codes used in warnings
|
||||
RCWarn uint8
|
||||
|
||||
// RCIndex is used to reference arguments, handles and sessions in errors
|
||||
RCIndex uint8
|
||||
)
|
||||
|
||||
// Format 0 error codes.
|
||||
const (
|
||||
RCInitialize RCFmt0 = 0x00
|
||||
RCFailure RCFmt0 = 0x01
|
||||
RCSequence RCFmt0 = 0x03
|
||||
RCPrivate RCFmt0 = 0x0B
|
||||
RCHMAC RCFmt0 = 0x19
|
||||
RCDisabled RCFmt0 = 0x20
|
||||
RCExclusive RCFmt0 = 0x21
|
||||
RCAuthType RCFmt0 = 0x24
|
||||
RCAuthMissing RCFmt0 = 0x25
|
||||
RCPolicy RCFmt0 = 0x26
|
||||
RCPCR RCFmt0 = 0x27
|
||||
RCPCRChanged RCFmt0 = 0x28
|
||||
RCUpgrade RCFmt0 = 0x2D
|
||||
RCTooManyContexts RCFmt0 = 0x2E
|
||||
RCAuthUnavailable RCFmt0 = 0x2F
|
||||
RCReboot RCFmt0 = 0x30
|
||||
RCUnbalanced RCFmt0 = 0x31
|
||||
RCCommandSize RCFmt0 = 0x42
|
||||
RCCommandCode RCFmt0 = 0x43
|
||||
RCAuthSize RCFmt0 = 0x44
|
||||
RCAuthContext RCFmt0 = 0x45
|
||||
RCNVRange RCFmt0 = 0x46
|
||||
RCNVSize RCFmt0 = 0x47
|
||||
RCNVLocked RCFmt0 = 0x48
|
||||
RCNVAuthorization RCFmt0 = 0x49
|
||||
RCNVUninitialized RCFmt0 = 0x4A
|
||||
RCNVSpace RCFmt0 = 0x4B
|
||||
RCNVDefined RCFmt0 = 0x4C
|
||||
RCBadContext RCFmt0 = 0x50
|
||||
RCCPHash RCFmt0 = 0x51
|
||||
RCParent RCFmt0 = 0x52
|
||||
RCNeedsTest RCFmt0 = 0x53
|
||||
RCNoResult RCFmt0 = 0x54
|
||||
RCSensitive RCFmt0 = 0x55
|
||||
)
|
||||
|
||||
var fmt0Msg = map[RCFmt0]string{
|
||||
RCInitialize: "TPM not initialized by TPM2_Startup or already initialized",
|
||||
RCFailure: "commands not being accepted because of a TPM failure",
|
||||
RCSequence: "improper use of a sequence handle",
|
||||
RCPrivate: "not currently used",
|
||||
RCHMAC: "not currently used",
|
||||
RCDisabled: "the command is disabled",
|
||||
RCExclusive: "command failed because audit sequence required exclusivity",
|
||||
RCAuthType: "authorization handle is not correct for command",
|
||||
RCAuthMissing: "5 command requires an authorization session for handle and it is not present",
|
||||
RCPolicy: "policy failure in math operation or an invalid authPolicy value",
|
||||
RCPCR: "PCR check fail",
|
||||
RCPCRChanged: "PCR have changed since checked",
|
||||
RCUpgrade: "TPM is in field upgrade mode unless called via TPM2_FieldUpgradeData(), then it is not in field upgrade mode",
|
||||
RCTooManyContexts: "context ID counter is at maximum",
|
||||
RCAuthUnavailable: "authValue or authPolicy is not available for selected entity",
|
||||
RCReboot: "a _TPM_Init and Startup(CLEAR) is required before the TPM can resume operation",
|
||||
RCUnbalanced: "the protection algorithms (hash and symmetric) are not reasonably balanced; the digest size of the hash must be larger than the key size of the symmetric algorithm",
|
||||
RCCommandSize: "command commandSize value is inconsistent with contents of the command buffer; either the size is not the same as the octets loaded by the hardware interface layer or the value is not large enough to hold a command header",
|
||||
RCCommandCode: "command code not supported",
|
||||
RCAuthSize: "the value of authorizationSize is out of range or the number of octets in the Authorization Area is greater than required",
|
||||
RCAuthContext: "use of an authorization session with a context command or another command that cannot have an authorization session",
|
||||
RCNVRange: "NV offset+size is out of range",
|
||||
RCNVSize: "Requested allocation size is larger than allowed",
|
||||
RCNVLocked: "NV access locked",
|
||||
RCNVAuthorization: "NV access authorization fails in command actions",
|
||||
RCNVUninitialized: "an NV Index is used before being initialized or the state saved by TPM2_Shutdown(STATE) could not be restored",
|
||||
RCNVSpace: "insufficient space for NV allocation",
|
||||
RCNVDefined: "NV Index or persistent object already defined",
|
||||
RCBadContext: "context in TPM2_ContextLoad() is not valid",
|
||||
RCCPHash: "cpHash value already set or not correct for use",
|
||||
RCParent: "handle for parent is not a valid parent",
|
||||
RCNeedsTest: "some function needs testing",
|
||||
RCNoResult: "returned when an internal function cannot process a request due to an unspecified problem; this code is usually related to invalid parameters that are not properly filtered by the input unmarshaling code",
|
||||
RCSensitive: "the sensitive area did not unmarshal correctly after decryption",
|
||||
}
|
||||
|
||||
// Format 1 error codes.
|
||||
const (
|
||||
RCAsymmetric = 0x01
|
||||
RCAttributes = 0x02
|
||||
RCHash = 0x03
|
||||
RCValue = 0x04
|
||||
RCHierarchy = 0x05
|
||||
RCKeySize = 0x07
|
||||
RCMGF = 0x08
|
||||
RCMode = 0x09
|
||||
RCType = 0x0A
|
||||
RCHandle = 0x0B
|
||||
RCKDF = 0x0C
|
||||
RCRange = 0x0D
|
||||
RCAuthFail = 0x0E
|
||||
RCNonce = 0x0F
|
||||
RCPP = 0x10
|
||||
RCScheme = 0x12
|
||||
RCSize = 0x15
|
||||
RCSymmetric = 0x16
|
||||
RCTag = 0x17
|
||||
RCSelector = 0x18
|
||||
RCInsufficient = 0x1A
|
||||
RCSignature = 0x1B
|
||||
RCKey = 0x1C
|
||||
RCPolicyFail = 0x1D
|
||||
RCIntegrity = 0x1F
|
||||
RCTicket = 0x20
|
||||
RCReservedBits = 0x21
|
||||
RCBadAuth = 0x22
|
||||
RCExpired = 0x23
|
||||
RCPolicyCC = 0x24
|
||||
RCBinding = 0x25
|
||||
RCCurve = 0x26
|
||||
RCECCPoint = 0x27
|
||||
)
|
||||
|
||||
var fmt1Msg = map[RCFmt1]string{
|
||||
RCAsymmetric: "asymmetric algorithm not supported or not correct",
|
||||
RCAttributes: "inconsistent attributes",
|
||||
RCHash: "hash algorithm not supported or not appropriate",
|
||||
RCValue: "value is out of range or is not correct for the context",
|
||||
RCHierarchy: "hierarchy is not enabled or is not correct for the use",
|
||||
RCKeySize: "key size is not supported",
|
||||
RCMGF: "mask generation function not supported",
|
||||
RCMode: "mode of operation not supported",
|
||||
RCType: "the type of the value is not appropriate for the use",
|
||||
RCHandle: "the handle is not correct for the use",
|
||||
RCKDF: "unsupported key derivation function or function not appropriate for use",
|
||||
RCRange: "value was out of allowed range",
|
||||
RCAuthFail: "the authorization HMAC check failed and DA counter incremented",
|
||||
RCNonce: "invalid nonce size or nonce value mismatch",
|
||||
RCPP: "authorization requires assertion of PP",
|
||||
RCScheme: "unsupported or incompatible scheme",
|
||||
RCSize: "structure is the wrong size",
|
||||
RCSymmetric: "unsupported symmetric algorithm or key size, or not appropriate for instance",
|
||||
RCTag: "incorrect structure tag",
|
||||
RCSelector: "union selector is incorrect",
|
||||
RCInsufficient: "the TPM was unable to unmarshal a value because there were not enough octets in the input buffer",
|
||||
RCSignature: "the signature is not valid",
|
||||
RCKey: "key fields are not compatible with the selected use",
|
||||
RCPolicyFail: "a policy check failed",
|
||||
RCIntegrity: "integrity check failed",
|
||||
RCTicket: "invalid ticket",
|
||||
RCReservedBits: "reserved bits not set to zero as required",
|
||||
RCBadAuth: "authorization failure without DA implications",
|
||||
RCExpired: "the policy has expired",
|
||||
RCPolicyCC: "the commandCode in the policy is not the commandCode of the command or the command code in a policy command references a command that is not implemented",
|
||||
RCBinding: "public and sensitive portions of an object are not cryptographically bound",
|
||||
RCCurve: "curve not supported",
|
||||
RCECCPoint: "point is not on the required curve",
|
||||
}
|
||||
|
||||
// Warning codes.
|
||||
const (
|
||||
RCContextGap RCWarn = 0x01
|
||||
RCObjectMemory RCWarn = 0x02
|
||||
RCSessionMemory RCWarn = 0x03
|
||||
RCMemory RCWarn = 0x04
|
||||
RCSessionHandles RCWarn = 0x05
|
||||
RCObjectHandles RCWarn = 0x06
|
||||
RCLocality RCWarn = 0x07
|
||||
RCYielded RCWarn = 0x08
|
||||
RCCanceled RCWarn = 0x09
|
||||
RCTesting RCWarn = 0x0A
|
||||
RCReferenceH0 RCWarn = 0x10
|
||||
RCReferenceH1 RCWarn = 0x11
|
||||
RCReferenceH2 RCWarn = 0x12
|
||||
RCReferenceH3 RCWarn = 0x13
|
||||
RCReferenceH4 RCWarn = 0x14
|
||||
RCReferenceH5 RCWarn = 0x15
|
||||
RCReferenceH6 RCWarn = 0x16
|
||||
RCReferenceS0 RCWarn = 0x18
|
||||
RCReferenceS1 RCWarn = 0x19
|
||||
RCReferenceS2 RCWarn = 0x1A
|
||||
RCReferenceS3 RCWarn = 0x1B
|
||||
RCReferenceS4 RCWarn = 0x1C
|
||||
RCReferenceS5 RCWarn = 0x1D
|
||||
RCReferenceS6 RCWarn = 0x1E
|
||||
RCNVRate RCWarn = 0x20
|
||||
RCLockout RCWarn = 0x21
|
||||
RCRetry RCWarn = 0x22
|
||||
RCNVUnavailable RCWarn = 0x23
|
||||
)
|
||||
|
||||
var warnMsg = map[RCWarn]string{
|
||||
RCContextGap: "gap for context ID is too large",
|
||||
RCObjectMemory: "out of memory for object contexts",
|
||||
RCSessionMemory: "out of memory for session contexts",
|
||||
RCMemory: "out of shared object/session memory or need space for internal operations",
|
||||
RCSessionHandles: "out of session handles",
|
||||
RCObjectHandles: "out of object handles",
|
||||
RCLocality: "bad locality",
|
||||
RCYielded: "the TPM has suspended operation on the command; forward progress was made and the command may be retried",
|
||||
RCCanceled: "the command was canceled",
|
||||
RCTesting: "TPM is performing self-tests",
|
||||
RCReferenceH0: "the 1st handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH1: "the 2nd handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH2: "the 3rd handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH3: "the 4th handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH4: "the 5th handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH5: "the 6th handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceH6: "the 7th handle in the handle area references a transient object or session that is not loaded",
|
||||
RCReferenceS0: "the 1st authorization session handle references a session that is not loaded",
|
||||
RCReferenceS1: "the 2nd authorization session handle references a session that is not loaded",
|
||||
RCReferenceS2: "the 3rd authorization session handle references a session that is not loaded",
|
||||
RCReferenceS3: "the 4th authorization session handle references a session that is not loaded",
|
||||
RCReferenceS4: "the 5th authorization session handle references a session that is not loaded",
|
||||
RCReferenceS5: "the 6th authorization session handle references a session that is not loaded",
|
||||
RCReferenceS6: "the 7th authorization session handle references a session that is not loaded",
|
||||
RCNVRate: "the TPM is rate-limiting accesses to prevent wearout of NV",
|
||||
RCLockout: "authorizations for objects subject to DA protection are not allowed at this time because the TPM is in DA lockout mode",
|
||||
RCRetry: "the TPM was not able to start the command",
|
||||
RCNVUnavailable: "the command may require writing of NV and NV is not current accessible",
|
||||
}
|
||||
|
||||
// Indexes for arguments, handles and sessions.
//
// Format 1 response codes carry a 4-bit index identifying which argument,
// handle or session triggered the error. iota+1 yields the values 1 (RC1)
// through 15 (RCF).
const (
	RC1 RCIndex = iota + 1
	RC2
	RC3
	RC4
	RC5
	RC6
	RC7
	RC8
	RC9
	RCA
	RCB
	RCC
	RCD
	RCE
	RCF
)
|
||||
|
||||
// unknownCode is the fallback description used when a response code has no
// entry in the corresponding message table (fmt0Msg, fmt1Msg, warnMsg).
const unknownCode = "unknown error code"
|
||||
|
||||
// Error is returned for all Format 0 errors from the TPM. It is used for general
|
||||
// errors not specific to a parameter, handle or session.
|
||||
type Error struct {
|
||||
Code RCFmt0
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
msg := fmt0Msg[e.Code]
|
||||
if msg == "" {
|
||||
msg = unknownCode
|
||||
}
|
||||
return fmt.Sprintf("error code 0x%x : %s", e.Code, msg)
|
||||
}
|
||||
|
||||
// VendorError represents a vendor-specific error response. These types of responses
// are not decoded and Code contains the complete response code.
type VendorError struct {
	Code uint32
}

// Error renders the raw vendor response code in hex.
func (e VendorError) Error() string {
	const format = "vendor error code 0x%x"
	return fmt.Sprintf(format, e.Code)
}
|
||||
|
||||
// Warning is typically used to report transient errors.
|
||||
type Warning struct {
|
||||
Code RCWarn
|
||||
}
|
||||
|
||||
func (w Warning) Error() string {
|
||||
msg := warnMsg[w.Code]
|
||||
if msg == "" {
|
||||
msg = unknownCode
|
||||
}
|
||||
return fmt.Sprintf("warning code 0x%x : %s", w.Code, msg)
|
||||
}
|
||||
|
||||
// ParameterError describes an error related to a parameter, and the parameter number.
|
||||
type ParameterError struct {
|
||||
Code RCFmt1
|
||||
Parameter RCIndex
|
||||
}
|
||||
|
||||
func (e ParameterError) Error() string {
|
||||
msg := fmt1Msg[e.Code]
|
||||
if msg == "" {
|
||||
msg = unknownCode
|
||||
}
|
||||
return fmt.Sprintf("parameter %d, error code 0x%x : %s", e.Parameter, e.Code, msg)
|
||||
}
|
||||
|
||||
// HandleError describes an error related to a handle, and the handle number.
|
||||
type HandleError struct {
|
||||
Code RCFmt1
|
||||
Handle RCIndex
|
||||
}
|
||||
|
||||
func (e HandleError) Error() string {
|
||||
msg := fmt1Msg[e.Code]
|
||||
if msg == "" {
|
||||
msg = unknownCode
|
||||
}
|
||||
return fmt.Sprintf("handle %d, error code 0x%x : %s", e.Handle, e.Code, msg)
|
||||
}
|
||||
|
||||
// SessionError describes an error related to a session, and the session number.
|
||||
type SessionError struct {
|
||||
Code RCFmt1
|
||||
Session RCIndex
|
||||
}
|
||||
|
||||
func (e SessionError) Error() string {
|
||||
msg := fmt1Msg[e.Code]
|
||||
if msg == "" {
|
||||
msg = unknownCode
|
||||
}
|
||||
return fmt.Sprintf("session %d, error code 0x%x : %s", e.Session, e.Code, msg)
|
||||
}
|
||||
|
||||
// Decode a TPM2 response code and return the appropriate error. Logic
// according to the "Response Code Evaluation" chart in Part 1 of the TPM 2.0
// spec.
//
// Returns nil for RCSuccess. Non-success codes map to one of the error types
// in this file: a plain formatted error (TPM 1.2-style), VendorError,
// Warning, Error, ParameterError, HandleError, or SessionError.
func decodeResponse(code tpmutil.ResponseCode) error {
	if code == tpmutil.RCSuccess {
		return nil
	}
	if code&0x180 == 0 { // Bits 7:8 == 0 is a TPM1 error
		return fmt.Errorf("response status 0x%x", code)
	}
	if code&0x80 == 0 { // Bit 7 unset
		if code&0x400 > 0 { // Bit 10 set, vendor specific code
			return VendorError{uint32(code)}
		}
		if code&0x800 > 0 { // Bit 11 set, warning with code in bit 0:6
			return Warning{RCWarn(code & 0x7f)}
		}
		// error with code in bit 0:6
		return Error{RCFmt0(code & 0x7f)}
	}
	// Bit 7 set: the code is qualified by a parameter/handle/session index.
	if code&0x40 > 0 { // Bit 6 set, code in 0:5, parameter number in 8:11
		return ParameterError{RCFmt1(code & 0x3f), RCIndex((code & 0xf00) >> 8)}
	}
	if code&0x800 == 0 { // Bit 11 unset, code in 0:5, handle in 8:10
		return HandleError{RCFmt1(code & 0x3f), RCIndex((code & 0x700) >> 8)}
	}
	// Code in 0:5, Session in 8:10
	return SessionError{RCFmt1(code & 0x3f), RCIndex((code & 0x700) >> 8)}
}
|
||||
116
vendor/github.com/google/go-tpm/legacy/tpm2/kdf.go
generated
vendored
Normal file
116
vendor/github.com/google/go-tpm/legacy/tpm2/kdf.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpm2
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// KDFa implements TPM 2.0's default key derivation function, as defined in
|
||||
// section 11.4.9.2 of the TPM revision 2 specification part 1.
|
||||
// See: https://trustedcomputinggroup.org/resource/tpm-library-specification/
|
||||
// The key & label parameters must not be zero length.
|
||||
// The label parameter is a non-null-terminated string.
|
||||
// The contextU & contextV parameters are optional.
|
||||
// Deprecated: Use KDFaHash.
|
||||
func KDFa(hashAlg Algorithm, key []byte, label string, contextU, contextV []byte, bits int) ([]byte, error) {
|
||||
h, err := hashAlg.Hash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return KDFaHash(h, key, label, contextU, contextV, bits), nil
|
||||
}
|
||||
|
||||
// KDFe implements TPM 2.0's ECDH key derivation function, as defined in
|
||||
// section 11.4.9.3 of the TPM revision 2 specification part 1.
|
||||
// See: https://trustedcomputinggroup.org/resource/tpm-library-specification/
|
||||
// The z parameter is the x coordinate of one party's private ECC key multiplied
|
||||
// by the other party's public ECC point.
|
||||
// The use parameter is a non-null-terminated string.
|
||||
// The partyUInfo and partyVInfo are the x coordinates of the initiator's and
|
||||
// Deprecated: Use KDFeHash.
|
||||
func KDFe(hashAlg Algorithm, z []byte, use string, partyUInfo, partyVInfo []byte, bits int) ([]byte, error) {
|
||||
h, err := hashAlg.Hash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return KDFeHash(h, z, use, partyUInfo, partyVInfo, bits), nil
|
||||
}
|
||||
|
||||
// KDFaHash implements TPM 2.0's default key derivation function, as defined in
|
||||
// section 11.4.9.2 of the TPM revision 2 specification part 1.
|
||||
// See: https://trustedcomputinggroup.org/resource/tpm-library-specification/
|
||||
// The key & label parameters must not be zero length.
|
||||
// The label parameter is a non-null-terminated string.
|
||||
// The contextU & contextV parameters are optional.
|
||||
func KDFaHash(h crypto.Hash, key []byte, label string, contextU, contextV []byte, bits int) []byte {
|
||||
mac := hmac.New(h.New, key)
|
||||
|
||||
out := kdf(mac, bits, func() {
|
||||
mac.Write([]byte(label))
|
||||
mac.Write([]byte{0}) // Terminating null character for C-string.
|
||||
mac.Write(contextU)
|
||||
mac.Write(contextV)
|
||||
binary.Write(mac, binary.BigEndian, uint32(bits))
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
// KDFeHash implements TPM 2.0's ECDH key derivation function, as defined in
|
||||
// section 11.4.9.3 of the TPM revision 2 specification part 1.
|
||||
// See: https://trustedcomputinggroup.org/resource/tpm-library-specification/
|
||||
// The z parameter is the x coordinate of one party's private ECC key multiplied
|
||||
// by the other party's public ECC point.
|
||||
// The use parameter is a non-null-terminated string.
|
||||
// The partyUInfo and partyVInfo are the x coordinates of the initiator's and
|
||||
// the responder's ECC points, respectively.
|
||||
func KDFeHash(h crypto.Hash, z []byte, use string, partyUInfo, partyVInfo []byte, bits int) []byte {
|
||||
hash := h.New()
|
||||
|
||||
out := kdf(hash, bits, func() {
|
||||
hash.Write(z)
|
||||
hash.Write([]byte(use))
|
||||
hash.Write([]byte{0}) // Terminating null character for C-string.
|
||||
hash.Write(partyUInfo)
|
||||
hash.Write(partyVInfo)
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
func kdf(h hash.Hash, bits int, update func()) []byte {
|
||||
bytes := (bits + 7) / 8
|
||||
out := []byte{}
|
||||
|
||||
for counter := 1; len(out) < bytes; counter++ {
|
||||
h.Reset()
|
||||
binary.Write(h, binary.BigEndian, uint32(counter))
|
||||
update()
|
||||
|
||||
out = h.Sum(out)
|
||||
}
|
||||
// out's length is a multiple of hash size, so there will be excess
|
||||
// bytes if bytes isn't a multiple of hash size.
|
||||
out = out[:bytes]
|
||||
|
||||
// As mentioned in the KDFa and KDFe specs mentioned above,
|
||||
// the unused bits of the most significant octet are masked off.
|
||||
if maskBits := uint8(bits % 8); maskBits > 0 {
|
||||
out[0] &= (1 << maskBits) - 1
|
||||
}
|
||||
return out
|
||||
}
|
||||
57
vendor/github.com/google/go-tpm/legacy/tpm2/open_other.go
generated
vendored
Normal file
57
vendor/github.com/google/go-tpm/legacy/tpm2/open_other.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build !windows
|
||||
|
||||
// Copyright (c) 2019, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpm2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/google/go-tpm/tpmutil"
|
||||
)
|
||||
|
||||
// OpenTPM opens a channel to the TPM at the given path. If the file is a
|
||||
// device, then it treats it like a normal TPM device, and if the file is a
|
||||
// Unix domain socket, then it opens a connection to the socket.
|
||||
//
|
||||
// This function may also be invoked with no paths, as tpm2.OpenTPM(). In this
|
||||
// case, the default paths on Linux (/dev/tpmrm0 then /dev/tpm0), will be used.
|
||||
func OpenTPM(path ...string) (tpm io.ReadWriteCloser, err error) {
|
||||
switch len(path) {
|
||||
case 0:
|
||||
tpm, err = tpmutil.OpenTPM("/dev/tpmrm0")
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
tpm, err = tpmutil.OpenTPM("/dev/tpm0")
|
||||
}
|
||||
case 1:
|
||||
tpm, err = tpmutil.OpenTPM(path[0])
|
||||
default:
|
||||
return nil, errors.New("cannot specify multiple paths to tpm2.OpenTPM")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make sure this is a TPM 2.0
|
||||
_, err = GetManufacturer(tpm)
|
||||
if err != nil {
|
||||
tpm.Close()
|
||||
return nil, fmt.Errorf("open %s: device is not a TPM 2.0", path)
|
||||
}
|
||||
return tpm, nil
|
||||
}
|
||||
@@ -1,4 +1,6 @@
|
||||
// Copyright 2018-2024 CERN
|
||||
//go:build windows
|
||||
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -11,15 +13,27 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// In applying this license, CERN does not waive the privileges and immunities
|
||||
// granted to it by virtue of its status as an Intergovernmental Organization
|
||||
// or submit itself to any jurisdiction.
|
||||
|
||||
package loader
|
||||
package tpm2
|
||||
|
||||
import (
|
||||
// Load core storage filesystem backends.
|
||||
_ "github.com/opencloud-eu/reva/v2/pkg/storage/fs/posix"
|
||||
// Add your own here
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/google/go-tpm/tpmutil"
|
||||
"github.com/google/go-tpm/tpmutil/tbs"
|
||||
)
|
||||
|
||||
// OpenTPM opens a channel to the TPM.
//
// It queries TPM Base Services (TBS) for device information and refuses to
// open anything that does not report itself as a TPM 2.0, then delegates to
// tpmutil.OpenTPM for the actual connection.
func OpenTPM() (io.ReadWriteCloser, error) {
	info, err := tbs.GetDeviceInfo()
	if err != nil {
		return nil, err
	}

	// Reject TPM 1.2 (or unknown) devices up front.
	if info.TPMVersion != tbs.TPMVersion20 {
		return nil, fmt.Errorf("openTPM: device is not a TPM 2.0")
	}

	return tpmutil.OpenTPM()
}
|
||||
1112
vendor/github.com/google/go-tpm/legacy/tpm2/structures.go
generated
vendored
Normal file
1112
vendor/github.com/google/go-tpm/legacy/tpm2/structures.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
2326
vendor/github.com/google/go-tpm/legacy/tpm2/tpm2.go
generated
vendored
Normal file
2326
vendor/github.com/google/go-tpm/legacy/tpm2/tpm2.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
211
vendor/github.com/google/go-tpm/tpmutil/encoding.go
generated
vendored
Normal file
211
vendor/github.com/google/go-tpm/tpmutil/encoding.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
selfMarshalerType = reflect.TypeOf((*SelfMarshaler)(nil)).Elem()
|
||||
handlesAreaType = reflect.TypeOf((*[]Handle)(nil))
|
||||
)
|
||||
|
||||
// packWithHeader takes a header and a sequence of elements that are either of
|
||||
// fixed length or slices of fixed-length types and packs them into a single
|
||||
// byte array using binary.Write. It updates the CommandHeader to have the right
|
||||
// length.
|
||||
func packWithHeader(ch commandHeader, cmd ...interface{}) ([]byte, error) {
|
||||
hdrSize := binary.Size(ch)
|
||||
body, err := Pack(cmd...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't pack message body: %v", err)
|
||||
}
|
||||
bodySize := len(body)
|
||||
ch.Size = uint32(hdrSize + bodySize)
|
||||
header, err := Pack(ch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't pack message header: %v", err)
|
||||
}
|
||||
return append(header, body...), nil
|
||||
}
|
||||
|
||||
// Pack encodes a set of elements into a single byte array, using
|
||||
// encoding/binary. This means that all the elements must be encodeable
|
||||
// according to the rules of encoding/binary.
|
||||
//
|
||||
// It has one difference from encoding/binary: it encodes byte slices with a
|
||||
// prepended length, to match how the TPM encodes variable-length arrays. If
|
||||
// you wish to add a byte slice without length prefix, use RawBytes.
|
||||
func Pack(elts ...interface{}) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := packType(buf, elts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// tryMarshal attempts to use a TPMMarshal() method defined on the type
// to pack v into buf. True is returned if the method exists and the
// marshal was attempted.
func tryMarshal(buf io.Writer, v reflect.Value) (bool, error) {
	t := v.Type()
	if t.Implements(selfMarshalerType) {
		// A nil pointer would panic inside TPMMarshal; fail explicitly.
		if v.Kind() == reflect.Ptr && v.IsNil() {
			return true, fmt.Errorf("cannot call TPMMarshal on a nil pointer of type %T", v)
		}
		return true, v.Interface().(SelfMarshaler).TPMMarshal(buf)
	}

	// We might have a non-pointer struct field, but we dont have a
	// pointer with which to implement the interface.
	// If the pointer of the type implements the interface, we should be
	// able to construct a value to call TPMMarshal() with.
	// TODO(awly): Try and avoid blowing away private data by using Addr() instead of Set()
	if reflect.PtrTo(t).Implements(selfMarshalerType) {
		// Copy v into a freshly allocated addressable value so that the
		// pointer-receiver TPMMarshal can be invoked on it.
		tmp := reflect.New(t)
		tmp.Elem().Set(v)
		return true, tmp.Interface().(SelfMarshaler).TPMMarshal(buf)
	}

	// Neither the type nor its pointer self-marshals; caller falls back to
	// generic binary encoding.
	return false, nil
}
|
||||
|
||||
// packValue writes a single reflect.Value to buf: self-marshaling types use
// their TPMMarshal method, pointers are dereferenced, structs are packed
// field by field, and everything else goes through big-endian binary.Write.
func packValue(buf io.Writer, v reflect.Value) error {
	// *[]Handle gets the handleList encoding (see handlesAreaType above).
	if v.Type() == handlesAreaType {
		v = v.Convert(reflect.TypeOf((*handleList)(nil)))
	}
	// Self-marshaling types take precedence over the generic encoding.
	if canMarshal, err := tryMarshal(buf, v); canMarshal {
		return err
	}

	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() {
			return fmt.Errorf("cannot pack nil %s", v.Type().String())
		}
		return packValue(buf, v.Elem())
	case reflect.Struct:
		// Pack every field in declaration order.
		for i := 0; i < v.NumField(); i++ {
			f := v.Field(i)
			if err := packValue(buf, f); err != nil {
				return err
			}
		}
	default:
		// Fixed-size leaf values: delegate to encoding/binary.
		return binary.Write(buf, binary.BigEndian, v.Interface())
	}
	return nil
}
|
||||
|
||||
func packType(buf io.Writer, elts ...interface{}) error {
|
||||
for _, e := range elts {
|
||||
if err := packValue(buf, reflect.ValueOf(e)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// tryUnmarshal attempts to use TPMUnmarshal() to perform the
// unpack, if the given value implements SelfMarshaler.
// True is returned if v implements SelfMarshaler & TPMUnmarshal
// was called, along with an error returned from TPMUnmarshal.
func tryUnmarshal(buf io.Reader, v reflect.Value) (bool, error) {
	t := v.Type()
	if t.Implements(selfMarshalerType) {
		// A nil pointer would panic inside TPMUnmarshal; fail explicitly.
		if v.Kind() == reflect.Ptr && v.IsNil() {
			return true, fmt.Errorf("cannot call TPMUnmarshal on a nil pointer")
		}
		return true, v.Interface().(SelfMarshaler).TPMUnmarshal(buf)
	}

	// We might have a non-pointer struct field, which is addressable,
	// If the pointer of the type implements the interface, and the
	// value is addressable, we should be able to call TPMUnmarshal().
	if v.CanAddr() && reflect.PtrTo(t).Implements(selfMarshalerType) {
		return true, v.Addr().Interface().(SelfMarshaler).TPMUnmarshal(buf)
	}

	// No self-unmarshaling available; caller falls back to binary.Read.
	return false, nil
}
|
||||
|
||||
// Unpack is a convenience wrapper around UnpackBuf. Unpack returns the number
|
||||
// of bytes read from b to fill elts and error, if any.
|
||||
func Unpack(b []byte, elts ...interface{}) (int, error) {
|
||||
buf := bytes.NewBuffer(b)
|
||||
err := UnpackBuf(buf, elts...)
|
||||
read := len(b) - buf.Len()
|
||||
return read, err
|
||||
}
|
||||
|
||||
// unpackValue fills a single reflect.Value from buf: self-unmarshaling types
// use their TPMUnmarshal method, pointers are dereferenced, structs are
// filled field by field, and everything else goes through big-endian
// binary.Read.
func unpackValue(buf io.Reader, v reflect.Value) error {
	// *[]Handle gets the handleList decoding (see handlesAreaType above).
	if v.Type() == handlesAreaType {
		v = v.Convert(reflect.TypeOf((*handleList)(nil)))
	}
	// Self-unmarshaling types take precedence over the generic decoding.
	if didUnmarshal, err := tryUnmarshal(buf, v); didUnmarshal {
		return err
	}

	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() {
			return fmt.Errorf("cannot unpack nil %s", v.Type().String())
		}
		return unpackValue(buf, v.Elem())
	case reflect.Struct:
		// Fill every field in declaration order.
		for i := 0; i < v.NumField(); i++ {
			f := v.Field(i)
			if err := unpackValue(buf, f); err != nil {
				return err
			}
		}
		return nil
	default:
		// binary.Read can only set pointer values, so we need to take the address.
		if !v.CanAddr() {
			return fmt.Errorf("cannot unpack unaddressable leaf type %q", v.Type().String())
		}
		return binary.Read(buf, binary.BigEndian, v.Addr().Interface())
	}
}
|
||||
|
||||
// UnpackBuf recursively unpacks types from a reader just as encoding/binary
|
||||
// does under binary.BigEndian, but with one difference: it unpacks a byte
|
||||
// slice by first reading an integer with lengthPrefixSize bytes, then reading
|
||||
// that many bytes. It assumes that incoming values are pointers to values so
|
||||
// that, e.g., underlying slices can be resized as needed.
|
||||
func UnpackBuf(buf io.Reader, elts ...interface{}) error {
|
||||
for _, e := range elts {
|
||||
v := reflect.ValueOf(e)
|
||||
if v.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("non-pointer value %q passed to UnpackBuf", v.Type().String())
|
||||
}
|
||||
if v.IsNil() {
|
||||
return errors.New("nil pointer passed to UnpackBuf")
|
||||
}
|
||||
|
||||
if err := unpackValue(buf, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
10
vendor/github.com/google/go-tpm/tpmutil/poll_other.go
generated
vendored
Normal file
10
vendor/github.com/google/go-tpm/tpmutil/poll_other.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build !linux && !darwin
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// poll is a no-op on platforms without a usable poll(2) — per the build
// constraint, any OS other than Linux or Darwin (e.g. Windows).
func poll(_ *os.File) error { return nil }
|
||||
32
vendor/github.com/google/go-tpm/tpmutil/poll_unix.go
generated
vendored
Normal file
32
vendor/github.com/google/go-tpm/tpmutil/poll_unix.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
//go:build linux || darwin
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// poll blocks until the file descriptor is ready for reading or an error occurs.
|
||||
func poll(f *os.File) error {
|
||||
var (
|
||||
fds = []unix.PollFd{{
|
||||
Fd: int32(f.Fd()),
|
||||
Events: 0x1, // POLLIN
|
||||
}}
|
||||
timeout = -1 // Indefinite timeout
|
||||
)
|
||||
|
||||
if _, err := unix.Poll(fds, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Revents is filled in by the kernel.
|
||||
// If the expected event happened, Revents should match Events.
|
||||
if fds[0].Revents != fds[0].Events {
|
||||
return fmt.Errorf("unexpected poll Revents 0x%x", fds[0].Revents)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
113
vendor/github.com/google/go-tpm/tpmutil/run.go
generated
vendored
Normal file
113
vendor/github.com/google/go-tpm/tpmutil/run.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package tpmutil provides common utility functions for both TPM 1.2 and TPM
|
||||
// 2.0 devices.
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// maxTPMResponse is the largest possible response from the TPM. We need to know
|
||||
// this because we don't always know the length of the TPM response, and
|
||||
// /dev/tpm insists on giving it all back in a single value rather than
|
||||
// returning a header and a body in separate responses.
|
||||
const maxTPMResponse = 4096
|
||||
|
||||
// RunCommandRaw executes the given raw command and returns the raw response.
|
||||
// Does not check the response code except to execute retry logic.
|
||||
func RunCommandRaw(rw io.ReadWriter, inb []byte) ([]byte, error) {
|
||||
if rw == nil {
|
||||
return nil, errors.New("nil TPM handle")
|
||||
}
|
||||
|
||||
// f(t) = (2^t)ms, up to 2s
|
||||
var backoffFac uint
|
||||
var rh responseHeader
|
||||
var outb []byte
|
||||
|
||||
for {
|
||||
if _, err := rw.Write(inb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the TPM is a real device, it may not be ready for reading
|
||||
// immediately after writing the command. Wait until the file
|
||||
// descriptor is ready to be read from.
|
||||
if f, ok := rw.(*os.File); ok {
|
||||
if err := poll(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
outb = make([]byte, maxTPMResponse)
|
||||
outlen, err := rw.Read(outb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Resize the buffer to match the amount read from the TPM.
|
||||
outb = outb[:outlen]
|
||||
|
||||
_, err = Unpack(outb, &rh)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If TPM is busy, retry the command after waiting a few ms.
|
||||
if rh.Res == RCRetry {
|
||||
if backoffFac < 11 {
|
||||
dur := (1 << backoffFac) * time.Millisecond
|
||||
time.Sleep(dur)
|
||||
backoffFac++
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return outb, nil
|
||||
}
|
||||
|
||||
// RunCommand executes cmd with given tag and arguments. Returns TPM response
|
||||
// body (without response header) and response code from the header. Returned
|
||||
// error may be nil if response code is not RCSuccess; caller should check
|
||||
// both.
|
||||
func RunCommand(rw io.ReadWriter, tag Tag, cmd Command, in ...interface{}) ([]byte, ResponseCode, error) {
|
||||
inb, err := packWithHeader(commandHeader{tag, 0, cmd}, in...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
outb, err := RunCommandRaw(rw, inb)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var rh responseHeader
|
||||
read, err := Unpack(outb, &rh)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if rh.Res != RCSuccess {
|
||||
return nil, rh.Res, nil
|
||||
}
|
||||
|
||||
return outb[read:], rh.Res, nil
|
||||
}
|
||||
111
vendor/github.com/google/go-tpm/tpmutil/run_other.go
generated
vendored
Normal file
111
vendor/github.com/google/go-tpm/tpmutil/run_other.go
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
//go:build !windows
|
||||
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
// OpenTPM opens a channel to the TPM at the given path. If the file is a
|
||||
// device, then it treats it like a normal TPM device, and if the file is a
|
||||
// Unix domain socket, then it opens a connection to the socket.
|
||||
func OpenTPM(path string) (io.ReadWriteCloser, error) {
|
||||
// If it's a regular file, then open it
|
||||
var rwc io.ReadWriteCloser
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeDevice != 0 {
|
||||
var f *os.File
|
||||
f, err = os.OpenFile(path, os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rwc = io.ReadWriteCloser(f)
|
||||
} else if fi.Mode()&os.ModeSocket != 0 {
|
||||
rwc = NewEmulatorReadWriteCloser(path)
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported TPM file mode %s", fi.Mode().String())
|
||||
}
|
||||
|
||||
return rwc, nil
|
||||
}
|
||||
|
||||
// dialer abstracts the net.Dial call so test code can provide its own net.Conn
// implementation. The signature matches net.Dial(network, address).
type dialer func(network, path string) (net.Conn, error)
|
||||
|
||||
// EmulatorReadWriteCloser manages connections with a TPM emulator over a Unix
// domain socket. These emulators often operate in a write/read/disconnect
// sequence, so the Write method always connects, and the Read method always
// closes. EmulatorReadWriteCloser is not thread safe.
type EmulatorReadWriteCloser struct {
	path   string   // Unix domain socket path dialed on each Write.
	conn   net.Conn // Live connection between a Write and the following Read; nil otherwise.
	dialer dialer   // Dial function; replaceable by tests.
}
|
||||
|
||||
// NewEmulatorReadWriteCloser stores information about a Unix domain socket to
|
||||
// write to and read from.
|
||||
func NewEmulatorReadWriteCloser(path string) *EmulatorReadWriteCloser {
|
||||
return &EmulatorReadWriteCloser{
|
||||
path: path,
|
||||
dialer: net.Dial,
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements io.Reader by reading from the Unix domain socket and closing
|
||||
// it.
|
||||
func (erw *EmulatorReadWriteCloser) Read(p []byte) (int, error) {
|
||||
// Read is always the second operation in a Write/Read sequence.
|
||||
if erw.conn == nil {
|
||||
return 0, fmt.Errorf("must call Write then Read in an alternating sequence")
|
||||
}
|
||||
n, err := erw.conn.Read(p)
|
||||
erw.conn.Close()
|
||||
erw.conn = nil
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Write implements io.Writer by connecting to the Unix domain socket and
|
||||
// writing.
|
||||
func (erw *EmulatorReadWriteCloser) Write(p []byte) (int, error) {
|
||||
if erw.conn != nil {
|
||||
return 0, fmt.Errorf("must call Write then Read in an alternating sequence")
|
||||
}
|
||||
var err error
|
||||
erw.conn, err = erw.dialer("unix", erw.path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return erw.conn.Write(p)
|
||||
}
|
||||
|
||||
// Close implements io.Closer by closing the Unix domain socket if one is open.
|
||||
func (erw *EmulatorReadWriteCloser) Close() error {
|
||||
if erw.conn == nil {
|
||||
return fmt.Errorf("cannot call Close when no connection is open")
|
||||
}
|
||||
err := erw.conn.Close()
|
||||
erw.conn = nil
|
||||
return err
|
||||
}
|
||||
84
vendor/github.com/google/go-tpm/tpmutil/run_windows.go
generated
vendored
Normal file
84
vendor/github.com/google/go-tpm/tpmutil/run_windows.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/google/go-tpm/tpmutil/tbs"
|
||||
)
|
||||
|
||||
// winTPMBuffer is a ReadWriteCloser to access the TPM in Windows.
type winTPMBuffer struct {
	context   tbs.Context // open TBS handle used to submit commands
	outBuffer []byte      // response from the last Write, drained by Read
}
|
||||
|
||||
// Executes the TPM command specified by commandBuffer (at Normal Priority), returning the number
|
||||
// of bytes in the command and any error code returned by executing the TPM command. Command
|
||||
// response can be read by calling Read().
|
||||
func (rwc *winTPMBuffer) Write(commandBuffer []byte) (int, error) {
|
||||
// TPM spec defines longest possible response to be maxTPMResponse.
|
||||
rwc.outBuffer = rwc.outBuffer[:maxTPMResponse]
|
||||
|
||||
outBufferLen, err := rwc.context.SubmitCommand(
|
||||
tbs.NormalPriority,
|
||||
commandBuffer,
|
||||
rwc.outBuffer,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
rwc.outBuffer = rwc.outBuffer[:0]
|
||||
return 0, err
|
||||
}
|
||||
// Shrink outBuffer so it is length of response.
|
||||
rwc.outBuffer = rwc.outBuffer[:outBufferLen]
|
||||
return len(commandBuffer), nil
|
||||
}
|
||||
|
||||
// Provides TPM response from the command called in the last Write call.
|
||||
func (rwc *winTPMBuffer) Read(responseBuffer []byte) (int, error) {
|
||||
if len(rwc.outBuffer) == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
lenCopied := copy(responseBuffer, rwc.outBuffer)
|
||||
// Cut out the piece of slice which was just read out, maintaining original slice capacity.
|
||||
rwc.outBuffer = append(rwc.outBuffer[:0], rwc.outBuffer[lenCopied:]...)
|
||||
return lenCopied, nil
|
||||
}
|
||||
|
||||
// Close implements io.Closer by releasing the underlying TBS context.
func (rwc *winTPMBuffer) Close() error {
	return rwc.context.Close()
}
|
||||
|
||||
// OpenTPM creates a new instance of a ReadWriteCloser which can interact with a
|
||||
// Windows TPM.
|
||||
func OpenTPM() (io.ReadWriteCloser, error) {
|
||||
tpmContext, err := tbs.CreateContext(tbs.TPMVersion20, tbs.IncludeTPM12|tbs.IncludeTPM20)
|
||||
rwc := &winTPMBuffer{
|
||||
context: tpmContext,
|
||||
outBuffer: make([]byte, 0, maxTPMResponse),
|
||||
}
|
||||
return rwc, err
|
||||
}
|
||||
|
||||
// FromContext creates a new instance of a ReadWriteCloser which can
|
||||
// interact with a Windows TPM, using the specified TBS handle.
|
||||
func FromContext(ctx tbs.Context) io.ReadWriteCloser {
|
||||
return &winTPMBuffer{
|
||||
context: ctx,
|
||||
outBuffer: make([]byte, 0, maxTPMResponse),
|
||||
}
|
||||
}
|
||||
195
vendor/github.com/google/go-tpm/tpmutil/structures.go
generated
vendored
Normal file
195
vendor/github.com/google/go-tpm/tpmutil/structures.go
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
// Copyright (c) 2018, Google LLC All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tpmutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// maxBytesBufferSize sets a sane upper bound on the size of a U32Bytes
// buffer. This limit exists to prevent a maliciously large size prefix
// from resulting in a massive memory allocation, potentially causing
// an OOM condition on the system.
// We expect no buffer from a TPM to approach 1Mb in size.
const maxBytesBufferSize uint32 = 1024 * 1024 // 1Mb.

// RawBytes is for Pack and RunCommand arguments that are already encoded.
// Compared to []byte, RawBytes will not be prepended with slice length during
// encoding.
type RawBytes []byte
|
||||
|
||||
// U16Bytes is a byte slice with a 16-bit header
|
||||
type U16Bytes []byte
|
||||
|
||||
// TPMMarshal packs U16Bytes
|
||||
func (b *U16Bytes) TPMMarshal(out io.Writer) error {
|
||||
size := len([]byte(*b))
|
||||
if err := binary.Write(out, binary.BigEndian, uint16(size)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := out.Write(*b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != size {
|
||||
return fmt.Errorf("unable to write all contents of U16Bytes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TPMUnmarshal unpacks a U16Bytes
|
||||
func (b *U16Bytes) TPMUnmarshal(in io.Reader) error {
|
||||
var tmpSize uint16
|
||||
if err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {
|
||||
return err
|
||||
}
|
||||
size := int(tmpSize)
|
||||
|
||||
if len(*b) >= size {
|
||||
*b = (*b)[:size]
|
||||
} else {
|
||||
*b = append(*b, make([]byte, size-len(*b))...)
|
||||
}
|
||||
|
||||
n, err := in.Read(*b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != size {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// U32Bytes is a byte slice with a 32-bit header
|
||||
type U32Bytes []byte
|
||||
|
||||
// TPMMarshal packs U32Bytes
|
||||
func (b *U32Bytes) TPMMarshal(out io.Writer) error {
|
||||
size := len([]byte(*b))
|
||||
if err := binary.Write(out, binary.BigEndian, uint32(size)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := out.Write(*b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != size {
|
||||
return fmt.Errorf("unable to write all contents of U32Bytes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TPMUnmarshal unpacks a U32Bytes
|
||||
func (b *U32Bytes) TPMUnmarshal(in io.Reader) error {
|
||||
var tmpSize uint32
|
||||
if err := binary.Read(in, binary.BigEndian, &tmpSize); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tmpSize > maxBytesBufferSize {
|
||||
return bytes.ErrTooLarge
|
||||
}
|
||||
// We can now safely cast to an int on 32-bit or 64-bit machines
|
||||
size := int(tmpSize)
|
||||
|
||||
if len(*b) >= size {
|
||||
*b = (*b)[:size]
|
||||
} else {
|
||||
*b = append(*b, make([]byte, size-len(*b))...)
|
||||
}
|
||||
|
||||
n, err := in.Read(*b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != size {
|
||||
return fmt.Errorf("unable to read all contents in to U32Bytes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Tag is a command tag.
type Tag uint16

// Command is an identifier of a TPM command.
type Command uint32

// A commandHeader is the header for a TPM command.
// NOTE(review): field order appears to match the TPM wire layout
// (tag, total size, command code) — confirm against the marshaling code
// before reordering.
type commandHeader struct {
	Tag  Tag
	Size uint32
	Cmd  Command
}

// ResponseCode is a response code returned by TPM.
type ResponseCode uint32

// RCSuccess is response code for successful command. Identical for TPM 1.2 and
// 2.0.
const RCSuccess ResponseCode = 0x000

// RCRetry is response code for TPM is busy.
const RCRetry ResponseCode = 0x922

// A responseHeader is a header for TPM responses.
type responseHeader struct {
	Tag  Tag
	Size uint32
	Res  ResponseCode
}
|
||||
|
||||
// A Handle is a reference to a TPM object.
|
||||
type Handle uint32
|
||||
|
||||
// HandleValue returns the handle value. This behavior is intended to satisfy
|
||||
// an interface that can be implemented by other, more complex types as well.
|
||||
func (h Handle) HandleValue() uint32 {
|
||||
return uint32(h)
|
||||
}
|
||||
|
||||
type handleList []Handle
|
||||
|
||||
func (l *handleList) TPMMarshal(_ io.Writer) error {
|
||||
return fmt.Errorf("TPMMarhsal on []Handle is not supported yet")
|
||||
}
|
||||
|
||||
func (l *handleList) TPMUnmarshal(in io.Reader) error {
|
||||
var numHandles uint16
|
||||
if err := binary.Read(in, binary.BigEndian, &numHandles); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make len(e) match size exactly.
|
||||
size := int(numHandles)
|
||||
if len(*l) >= size {
|
||||
*l = (*l)[:size]
|
||||
} else {
|
||||
*l = append(*l, make([]Handle, size-len(*l))...)
|
||||
}
|
||||
return binary.Read(in, binary.BigEndian, *l)
|
||||
}
|
||||
|
||||
// SelfMarshaler allows custom types to override default encoding/decoding
// behavior in Pack, Unpack and UnpackBuf.
type SelfMarshaler interface {
	// TPMMarshal writes the type's TPM wire representation to out.
	TPMMarshal(out io.Writer) error
	// TPMUnmarshal populates the receiver from TPM wire data read from in.
	TPMUnmarshal(in io.Reader) error
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user